Copied 7 temporarily frozen files to .bak copies and updated .fernignore to protect those patched .bak copies while letting Fern overwrite the original paths during regeneration.
# so users don't need to instantiate the type themselves for no-payload control messages # [temporarily frozen — generator bugs in construct_type call convention and exception handling] -src/deepgram/agent/v1/socket_client.py -src/deepgram/listen/v1/socket_client.py -src/deepgram/listen/v2/socket_client.py -src/deepgram/speak/v1/socket_client.py +# [REGEN: .bak copies preserved, originals unfrozen for Fern to overwrite] +src/deepgram/agent/v1/socket_client.py.bak +src/deepgram/listen/v1/socket_client.py.bak +src/deepgram/listen/v2/socket_client.py.bak +src/deepgram/speak/v1/socket_client.py.bak # Type files with manual int type corrections (Fern generates float for speaker/channel/num_words) # [temporarily frozen — waiting on internal-api-specs#205] -src/deepgram/types/listen_v1response_results_utterances_item.py -src/deepgram/types/listen_v1response_results_utterances_item_words_item.py -src/deepgram/types/listen_v1response_results_channels_item_alternatives_item_paragraphs_paragraphs_item.py +# [REGEN: .bak copies preserved, originals unfrozen for Fern to overwrite] +src/deepgram/types/listen_v1response_results_utterances_item.py.bak +src/deepgram/types/listen_v1response_results_utterances_item_words_item.py.bak +src/deepgram/types/listen_v1response_results_channels_item_alternatives_item_paragraphs_paragraphs_item.py.bak # Hand-written custom tests tests/custom/test_text_builder.py diff --git a/src/deepgram/agent/v1/socket_client.py.bak b/src/deepgram/agent/v1/socket_client.py.bak new file mode 100644 index 00000000..ce3e4aa1 --- /dev/null +++ b/src/deepgram/agent/v1/socket_client.py.bak @@ -0,0 +1,342 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import json +import logging +import typing +from json.decoder import JSONDecodeError + +import websockets +import websockets.sync.connection as websockets_sync_connection +from ...core.events import EventEmitterMixin, EventType +from ...core.unchecked_base_model import construct_type +from .types.agent_v1agent_audio_done import AgentV1AgentAudioDone +from .types.agent_v1agent_started_speaking import AgentV1AgentStartedSpeaking +from .types.agent_v1agent_thinking import AgentV1AgentThinking +from .types.agent_v1conversation_text import AgentV1ConversationText +from .types.agent_v1error import AgentV1Error +from .types.agent_v1function_call_request import AgentV1FunctionCallRequest +from .types.agent_v1inject_agent_message import AgentV1InjectAgentMessage +from .types.agent_v1inject_user_message import AgentV1InjectUserMessage +from .types.agent_v1injection_refused import AgentV1InjectionRefused +from .types.agent_v1keep_alive import AgentV1KeepAlive +from .types.agent_v1prompt_updated import AgentV1PromptUpdated +from .types.agent_v1receive_function_call_response import AgentV1ReceiveFunctionCallResponse +from .types.agent_v1send_function_call_response import AgentV1SendFunctionCallResponse +from .types.agent_v1settings import AgentV1Settings +from .types.agent_v1settings_applied import AgentV1SettingsApplied +from .types.agent_v1speak_updated import AgentV1SpeakUpdated +from .types.agent_v1update_prompt import AgentV1UpdatePrompt +from .types.agent_v1update_speak import AgentV1UpdateSpeak +from .types.agent_v1user_started_speaking import AgentV1UserStartedSpeaking +from .types.agent_v1warning import AgentV1Warning +from .types.agent_v1welcome import AgentV1Welcome + +try: + from websockets.legacy.client import WebSocketClientProtocol # type: ignore +except ImportError: + from websockets import WebSocketClientProtocol # type: ignore + +_logger = logging.getLogger(__name__) + + +def _sanitize_numeric_types(obj: typing.Any) -> typing.Any: + """ + Recursively 
convert float values that are whole numbers to int. + + Workaround for Fern-generated models that type integer API fields + (like sample_rate) as float, causing JSON serialization to produce + values like 44100.0 instead of 44100. The Deepgram API rejects + float representations of integer fields. + + See: https://github.com/deepgram/internal-api-specs/issues/205 + """ + if isinstance(obj, dict): + return {k: _sanitize_numeric_types(v) for k, v in obj.items()} + elif isinstance(obj, list): + return [_sanitize_numeric_types(item) for item in obj] + elif isinstance(obj, float) and obj.is_integer(): + return int(obj) + return obj +V1SocketClientResponse = typing.Union[ + AgentV1ReceiveFunctionCallResponse, + AgentV1PromptUpdated, + AgentV1SpeakUpdated, + AgentV1InjectionRefused, + AgentV1Welcome, + AgentV1SettingsApplied, + AgentV1ConversationText, + AgentV1UserStartedSpeaking, + AgentV1AgentThinking, + AgentV1FunctionCallRequest, + AgentV1AgentStartedSpeaking, + AgentV1AgentAudioDone, + AgentV1Error, + AgentV1Warning, + bytes, +] + + +class AsyncV1SocketClient(EventEmitterMixin): + def __init__(self, *, websocket: WebSocketClientProtocol): + super().__init__() + self._websocket = websocket + + async def __aiter__(self): + async for message in self._websocket: + if isinstance(message, bytes): + yield message + else: + try: + yield construct_type(type_=V1SocketClientResponse, object_=json.loads(message)) # type: ignore + except Exception: + _logger.warning( + "Skipping unknown WebSocket message; update your SDK version to support new message types." + ) + continue + + async def start_listening(self): + """ + Start listening for messages on the websocket connection. 
+ + Emits events in the following order: + - EventType.OPEN when connection is established + - EventType.MESSAGE for each message received + - EventType.ERROR if an error occurs + - EventType.CLOSE when connection is closed + """ + await self._emit_async(EventType.OPEN, None) + try: + async for raw_message in self._websocket: + if isinstance(raw_message, bytes): + parsed = raw_message + else: + json_data = json.loads(raw_message) + try: + parsed = construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore + except Exception: + _logger.warning( + "Skipping unknown WebSocket message; update your SDK version to support new message types." + ) + continue + await self._emit_async(EventType.MESSAGE, parsed) + except Exception as exc: + await self._emit_async(EventType.ERROR, exc) + finally: + await self._emit_async(EventType.CLOSE, None) + + async def send_settings(self, message: AgentV1Settings) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a AgentV1Settings. + """ + await self._send_model(message) + + async def send_update_speak(self, message: AgentV1UpdateSpeak) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a AgentV1UpdateSpeak. + """ + await self._send_model(message) + + async def send_inject_user_message(self, message: AgentV1InjectUserMessage) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a AgentV1InjectUserMessage. + """ + await self._send_model(message) + + async def send_inject_agent_message(self, message: AgentV1InjectAgentMessage) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a AgentV1InjectAgentMessage. + """ + await self._send_model(message) + + async def send_function_call_response(self, message: AgentV1SendFunctionCallResponse) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a AgentV1SendFunctionCallResponse. 
+ """ + await self._send_model(message) + + async def send_keep_alive(self, message: typing.Optional[AgentV1KeepAlive] = None) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a AgentV1KeepAlive. + """ + await self._send_model(message or AgentV1KeepAlive(type="KeepAlive")) + + async def send_update_prompt(self, message: AgentV1UpdatePrompt) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a AgentV1UpdatePrompt. + """ + await self._send_model(message) + + async def send_media(self, message: bytes) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a bytes. + """ + await self._send(message) + + async def recv(self) -> V1SocketClientResponse: + """ + Receive a message from the websocket connection. + """ + data = await self._websocket.recv() + if isinstance(data, bytes): + return data # type: ignore + json_data = json.loads(data) + try: + return construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore + except Exception: + _logger.warning("Skipping unknown WebSocket message; update your SDK version to support new message types.") + return json_data # type: ignore + + async def _send(self, data: typing.Any) -> None: + """ + Send a message to the websocket connection. + """ + if isinstance(data, dict): + data = json.dumps(data) + await self._websocket.send(data) + + async def _send_model(self, data: typing.Any) -> None: + """ + Send a Pydantic model to the websocket connection. 
+ """ + await self._send(_sanitize_numeric_types(data.dict())) + + +class V1SocketClient(EventEmitterMixin): + def __init__(self, *, websocket: websockets_sync_connection.Connection): + super().__init__() + self._websocket = websocket + + def __iter__(self): + for message in self._websocket: + if isinstance(message, bytes): + yield message + else: + try: + yield construct_type(type_=V1SocketClientResponse, object_=json.loads(message)) # type: ignore + except Exception: + _logger.warning( + "Skipping unknown WebSocket message; update your SDK version to support new message types." + ) + continue + + def start_listening(self): + """ + Start listening for messages on the websocket connection. + + Emits events in the following order: + - EventType.OPEN when connection is established + - EventType.MESSAGE for each message received + - EventType.ERROR if an error occurs + - EventType.CLOSE when connection is closed + """ + self._emit(EventType.OPEN, None) + try: + for raw_message in self._websocket: + if isinstance(raw_message, bytes): + parsed = raw_message + else: + json_data = json.loads(raw_message) + try: + parsed = construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore + except Exception: + _logger.warning( + "Skipping unknown WebSocket message; update your SDK version to support new message types." + ) + continue + self._emit(EventType.MESSAGE, parsed) + except Exception as exc: + self._emit(EventType.ERROR, exc) + finally: + self._emit(EventType.CLOSE, None) + + def send_settings(self, message: AgentV1Settings) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a AgentV1Settings. + """ + self._send_model(message) + + def send_update_speak(self, message: AgentV1UpdateSpeak) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a AgentV1UpdateSpeak. 
+ """ + self._send_model(message) + + def send_inject_user_message(self, message: AgentV1InjectUserMessage) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a AgentV1InjectUserMessage. + """ + self._send_model(message) + + def send_inject_agent_message(self, message: AgentV1InjectAgentMessage) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a AgentV1InjectAgentMessage. + """ + self._send_model(message) + + def send_function_call_response(self, message: AgentV1SendFunctionCallResponse) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a AgentV1SendFunctionCallResponse. + """ + self._send_model(message) + + def send_keep_alive(self, message: typing.Optional[AgentV1KeepAlive] = None) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a AgentV1KeepAlive. + """ + self._send_model(message or AgentV1KeepAlive(type="KeepAlive")) + + def send_update_prompt(self, message: AgentV1UpdatePrompt) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a AgentV1UpdatePrompt. + """ + self._send_model(message) + + def send_media(self, message: bytes) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a bytes. + """ + self._send(message) + + def recv(self) -> V1SocketClientResponse: + """ + Receive a message from the websocket connection. + """ + data = self._websocket.recv() + if isinstance(data, bytes): + return data # type: ignore + json_data = json.loads(data) + try: + return construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore + except Exception: + _logger.warning("Skipping unknown WebSocket message; update your SDK version to support new message types.") + return json_data # type: ignore + + def _send(self, data: typing.Any) -> None: + """ + Send a message to the websocket connection. 
+ """ + if isinstance(data, dict): + data = json.dumps(data) + self._websocket.send(data) + + def _send_model(self, data: typing.Any) -> None: + """ + Send a Pydantic model to the websocket connection. + """ + self._send(_sanitize_numeric_types(data.dict())) diff --git a/src/deepgram/listen/v1/socket_client.py.bak b/src/deepgram/listen/v1/socket_client.py.bak new file mode 100644 index 00000000..387c107f --- /dev/null +++ b/src/deepgram/listen/v1/socket_client.py.bak @@ -0,0 +1,236 @@ +# This file was auto-generated by Fern from our API Definition. + +import json +import logging +import typing +from json.decoder import JSONDecodeError + +import websockets +import websockets.sync.connection as websockets_sync_connection +from ...core.events import EventEmitterMixin, EventType +from ...core.unchecked_base_model import construct_type +from .types.listen_v1close_stream import ListenV1CloseStream +from .types.listen_v1finalize import ListenV1Finalize +from .types.listen_v1keep_alive import ListenV1KeepAlive +from .types.listen_v1metadata import ListenV1Metadata +from .types.listen_v1results import ListenV1Results +from .types.listen_v1speech_started import ListenV1SpeechStarted +from .types.listen_v1utterance_end import ListenV1UtteranceEnd + +try: + from websockets.legacy.client import WebSocketClientProtocol # type: ignore +except ImportError: + from websockets import WebSocketClientProtocol # type: ignore + +_logger = logging.getLogger(__name__) +V1SocketClientResponse = typing.Union[ListenV1Results, ListenV1Metadata, ListenV1UtteranceEnd, ListenV1SpeechStarted] + + +class AsyncV1SocketClient(EventEmitterMixin): + def __init__(self, *, websocket: WebSocketClientProtocol): + super().__init__() + self._websocket = websocket + + async def __aiter__(self): + async for message in self._websocket: + if isinstance(message, bytes): + yield message + else: + try: + yield construct_type(type_=V1SocketClientResponse, object_=json.loads(message)) # type: ignore + except 
Exception: + _logger.warning( + "Skipping unknown WebSocket message; update your SDK version to support new message types." + ) + continue + + async def start_listening(self): + """ + Start listening for messages on the websocket connection. + + Emits events in the following order: + - EventType.OPEN when connection is established + - EventType.MESSAGE for each message received + - EventType.ERROR if an error occurs + - EventType.CLOSE when connection is closed + """ + await self._emit_async(EventType.OPEN, None) + try: + async for raw_message in self._websocket: + if isinstance(raw_message, bytes): + parsed = raw_message + else: + json_data = json.loads(raw_message) + try: + parsed = construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore + except Exception: + _logger.warning( + "Skipping unknown WebSocket message; update your SDK version to support new message types." + ) + continue + await self._emit_async(EventType.MESSAGE, parsed) + except Exception as exc: + await self._emit_async(EventType.ERROR, exc) + finally: + await self._emit_async(EventType.CLOSE, None) + + async def send_media(self, message: bytes) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a bytes. + """ + await self._send(message) + + async def send_finalize(self, message: typing.Optional[ListenV1Finalize] = None) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a ListenV1Finalize. + """ + await self._send_model(message or ListenV1Finalize(type="Finalize")) + + async def send_close_stream(self, message: typing.Optional[ListenV1CloseStream] = None) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a ListenV1CloseStream. + """ + await self._send_model(message or ListenV1CloseStream(type="CloseStream")) + + async def send_keep_alive(self, message: typing.Optional[ListenV1KeepAlive] = None) -> None: + """ + Send a message to the websocket connection. 
+ The message will be sent as a ListenV1KeepAlive. + """ + await self._send_model(message or ListenV1KeepAlive(type="KeepAlive")) + + async def recv(self) -> V1SocketClientResponse: + """ + Receive a message from the websocket connection. + """ + data = await self._websocket.recv() + if isinstance(data, bytes): + return data # type: ignore + json_data = json.loads(data) + try: + return construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore + except Exception: + _logger.warning("Skipping unknown WebSocket message; update your SDK version to support new message types.") + return json_data # type: ignore + + async def _send(self, data: typing.Any) -> None: + """ + Send a message to the websocket connection. + """ + if isinstance(data, dict): + data = json.dumps(data) + await self._websocket.send(data) + + async def _send_model(self, data: typing.Any) -> None: + """ + Send a Pydantic model to the websocket connection. + """ + await self._send(data.dict()) + + +class V1SocketClient(EventEmitterMixin): + def __init__(self, *, websocket: websockets_sync_connection.Connection): + super().__init__() + self._websocket = websocket + + def __iter__(self): + for message in self._websocket: + if isinstance(message, bytes): + yield message + else: + try: + yield construct_type(type_=V1SocketClientResponse, object_=json.loads(message)) # type: ignore + except Exception: + _logger.warning( + "Skipping unknown WebSocket message; update your SDK version to support new message types." + ) + continue + + def start_listening(self): + """ + Start listening for messages on the websocket connection. 
+ + Emits events in the following order: + - EventType.OPEN when connection is established + - EventType.MESSAGE for each message received + - EventType.ERROR if an error occurs + - EventType.CLOSE when connection is closed + """ + self._emit(EventType.OPEN, None) + try: + for raw_message in self._websocket: + if isinstance(raw_message, bytes): + parsed = raw_message + else: + json_data = json.loads(raw_message) + try: + parsed = construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore + except Exception: + _logger.warning( + "Skipping unknown WebSocket message; update your SDK version to support new message types." + ) + continue + self._emit(EventType.MESSAGE, parsed) + except Exception as exc: + self._emit(EventType.ERROR, exc) + finally: + self._emit(EventType.CLOSE, None) + + def send_media(self, message: bytes) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a bytes. + """ + self._send(message) + + def send_finalize(self, message: typing.Optional[ListenV1Finalize] = None) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a ListenV1Finalize. + """ + self._send_model(message or ListenV1Finalize(type="Finalize")) + + def send_close_stream(self, message: typing.Optional[ListenV1CloseStream] = None) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a ListenV1CloseStream. + """ + self._send_model(message or ListenV1CloseStream(type="CloseStream")) + + def send_keep_alive(self, message: typing.Optional[ListenV1KeepAlive] = None) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a ListenV1KeepAlive. + """ + self._send_model(message or ListenV1KeepAlive(type="KeepAlive")) + + def recv(self) -> V1SocketClientResponse: + """ + Receive a message from the websocket connection. 
+ """ + data = self._websocket.recv() + if isinstance(data, bytes): + return data # type: ignore + json_data = json.loads(data) + try: + return construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore + except Exception: + _logger.warning("Skipping unknown WebSocket message; update your SDK version to support new message types.") + return json_data # type: ignore + + def _send(self, data: typing.Any) -> None: + """ + Send a message to the websocket connection. + """ + if isinstance(data, dict): + data = json.dumps(data) + self._websocket.send(data) + + def _send_model(self, data: typing.Any) -> None: + """ + Send a Pydantic model to the websocket connection. + """ + self._send(data.dict()) diff --git a/src/deepgram/listen/v2/socket_client.py.bak b/src/deepgram/listen/v2/socket_client.py.bak new file mode 100644 index 00000000..4bf24c36 --- /dev/null +++ b/src/deepgram/listen/v2/socket_client.py.bak @@ -0,0 +1,205 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import json +import logging +import typing +from json.decoder import JSONDecodeError + +import websockets +import websockets.sync.connection as websockets_sync_connection +from ...core.events import EventEmitterMixin, EventType +from ...core.unchecked_base_model import construct_type +from .types.listen_v2close_stream import ListenV2CloseStream +from .types.listen_v2connected import ListenV2Connected +from .types.listen_v2fatal_error import ListenV2FatalError +from .types.listen_v2turn_info import ListenV2TurnInfo + +try: + from websockets.legacy.client import WebSocketClientProtocol # type: ignore +except ImportError: + from websockets import WebSocketClientProtocol # type: ignore + +_logger = logging.getLogger(__name__) +V2SocketClientResponse = typing.Union[ListenV2Connected, ListenV2TurnInfo, ListenV2FatalError] + + +class AsyncV2SocketClient(EventEmitterMixin): + def __init__(self, *, websocket: WebSocketClientProtocol): + super().__init__() + self._websocket = websocket + + async def __aiter__(self): + async for message in self._websocket: + if isinstance(message, bytes): + yield message + else: + try: + yield construct_type(type_=V2SocketClientResponse, object_=json.loads(message)) # type: ignore + except Exception: + _logger.warning( + "Skipping unknown WebSocket message; update your SDK version to support new message types." + ) + continue + + async def start_listening(self): + """ + Start listening for messages on the websocket connection. 
+ + Emits events in the following order: + - EventType.OPEN when connection is established + - EventType.MESSAGE for each message received + - EventType.ERROR if an error occurs + - EventType.CLOSE when connection is closed + """ + await self._emit_async(EventType.OPEN, None) + try: + async for raw_message in self._websocket: + if isinstance(raw_message, bytes): + parsed = raw_message + else: + json_data = json.loads(raw_message) + try: + parsed = construct_type(type_=V2SocketClientResponse, object_=json_data) # type: ignore + except Exception: + _logger.warning( + "Skipping unknown WebSocket message; update your SDK version to support new message types." + ) + continue + await self._emit_async(EventType.MESSAGE, parsed) + except Exception as exc: + await self._emit_async(EventType.ERROR, exc) + finally: + await self._emit_async(EventType.CLOSE, None) + + async def send_media(self, message: bytes) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a bytes. + """ + await self._send(message) + + async def send_close_stream(self, message: typing.Optional[ListenV2CloseStream] = None) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a ListenV2CloseStream. + """ + await self._send_model(message or ListenV2CloseStream(type="CloseStream")) + + async def recv(self) -> V2SocketClientResponse: + """ + Receive a message from the websocket connection. + """ + data = await self._websocket.recv() + if isinstance(data, bytes): + return data # type: ignore + json_data = json.loads(data) + try: + return construct_type(type_=V2SocketClientResponse, object_=json_data) # type: ignore + except Exception: + _logger.warning("Skipping unknown WebSocket message; update your SDK version to support new message types.") + return json_data # type: ignore + + async def _send(self, data: typing.Any) -> None: + """ + Send a message to the websocket connection. 
+ """ + if isinstance(data, dict): + data = json.dumps(data) + await self._websocket.send(data) + + async def _send_model(self, data: typing.Any) -> None: + """ + Send a Pydantic model to the websocket connection. + """ + await self._send(data.dict()) + + +class V2SocketClient(EventEmitterMixin): + def __init__(self, *, websocket: websockets_sync_connection.Connection): + super().__init__() + self._websocket = websocket + + def __iter__(self): + for message in self._websocket: + if isinstance(message, bytes): + yield message + else: + try: + yield construct_type(type_=V2SocketClientResponse, object_=json.loads(message)) # type: ignore + except Exception: + _logger.warning( + "Skipping unknown WebSocket message; update your SDK version to support new message types." + ) + continue + + def start_listening(self): + """ + Start listening for messages on the websocket connection. + + Emits events in the following order: + - EventType.OPEN when connection is established + - EventType.MESSAGE for each message received + - EventType.ERROR if an error occurs + - EventType.CLOSE when connection is closed + """ + self._emit(EventType.OPEN, None) + try: + for raw_message in self._websocket: + if isinstance(raw_message, bytes): + parsed = raw_message + else: + json_data = json.loads(raw_message) + try: + parsed = construct_type(type_=V2SocketClientResponse, object_=json_data) # type: ignore + except Exception: + _logger.warning( + "Skipping unknown WebSocket message; update your SDK version to support new message types." + ) + continue + self._emit(EventType.MESSAGE, parsed) + except Exception as exc: + self._emit(EventType.ERROR, exc) + finally: + self._emit(EventType.CLOSE, None) + + def send_media(self, message: bytes) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a bytes. 
+ """ + self._send(message) + + def send_close_stream(self, message: typing.Optional[ListenV2CloseStream] = None) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a ListenV2CloseStream. + """ + self._send_model(message or ListenV2CloseStream(type="CloseStream")) + + def recv(self) -> V2SocketClientResponse: + """ + Receive a message from the websocket connection. + """ + data = self._websocket.recv() + if isinstance(data, bytes): + return data # type: ignore + json_data = json.loads(data) + try: + return construct_type(type_=V2SocketClientResponse, object_=json_data) # type: ignore + except Exception: + _logger.warning("Skipping unknown WebSocket message; update your SDK version to support new message types.") + return json_data # type: ignore + + def _send(self, data: typing.Any) -> None: + """ + Send a message to the websocket connection. + """ + if isinstance(data, dict): + data = json.dumps(data) + self._websocket.send(data) + + def _send_model(self, data: typing.Any) -> None: + """ + Send a Pydantic model to the websocket connection. + """ + self._send(data.dict()) diff --git a/src/deepgram/speak/v1/socket_client.py.bak b/src/deepgram/speak/v1/socket_client.py.bak new file mode 100644 index 00000000..671e0bd2 --- /dev/null +++ b/src/deepgram/speak/v1/socket_client.py.bak @@ -0,0 +1,237 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import json +import logging +import typing +from json.decoder import JSONDecodeError + +import websockets +import websockets.sync.connection as websockets_sync_connection +from ...core.events import EventEmitterMixin, EventType +from ...core.unchecked_base_model import construct_type +from .types.speak_v1clear import SpeakV1Clear +from .types.speak_v1cleared import SpeakV1Cleared +from .types.speak_v1close import SpeakV1Close +from .types.speak_v1flush import SpeakV1Flush +from .types.speak_v1flushed import SpeakV1Flushed +from .types.speak_v1metadata import SpeakV1Metadata +from .types.speak_v1text import SpeakV1Text +from .types.speak_v1warning import SpeakV1Warning + +try: + from websockets.legacy.client import WebSocketClientProtocol # type: ignore +except ImportError: + from websockets import WebSocketClientProtocol # type: ignore + +_logger = logging.getLogger(__name__) +V1SocketClientResponse = typing.Union[bytes, SpeakV1Metadata, SpeakV1Flushed, SpeakV1Cleared, SpeakV1Warning] + + +class AsyncV1SocketClient(EventEmitterMixin): + def __init__(self, *, websocket: WebSocketClientProtocol): + super().__init__() + self._websocket = websocket + + async def __aiter__(self): + async for message in self._websocket: + if isinstance(message, bytes): + yield message + else: + try: + yield construct_type(type_=V1SocketClientResponse, object_=json.loads(message)) # type: ignore + except Exception: + _logger.warning( + "Skipping unknown WebSocket message; update your SDK version to support new message types." + ) + continue + + async def start_listening(self): + """ + Start listening for messages on the websocket connection. 
+ + Emits events in the following order: + - EventType.OPEN when connection is established + - EventType.MESSAGE for each message received + - EventType.ERROR if an error occurs + - EventType.CLOSE when connection is closed + """ + await self._emit_async(EventType.OPEN, None) + try: + async for raw_message in self._websocket: + if isinstance(raw_message, bytes): + parsed = raw_message + else: + json_data = json.loads(raw_message) + try: + parsed = construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore + except Exception: + _logger.warning( + "Skipping unknown WebSocket message; update your SDK version to support new message types." + ) + continue + await self._emit_async(EventType.MESSAGE, parsed) + except Exception as exc: + await self._emit_async(EventType.ERROR, exc) + finally: + await self._emit_async(EventType.CLOSE, None) + + async def send_text(self, message: SpeakV1Text) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a SpeakV1Text. + """ + await self._send_model(message) + + async def send_flush(self, message: typing.Optional[SpeakV1Flush] = None) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a SpeakV1Flush. + """ + await self._send_model(message or SpeakV1Flush(type="Flush")) + + async def send_clear(self, message: typing.Optional[SpeakV1Clear] = None) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a SpeakV1Clear. + """ + await self._send_model(message or SpeakV1Clear(type="Clear")) + + async def send_close(self, message: typing.Optional[SpeakV1Close] = None) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a SpeakV1Close. + """ + await self._send_model(message or SpeakV1Close(type="Close")) + + async def recv(self) -> V1SocketClientResponse: + """ + Receive a message from the websocket connection. 
+ """ + data = await self._websocket.recv() + if isinstance(data, bytes): + return data # type: ignore + json_data = json.loads(data) + try: + return construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore + except Exception: + _logger.warning("Skipping unknown WebSocket message; update your SDK version to support new message types.") + return json_data # type: ignore + + async def _send(self, data: typing.Any) -> None: + """ + Send a message to the websocket connection. + """ + if isinstance(data, dict): + data = json.dumps(data) + await self._websocket.send(data) + + async def _send_model(self, data: typing.Any) -> None: + """ + Send a Pydantic model to the websocket connection. + """ + await self._send(data.dict()) + + +class V1SocketClient(EventEmitterMixin): + def __init__(self, *, websocket: websockets_sync_connection.Connection): + super().__init__() + self._websocket = websocket + + def __iter__(self): + for message in self._websocket: + if isinstance(message, bytes): + yield message + else: + try: + yield construct_type(type_=V1SocketClientResponse, object_=json.loads(message)) # type: ignore + except Exception: + _logger.warning( + "Skipping unknown WebSocket message; update your SDK version to support new message types." + ) + continue + + def start_listening(self): + """ + Start listening for messages on the websocket connection. 
+ + Emits events in the following order: + - EventType.OPEN when connection is established + - EventType.MESSAGE for each message received + - EventType.ERROR if an error occurs + - EventType.CLOSE when connection is closed + """ + self._emit(EventType.OPEN, None) + try: + for raw_message in self._websocket: + if isinstance(raw_message, bytes): + parsed = raw_message + else: + json_data = json.loads(raw_message) + try: + parsed = construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore + except Exception: + _logger.warning( + "Skipping unknown WebSocket message; update your SDK version to support new message types." + ) + continue + self._emit(EventType.MESSAGE, parsed) + except Exception as exc: + self._emit(EventType.ERROR, exc) + finally: + self._emit(EventType.CLOSE, None) + + def send_text(self, message: SpeakV1Text) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a SpeakV1Text. + """ + self._send_model(message) + + def send_flush(self, message: typing.Optional[SpeakV1Flush] = None) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a SpeakV1Flush. + """ + self._send_model(message or SpeakV1Flush(type="Flush")) + + def send_clear(self, message: typing.Optional[SpeakV1Clear] = None) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a SpeakV1Clear. + """ + self._send_model(message or SpeakV1Clear(type="Clear")) + + def send_close(self, message: typing.Optional[SpeakV1Close] = None) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a SpeakV1Close. + """ + self._send_model(message or SpeakV1Close(type="Close")) + + def recv(self) -> V1SocketClientResponse: + """ + Receive a message from the websocket connection. 
+ """ + data = self._websocket.recv() + if isinstance(data, bytes): + return data # type: ignore + json_data = json.loads(data) + try: + return construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore + except Exception: + _logger.warning("Skipping unknown WebSocket message; update your SDK version to support new message types.") + return json_data # type: ignore + + def _send(self, data: typing.Any) -> None: + """ + Send a message to the websocket connection. + """ + if isinstance(data, dict): + data = json.dumps(data) + self._websocket.send(data) + + def _send_model(self, data: typing.Any) -> None: + """ + Send a Pydantic model to the websocket connection. + """ + self._send(data.dict()) diff --git a/src/deepgram/types/listen_v1response_results_channels_item_alternatives_item_paragraphs_paragraphs_item.py.bak b/src/deepgram/types/listen_v1response_results_channels_item_alternatives_item_paragraphs_paragraphs_item.py.bak new file mode 100644 index 00000000..eec3866b --- /dev/null +++ b/src/deepgram/types/listen_v1response_results_channels_item_alternatives_item_paragraphs_paragraphs_item.py.bak @@ -0,0 +1,29 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .listen_v1response_results_channels_item_alternatives_item_paragraphs_paragraphs_item_sentences_item import ( + ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParagraphsItemSentencesItem, +) + + +class ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParagraphsItem(UncheckedBaseModel): + sentences: typing.Optional[ + typing.List[ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParagraphsItemSentencesItem] + ] = None + speaker: typing.Optional[int] = None + num_words: typing.Optional[int] = None + start: typing.Optional[float] = None + end: typing.Optional[float] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/deepgram/types/listen_v1response_results_utterances_item.py.bak b/src/deepgram/types/listen_v1response_results_utterances_item.py.bak new file mode 100644 index 00000000..0947d9f5 --- /dev/null +++ b/src/deepgram/types/listen_v1response_results_utterances_item.py.bak @@ -0,0 +1,28 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .listen_v1response_results_utterances_item_words_item import ListenV1ResponseResultsUtterancesItemWordsItem + + +class ListenV1ResponseResultsUtterancesItem(UncheckedBaseModel): + start: typing.Optional[float] = None + end: typing.Optional[float] = None + confidence: typing.Optional[float] = None + channel: typing.Optional[int] = None + transcript: typing.Optional[str] = None + words: typing.Optional[typing.List[ListenV1ResponseResultsUtterancesItemWordsItem]] = None + speaker: typing.Optional[int] = None + id: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/deepgram/types/listen_v1response_results_utterances_item_words_item.py.bak b/src/deepgram/types/listen_v1response_results_utterances_item_words_item.py.bak new file mode 100644 index 00000000..6cd1313a --- /dev/null +++ b/src/deepgram/types/listen_v1response_results_utterances_item_words_item.py.bak @@ -0,0 +1,26 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class ListenV1ResponseResultsUtterancesItemWordsItem(UncheckedBaseModel): + word: typing.Optional[str] = None + start: typing.Optional[float] = None + end: typing.Optional[float] = None + confidence: typing.Optional[float] = None + speaker: typing.Optional[int] = None + speaker_confidence: typing.Optional[float] = None + punctuated_word: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow From 272a566df95bedcd801ae8038845781510cb5e2f Mon Sep 17 00:00:00 2001 From: fern-api <115122769+fern-api[bot]@users.noreply.github.com> Date: Tue, 14 Apr 2026 08:49:54 +0000 Subject: [PATCH 3/8] SDK regeneration --- .fern/metadata.json | 6 +- context7.json | 4 - poetry.lock | 260 +++---- pyproject.toml | 2 +- src/deepgram/__init__.py | 250 ++++++- src/deepgram/agent/__init__.py | 618 +---------------- src/deepgram/agent/v1/__init__.py | 618 +---------------- src/deepgram/agent/v1/requests/__init__.py | 254 +------ .../v1/requests/agent_v1agent_audio_done.py | 5 +- .../agent_v1agent_started_speaking.py | 5 +- .../v1/requests/agent_v1agent_thinking.py | 5 +- .../v1/requests/agent_v1conversation_text.py | 5 +- .../agent/v1/requests/agent_v1error.py | 5 +- .../requests/agent_v1function_call_request.py | 3 +- ..._v1function_call_request_functions_item.py | 5 + .../requests/agent_v1inject_agent_message.py | 5 +- .../requests/agent_v1inject_user_message.py | 5 +- .../v1/requests/agent_v1injection_refused.py | 5 +- .../agent/v1/requests/agent_v1keep_alive.py | 5 +- .../v1/requests/agent_v1prompt_updated.py | 5 +- .../agent_v1receive_function_call_response.py | 5 +- .../agent_v1send_function_call_response.py | 5 +- 
.../agent/v1/requests/agent_v1settings.py | 3 +- ...ngs_agent_context_messages_item_content.py | 7 +- ...nt_context_messages_item_function_calls.py | 5 +- ...item_function_calls_function_calls_item.py | 5 + .../agent_v1settings_agent_listen_provider.py | 6 +- ...ent_v1settings_agent_listen_provider_v1.py | 3 +- ...ent_v1settings_agent_listen_provider_v2.py | 3 +- .../requests/agent_v1settings_agent_speak.py | 7 +- .../agent_v1settings_agent_speak_endpoint.py | 14 - ...1settings_agent_speak_endpoint_endpoint.py | 19 - ...1settings_agent_speak_endpoint_provider.py | 93 --- ...agent_speak_endpoint_provider_aws_polly.py | 32 - ...endpoint_provider_aws_polly_credentials.py | 17 - ..._speak_endpoint_provider_cartesia_voice.py | 15 - ...s_agent_speak_endpoint_provider_open_ai.py | 29 - .../agent_v1settings_agent_speak_one_item.py | 14 - ...1settings_agent_speak_one_item_endpoint.py | 19 - ...1settings_agent_speak_one_item_provider.py | 93 --- ...agent_speak_one_item_provider_aws_polly.py | 32 - ...one_item_provider_aws_polly_credentials.py | 17 - ..._speak_one_item_provider_cartesia_voice.py | 15 - ...s_agent_speak_one_item_provider_open_ai.py | 29 - .../requests/agent_v1settings_agent_think.py | 5 +- .../agent_v1settings_agent_think_one_item.py | 24 - ...ngs_agent_think_one_item_context_length.py | 7 - ...1settings_agent_think_one_item_provider.py | 13 - .../v1/requests/agent_v1settings_applied.py | 5 +- .../v1/requests/agent_v1speak_updated.py | 5 +- .../v1/requests/agent_v1think_updated.py | 12 + .../v1/requests/agent_v1update_prompt.py | 5 +- .../agent/v1/requests/agent_v1update_speak.py | 5 +- .../v1/requests/agent_v1update_speak_speak.py | 7 +- .../agent_v1update_speak_speak_endpoint.py | 14 - ..._v1update_speak_speak_endpoint_endpoint.py | 19 - ..._v1update_speak_speak_endpoint_provider.py | 93 --- ...speak_speak_endpoint_provider_aws_polly.py | 32 - ...endpoint_provider_aws_polly_credentials.py | 17 - ..._speak_speak_endpoint_provider_cartesia.py | 30 - 
..._speak_endpoint_provider_cartesia_voice.py | 15 - ..._speak_speak_endpoint_provider_deepgram.py | 21 - ...eak_speak_endpoint_provider_eleven_labs.py | 31 - ...e_speak_speak_endpoint_provider_open_ai.py | 29 - .../agent_v1update_speak_speak_one_item.py | 14 - ..._v1update_speak_speak_one_item_provider.py | 93 --- ...speak_speak_one_item_provider_aws_polly.py | 32 - ...one_item_provider_aws_polly_credentials.py | 17 - ..._speak_speak_one_item_provider_cartesia.py | 30 - ..._speak_speak_one_item_provider_deepgram.py | 21 - ...eak_speak_one_item_provider_eleven_labs.py | 31 - ...e_speak_speak_one_item_provider_open_ai.py | 29 - .../agent/v1/requests/agent_v1update_think.py | 15 + .../v1/requests/agent_v1update_think_think.py | 7 + .../requests/agent_v1user_started_speaking.py | 5 +- .../agent/v1/requests/agent_v1warning.py | 5 +- .../agent/v1/requests/agent_v1welcome.py | 5 +- src/deepgram/agent/v1/requests/cartesia.py | 30 - src/deepgram/agent/v1/requests/deepgram.py | 21 - src/deepgram/agent/v1/requests/eleven_labs.py | 31 - src/deepgram/agent/v1/socket_client.py | 65 +- src/deepgram/agent/v1/types/__init__.py | 554 +-------------- .../v1/types/agent_v1agent_audio_done.py | 3 +- .../v1/types/agent_v1agent_audio_done_type.py | 5 - .../types/agent_v1agent_started_speaking.py | 3 +- .../agent_v1agent_started_speaking_type.py | 5 - .../agent/v1/types/agent_v1agent_thinking.py | 3 +- .../v1/types/agent_v1agent_thinking_type.py | 5 - .../v1/types/agent_v1conversation_text.py | 3 +- .../types/agent_v1conversation_text_type.py | 5 - src/deepgram/agent/v1/types/agent_v1error.py | 3 +- .../agent/v1/types/agent_v1error_type.py | 5 - .../v1/types/agent_v1function_call_request.py | 3 +- ..._v1function_call_request_functions_item.py | 5 + .../agent_v1function_call_request_type.py | 5 - .../v1/types/agent_v1inject_agent_message.py | 3 +- .../agent_v1inject_agent_message_type.py | 5 - .../v1/types/agent_v1inject_user_message.py | 3 +- .../types/agent_v1inject_user_message_type.py 
| 5 - .../v1/types/agent_v1injection_refused.py | 3 +- .../types/agent_v1injection_refused_type.py | 5 - .../agent/v1/types/agent_v1keep_alive.py | 3 +- .../agent/v1/types/agent_v1prompt_updated.py | 3 +- .../v1/types/agent_v1prompt_updated_type.py | 5 - .../agent_v1receive_function_call_response.py | 3 +- ...t_v1receive_function_call_response_type.py | 5 - .../agent_v1send_function_call_response.py | 3 +- ...gent_v1send_function_call_response_type.py | 5 - .../agent/v1/types/agent_v1settings.py | 3 +- ...ngs_agent_context_messages_item_content.py | 5 +- ...gent_context_messages_item_content_type.py | 5 - ...nt_context_messages_item_function_calls.py | 5 +- ...item_function_calls_function_calls_item.py | 5 + ...ntext_messages_item_function_calls_type.py | 5 - .../agent_v1settings_agent_listen_provider.py | 6 +- ...ent_v1settings_agent_listen_provider_v1.py | 3 +- ...v1settings_agent_listen_provider_v1type.py | 5 - ...ent_v1settings_agent_listen_provider_v2.py | 3 +- ...v1settings_agent_listen_provider_v2type.py | 5 - .../v1/types/agent_v1settings_agent_speak.py | 7 +- .../agent_v1settings_agent_speak_endpoint.py | 27 - ...1settings_agent_speak_endpoint_endpoint.py | 30 - ...1settings_agent_speak_endpoint_provider.py | 144 ---- ...agent_speak_endpoint_provider_aws_polly.py | 45 -- ...endpoint_provider_aws_polly_credentials.py | 30 - ...int_provider_aws_polly_credentials_type.py | 7 - ...peak_endpoint_provider_aws_polly_engine.py | 7 - ...speak_endpoint_provider_aws_polly_voice.py | 7 - ...eak_endpoint_provider_cartesia_model_id.py | 7 - ...peak_endpoint_provider_cartesia_version.py | 5 - ..._speak_endpoint_provider_deepgram_model.py | 72 -- ...peak_endpoint_provider_deepgram_version.py | 5 - ..._endpoint_provider_eleven_labs_model_id.py | 7 - ...k_endpoint_provider_eleven_labs_version.py | 5 - ...s_agent_speak_endpoint_provider_open_ai.py | 42 -- ...t_speak_endpoint_provider_open_ai_model.py | 5 - ...speak_endpoint_provider_open_ai_version.py | 5 - 
...t_speak_endpoint_provider_open_ai_voice.py | 7 - .../agent_v1settings_agent_speak_one_item.py | 27 - ...1settings_agent_speak_one_item_endpoint.py | 30 - ...1settings_agent_speak_one_item_provider.py | 144 ---- ...agent_speak_one_item_provider_aws_polly.py | 45 -- ...one_item_provider_aws_polly_credentials.py | 30 - ...tem_provider_aws_polly_credentials_type.py | 5 - ...peak_one_item_provider_aws_polly_engine.py | 7 - ...speak_one_item_provider_aws_polly_voice.py | 7 - ...eak_one_item_provider_cartesia_model_id.py | 7 - ...peak_one_item_provider_cartesia_version.py | 5 - ..._speak_one_item_provider_deepgram_model.py | 72 -- ...peak_one_item_provider_deepgram_version.py | 5 - ..._one_item_provider_eleven_labs_model_id.py | 7 - ...k_one_item_provider_eleven_labs_version.py | 5 - ...s_agent_speak_one_item_provider_open_ai.py | 42 -- ...t_speak_one_item_provider_open_ai_model.py | 5 - ...speak_one_item_provider_open_ai_version.py | 5 - ...t_speak_one_item_provider_open_ai_voice.py | 7 - .../v1/types/agent_v1settings_agent_think.py | 3 +- .../agent_v1settings_agent_think_one_item.py | 35 - ...ngs_agent_think_one_item_context_length.py | 7 - ...1settings_agent_think_one_item_provider.py | 13 - .../v1/types/agent_v1settings_applied.py | 3 +- .../v1/types/agent_v1settings_applied_type.py | 5 - .../agent/v1/types/agent_v1speak_updated.py | 3 +- .../v1/types/agent_v1speak_updated_type.py | 5 - ...esia_voice.py => agent_v1think_updated.py} | 11 +- .../agent/v1/types/agent_v1update_prompt.py | 3 +- .../v1/types/agent_v1update_prompt_type.py | 5 - .../agent/v1/types/agent_v1update_speak.py | 3 +- .../v1/types/agent_v1update_speak_speak.py | 5 +- .../agent_v1update_speak_speak_endpoint.py | 27 - ..._v1update_speak_speak_endpoint_endpoint.py | 30 - ..._v1update_speak_speak_endpoint_provider.py | 144 ---- ...speak_speak_endpoint_provider_aws_polly.py | 45 -- ...endpoint_provider_aws_polly_credentials.py | 30 - ...int_provider_aws_polly_credentials_type.py | 5 - 
...peak_endpoint_provider_aws_polly_engine.py | 7 - ...speak_endpoint_provider_aws_polly_voice.py | 7 - ..._speak_speak_endpoint_provider_cartesia.py | 43 -- ...eak_endpoint_provider_cartesia_model_id.py | 7 - ...peak_endpoint_provider_cartesia_version.py | 5 - ..._speak_speak_endpoint_provider_deepgram.py | 34 - ..._speak_endpoint_provider_deepgram_model.py | 72 -- ...peak_endpoint_provider_deepgram_version.py | 5 - ...eak_speak_endpoint_provider_eleven_labs.py | 44 -- ..._endpoint_provider_eleven_labs_model_id.py | 7 - ...k_endpoint_provider_eleven_labs_version.py | 5 - ...e_speak_speak_endpoint_provider_open_ai.py | 42 -- ...k_speak_endpoint_provider_open_ai_model.py | 5 - ...speak_endpoint_provider_open_ai_version.py | 5 - ...k_speak_endpoint_provider_open_ai_voice.py | 7 - .../agent_v1update_speak_speak_one_item.py | 27 - ..._v1update_speak_speak_one_item_provider.py | 144 ---- ...speak_speak_one_item_provider_aws_polly.py | 45 -- ...tem_provider_aws_polly_credentials_type.py | 5 - ...peak_one_item_provider_aws_polly_engine.py | 7 - ..._speak_speak_one_item_provider_cartesia.py | 43 -- ...eak_one_item_provider_cartesia_model_id.py | 7 - ...peak_one_item_provider_cartesia_version.py | 5 - ..._speak_speak_one_item_provider_deepgram.py | 34 - ...peak_one_item_provider_deepgram_version.py | 5 - ...eak_speak_one_item_provider_eleven_labs.py | 44 -- ...k_one_item_provider_eleven_labs_version.py | 5 - ...e_speak_speak_one_item_provider_open_ai.py | 42 -- ...k_speak_one_item_provider_open_ai_model.py | 5 - ...speak_one_item_provider_open_ai_version.py | 5 - ...k_speak_one_item_provider_open_ai_voice.py | 7 - .../v1/types/agent_v1update_speak_type.py | 5 - ...tesia_voice.py => agent_v1update_think.py} | 12 +- .../v1/types/agent_v1update_think_think.py | 7 + .../v1/types/agent_v1user_started_speaking.py | 3 +- .../agent_v1user_started_speaking_type.py | 5 - .../agent/v1/types/agent_v1warning.py | 3 +- .../agent/v1/types/agent_v1welcome.py | 3 +- 
.../agent/v1/types/agent_v1welcome_type.py | 5 - src/deepgram/agent/v1/types/cartesia.py | 43 -- src/deepgram/agent/v1/types/deepgram.py | 34 - src/deepgram/agent/v1/types/eleven_labs.py | 44 -- src/deepgram/base_client.py | 19 + src/deepgram/core/client_wrapper.py | 4 +- src/deepgram/listen/__init__.py | 27 +- src/deepgram/listen/v1/__init__.py | 30 +- src/deepgram/listen/v1/media/__init__.py | 18 +- src/deepgram/listen/v1/media/client.py | 54 +- src/deepgram/listen/v1/media/raw_client.py | 54 +- .../listen/v1/media/types/__init__.py | 18 +- ...o.py => media_transcribe_request_model.py} | 2 +- .../media_transcribe_request_summarize.py} | 2 +- ...media_transcribe_request_summarize_zero.py | 5 - .../types/media_transcribe_request_version.py | 5 + .../media_transcribe_request_version_zero.py | 5 - .../listen/v1/requests/listen_v1metadata.py | 5 +- .../listen/v1/requests/listen_v1results.py | 3 +- .../v1/requests/listen_v1speech_started.py | 3 +- .../v1/requests/listen_v1utterance_end.py | 3 +- src/deepgram/listen/v1/socket_client.py | 40 +- src/deepgram/listen/v1/types/__init__.py | 12 - .../listen/v1/types/listen_v1metadata.py | 3 +- .../listen/v1/types/listen_v1metadata_type.py | 5 - .../listen/v1/types/listen_v1results.py | 3 +- .../listen/v1/types/listen_v1results_type.py | 5 - .../v1/types/listen_v1speech_started.py | 3 +- .../v1/types/listen_v1speech_started_type.py | 5 - .../listen/v1/types/listen_v1utterance_end.py | 3 +- .../v1/types/listen_v1utterance_end_type.py | 5 - src/deepgram/listen/v2/__init__.py | 15 +- src/deepgram/listen/v2/requests/__init__.py | 3 + .../v2/requests/listen_v2configure_failure.py | 24 + .../listen/v2/requests/listen_v2connected.py | 5 +- .../v2/requests/listen_v2fatal_error.py | 5 +- .../listen/v2/requests/listen_v2turn_info.py | 16 +- src/deepgram/listen/v2/socket_client.py | 43 +- src/deepgram/listen/v2/types/__init__.py | 12 +- .../v2/types/listen_v2configure_failure.py} | 17 +- .../listen/v2/types/listen_v2connected.py | 3 +- 
.../v2/types/listen_v2connected_type.py | 5 - .../listen/v2/types/listen_v2fatal_error.py | 3 +- .../v2/types/listen_v2fatal_error_type.py | 5 - .../listen/v2/types/listen_v2turn_info.py | 16 +- .../v2/types/listen_v2turn_info_type.py | 5 - src/deepgram/read/v1/__init__.py | 6 +- src/deepgram/read/v1/text/__init__.py | 6 +- src/deepgram/read/v1/text/client.py | 10 +- src/deepgram/read/v1/text/raw_client.py | 10 +- src/deepgram/read/v1/text/types/__init__.py | 6 +- .../types/text_analyze_request_summarize.py} | 2 +- src/deepgram/requests/__init__.py | 112 +++ .../requests/agent_configuration_v1.py | 37 + ..._think_models_v1response_models_item_id.py | 5 +- ...think_models_v1response_models_item_one.py | 7 +- ...ink_models_v1response_models_item_three.py | 10 +- ...think_models_v1response_models_item_two.py | 7 +- ...hink_models_v1response_models_item_zero.py | 7 +- src/deepgram/requests/agent_variable_v1.py | 33 + src/deepgram/requests/anthropic.py | 24 + .../requests/aws_bedrock_think_provider.py | 25 + .../aws_bedrock_think_provider_credentials.py | 35 + .../requests/aws_polly_speak_provider.py | 29 + .../aws_polly_speak_provider_credentials.py | 15 + src/deepgram/requests/cartesia.py | 31 + .../cartesia_speak_provider_voice.py} | 2 +- .../create_agent_configuration_v1response.py | 22 + src/deepgram/requests/deepgram.py | 24 + .../requests/eleven_labs_speak_provider.py | 29 + src/deepgram/requests/google.py | 24 + src/deepgram/requests/groq.py | 23 + .../list_agent_configurations_v1response.py | 13 + .../list_agent_variables_v1response.py | 13 + .../requests/open_ai_speak_provider.py | 25 + .../requests/open_ai_think_provider.py | 24 + src/deepgram/requests/speak_settings_v1.py | 14 + .../speak_settings_v1endpoint.py} | 2 +- .../requests/speak_settings_v1provider.py | 65 ++ src/deepgram/requests/think_settings_v1.py | 17 +- .../think_settings_v1context_length.py} | 2 +- .../think_settings_v1endpoint.py} | 2 +- .../think_settings_v1functions_item.py} | 8 +- 
...ink_settings_v1functions_item_endpoint.py} | 2 +- .../requests/think_settings_v1provider.py | 56 ++ src/deepgram/self_hosted/v1/__init__.py | 12 +- .../v1/distribution_credentials/__init__.py | 9 +- .../v1/distribution_credentials/client.py | 11 +- .../v1/distribution_credentials/raw_client.py | 9 +- .../types/__init__.py | 6 +- ...ion_credentials_create_request_provider.py | 5 - src/deepgram/speak/__init__.py | 9 - src/deepgram/speak/v1/__init__.py | 9 - .../speak/v1/requests/speak_v1metadata.py | 5 +- .../speak/v1/requests/speak_v1text.py | 5 +- .../speak/v1/requests/speak_v1warning.py | 5 +- src/deepgram/speak/v1/socket_client.py | 40 +- src/deepgram/speak/v1/types/__init__.py | 9 - .../speak/v1/types/speak_v1metadata.py | 3 +- .../speak/v1/types/speak_v1metadata_type.py | 5 - src/deepgram/speak/v1/types/speak_v1text.py | 3 +- .../speak/v1/types/speak_v1warning.py | 3 +- .../speak/v1/types/speak_v1warning_type.py | 5 - src/deepgram/types/__init__.py | 146 +++- src/deepgram/types/agent_configuration_v1.py | 48 ++ ..._think_models_v1response_models_item_id.py | 3 +- ...dels_v1response_models_item_id_provider.py | 5 - ...think_models_v1response_models_item_one.py | 3 +- ...els_v1response_models_item_one_provider.py | 5 - ...ink_models_v1response_models_item_three.py | 6 +- ..._models_v1response_models_item_three_id.py | 5 - ...s_v1response_models_item_three_provider.py | 5 - ...think_models_v1response_models_item_two.py | 3 +- ...els_v1response_models_item_two_provider.py | 5 - ...hink_models_v1response_models_item_zero.py | 3 +- ...ls_v1response_models_item_zero_provider.py | 5 - src/deepgram/types/agent_variable_v1.py | 44 ++ src/deepgram/types/anthropic.py | 32 +- .../types/anthropic_think_provider_model.py | 7 + .../types/aws_bedrock_think_provider.py | 33 +- .../aws_bedrock_think_provider_credentials.py | 48 ++ ...bedrock_think_provider_credentials_type.py | 5 + .../types/aws_bedrock_think_provider_model.py | 7 + .../types/aws_polly_speak_provider.py | 37 
+- .../aws_polly_speak_provider_credentials.py} | 12 +- ...s_polly_speak_provider_credentials_type.py | 5 + .../types/aws_polly_speak_provider_engine.py | 5 + .../aws_polly_speak_provider_voice.py} | 2 +- src/deepgram/types/cartesia.py | 39 +- .../types/cartesia_speak_provider_model_id.py | 5 + .../cartesia_speak_provider_voice.py} | 6 +- .../create_agent_configuration_v1response.py | 33 + src/deepgram/types/deepgram.py | 32 +- .../deepgram_speak_provider_model.py} | 2 +- .../delete_agent_configuration_v1response.py} | 2 +- .../delete_agent_variable_v1response.py} | 2 +- .../types/eleven_labs_speak_provider.py | 37 +- .../eleven_labs_speak_provider_model_id.py} | 2 +- src/deepgram/types/google.py | 32 +- .../types/google_think_provider_model.py | 7 + src/deepgram/types/groq.py | 31 +- .../list_agent_configurations_v1response.py | 24 + .../types/list_agent_variables_v1response.py | 24 + ...natives_item_paragraphs_paragraphs_item.py | 4 +- ...sten_v1response_results_utterances_item.py | 4 +- ...onse_results_utterances_item_words_item.py | 2 +- src/deepgram/types/listen_v2model.py | 2 +- src/deepgram/types/open_ai_speak_provider.py | 33 +- .../types/open_ai_speak_provider_model.py | 5 + .../types/open_ai_speak_provider_voice.py | 5 + src/deepgram/types/open_ai_think_provider.py | 32 +- .../types/open_ai_think_provider_model.py | 10 + src/deepgram/types/speak_settings_v1.py | 24 +- .../speak_settings_v1endpoint.py} | 6 +- .../types/speak_settings_v1provider.py | 116 ++++ src/deepgram/types/think_settings_v1.py | 17 +- .../think_settings_v1context_length.py} | 2 +- .../think_settings_v1endpoint.py} | 6 +- .../think_settings_v1functions_item.py} | 12 +- ...ink_settings_v1functions_item_endpoint.py} | 6 +- .../types/think_settings_v1provider.py | 107 +++ src/deepgram/voice_agent/__init__.py | 34 + src/deepgram/voice_agent/client.py | 82 +++ .../voice_agent/configurations/__init__.py | 4 + .../voice_agent/configurations/client.py | 496 ++++++++++++++ 
.../voice_agent/configurations/raw_client.py | 635 ++++++++++++++++++ src/deepgram/voice_agent/raw_client.py | 13 + .../voice_agent/variables/__init__.py | 4 + src/deepgram/voice_agent/variables/client.py | 491 ++++++++++++++ .../voice_agent/variables/raw_client.py | 630 +++++++++++++++++ .../custom/test_websocket_control_messages.py | 186 ----- tests/wire/test_voiceAgent_configurations.py | 66 ++ tests/wire/test_voiceAgent_variables.py | 69 ++ wiremock/wiremock-mappings.json | 2 +- 387 files changed, 5223 insertions(+), 6389 deletions(-) delete mode 100644 context7.json delete mode 100644 src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint.py delete mode 100644 src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_endpoint.py delete mode 100644 src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider.py delete mode 100644 src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_aws_polly.py delete mode 100644 src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials.py delete mode 100644 src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_cartesia_voice.py delete mode 100644 src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_open_ai.py delete mode 100644 src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item.py delete mode 100644 src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_endpoint.py delete mode 100644 src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_provider.py delete mode 100644 src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_provider_aws_polly.py delete mode 100644 src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials.py delete mode 100644 src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_provider_cartesia_voice.py delete mode 100644 
src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_provider_open_ai.py delete mode 100644 src/deepgram/agent/v1/requests/agent_v1settings_agent_think_one_item.py delete mode 100644 src/deepgram/agent/v1/requests/agent_v1settings_agent_think_one_item_context_length.py delete mode 100644 src/deepgram/agent/v1/requests/agent_v1settings_agent_think_one_item_provider.py create mode 100644 src/deepgram/agent/v1/requests/agent_v1think_updated.py delete mode 100644 src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint.py delete mode 100644 src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_endpoint.py delete mode 100644 src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_provider.py delete mode 100644 src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_provider_aws_polly.py delete mode 100644 src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_provider_aws_polly_credentials.py delete mode 100644 src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_provider_cartesia.py delete mode 100644 src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_provider_cartesia_voice.py delete mode 100644 src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_provider_deepgram.py delete mode 100644 src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_provider_eleven_labs.py delete mode 100644 src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_provider_open_ai.py delete mode 100644 src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item.py delete mode 100644 src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_provider.py delete mode 100644 src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_provider_aws_polly.py delete mode 100644 src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_provider_aws_polly_credentials.py delete mode 100644 
src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_provider_cartesia.py delete mode 100644 src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_provider_deepgram.py delete mode 100644 src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_provider_eleven_labs.py delete mode 100644 src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_provider_open_ai.py create mode 100644 src/deepgram/agent/v1/requests/agent_v1update_think.py create mode 100644 src/deepgram/agent/v1/requests/agent_v1update_think_think.py delete mode 100644 src/deepgram/agent/v1/requests/cartesia.py delete mode 100644 src/deepgram/agent/v1/requests/deepgram.py delete mode 100644 src/deepgram/agent/v1/requests/eleven_labs.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1agent_audio_done_type.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1agent_started_speaking_type.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1agent_thinking_type.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1conversation_text_type.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1error_type.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1function_call_request_type.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1inject_agent_message_type.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1inject_user_message_type.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1injection_refused_type.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1prompt_updated_type.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1receive_function_call_response_type.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1send_function_call_response_type.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_content_type.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_function_calls_type.py delete mode 100644 
src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v1type.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v2type.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_endpoint.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials_type.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_engine.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_voice.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_cartesia_model_id.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_cartesia_version.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_deepgram_model.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_deepgram_version.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_eleven_labs_model_id.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_eleven_labs_version.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_open_ai.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_open_ai_model.py delete mode 100644 
src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_open_ai_version.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_open_ai_voice.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_endpoint.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials_type.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly_engine.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly_voice.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_cartesia_model_id.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_cartesia_version.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_deepgram_model.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_deepgram_version.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_eleven_labs_model_id.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_eleven_labs_version.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_open_ai.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_open_ai_model.py delete mode 100644 
src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_open_ai_version.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_open_ai_voice.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_think_one_item.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_think_one_item_context_length.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_agent_think_one_item_provider.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1settings_applied_type.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1speak_updated_type.py rename src/deepgram/agent/v1/types/{agent_v1update_speak_speak_endpoint_provider_cartesia_voice.py => agent_v1think_updated.py} (71%) delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_prompt_type.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_endpoint.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_aws_polly.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_aws_polly_credentials.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_aws_polly_credentials_type.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_aws_polly_engine.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_aws_polly_voice.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_cartesia.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_cartesia_model_id.py delete mode 100644 
src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_cartesia_version.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_deepgram.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_deepgram_model.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_deepgram_version.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_eleven_labs.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_eleven_labs_model_id.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_eleven_labs_version.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_open_ai.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_open_ai_model.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_open_ai_version.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_open_ai_voice.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_aws_polly.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_aws_polly_credentials_type.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_aws_polly_engine.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_cartesia.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_cartesia_model_id.py delete mode 100644 
src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_cartesia_version.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_deepgram.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_deepgram_version.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_eleven_labs.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_eleven_labs_version.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_open_ai.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_open_ai_model.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_open_ai_version.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_open_ai_voice.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1update_speak_type.py rename src/deepgram/agent/v1/types/{agent_v1settings_agent_speak_one_item_provider_cartesia_voice.py => agent_v1update_think.py} (65%) create mode 100644 src/deepgram/agent/v1/types/agent_v1update_think_think.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1user_started_speaking_type.py delete mode 100644 src/deepgram/agent/v1/types/agent_v1welcome_type.py delete mode 100644 src/deepgram/agent/v1/types/cartesia.py delete mode 100644 src/deepgram/agent/v1/types/deepgram.py delete mode 100644 src/deepgram/agent/v1/types/eleven_labs.py rename src/deepgram/listen/v1/media/types/{media_transcribe_request_model_zero.py => media_transcribe_request_model.py} (94%) rename src/deepgram/{read/v1/text/types/text_analyze_request_summarize_zero.py => listen/v1/media/types/media_transcribe_request_summarize.py} (60%) delete mode 100644 src/deepgram/listen/v1/media/types/media_transcribe_request_summarize_zero.py create mode 100644 
src/deepgram/listen/v1/media/types/media_transcribe_request_version.py delete mode 100644 src/deepgram/listen/v1/media/types/media_transcribe_request_version_zero.py delete mode 100644 src/deepgram/listen/v1/types/listen_v1metadata_type.py delete mode 100644 src/deepgram/listen/v1/types/listen_v1results_type.py delete mode 100644 src/deepgram/listen/v1/types/listen_v1speech_started_type.py delete mode 100644 src/deepgram/listen/v1/types/listen_v1utterance_end_type.py create mode 100644 src/deepgram/listen/v2/requests/listen_v2configure_failure.py rename src/deepgram/{agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_cartesia_voice.py => listen/v2/types/listen_v2configure_failure.py} (53%) delete mode 100644 src/deepgram/listen/v2/types/listen_v2connected_type.py delete mode 100644 src/deepgram/listen/v2/types/listen_v2fatal_error_type.py delete mode 100644 src/deepgram/listen/v2/types/listen_v2turn_info_type.py rename src/deepgram/{agent/v1/types/agent_v1keep_alive_type.py => read/v1/text/types/text_analyze_request_summarize.py} (59%) create mode 100644 src/deepgram/requests/agent_configuration_v1.py create mode 100644 src/deepgram/requests/agent_variable_v1.py create mode 100644 src/deepgram/requests/anthropic.py create mode 100644 src/deepgram/requests/aws_bedrock_think_provider.py create mode 100644 src/deepgram/requests/aws_bedrock_think_provider_credentials.py create mode 100644 src/deepgram/requests/aws_polly_speak_provider.py create mode 100644 src/deepgram/requests/aws_polly_speak_provider_credentials.py create mode 100644 src/deepgram/requests/cartesia.py rename src/deepgram/{agent/v1/requests/agent_v1update_speak_speak_one_item_provider_cartesia_voice.py => requests/cartesia_speak_provider_voice.py} (67%) create mode 100644 src/deepgram/requests/create_agent_configuration_v1response.py create mode 100644 src/deepgram/requests/deepgram.py create mode 100644 src/deepgram/requests/eleven_labs_speak_provider.py create mode 100644 
src/deepgram/requests/google.py create mode 100644 src/deepgram/requests/groq.py create mode 100644 src/deepgram/requests/list_agent_configurations_v1response.py create mode 100644 src/deepgram/requests/list_agent_variables_v1response.py create mode 100644 src/deepgram/requests/open_ai_speak_provider.py create mode 100644 src/deepgram/requests/open_ai_think_provider.py create mode 100644 src/deepgram/requests/speak_settings_v1.py rename src/deepgram/{agent/v1/requests/agent_v1update_speak_speak_one_item_endpoint.py => requests/speak_settings_v1endpoint.py} (87%) create mode 100644 src/deepgram/requests/speak_settings_v1provider.py rename src/deepgram/{agent/v1/types/agent_v1settings_type.py => requests/think_settings_v1context_length.py} (50%) rename src/deepgram/{agent/v1/requests/agent_v1settings_agent_think_one_item_endpoint.py => requests/think_settings_v1endpoint.py} (83%) rename src/deepgram/{agent/v1/requests/agent_v1settings_agent_think_one_item_functions_item.py => requests/think_settings_v1functions_item.py} (59%) rename src/deepgram/{agent/v1/requests/agent_v1settings_agent_think_one_item_functions_item_endpoint.py => requests/think_settings_v1functions_item_endpoint.py} (81%) create mode 100644 src/deepgram/requests/think_settings_v1provider.py delete mode 100644 src/deepgram/self_hosted/v1/distribution_credentials/types/distribution_credentials_create_request_provider.py delete mode 100644 src/deepgram/speak/v1/types/speak_v1metadata_type.py delete mode 100644 src/deepgram/speak/v1/types/speak_v1warning_type.py create mode 100644 src/deepgram/types/agent_configuration_v1.py delete mode 100644 src/deepgram/types/agent_think_models_v1response_models_item_id_provider.py delete mode 100644 src/deepgram/types/agent_think_models_v1response_models_item_one_provider.py delete mode 100644 src/deepgram/types/agent_think_models_v1response_models_item_three_id.py delete mode 100644 src/deepgram/types/agent_think_models_v1response_models_item_three_provider.py 
delete mode 100644 src/deepgram/types/agent_think_models_v1response_models_item_two_provider.py delete mode 100644 src/deepgram/types/agent_think_models_v1response_models_item_zero_provider.py create mode 100644 src/deepgram/types/agent_variable_v1.py create mode 100644 src/deepgram/types/anthropic_think_provider_model.py create mode 100644 src/deepgram/types/aws_bedrock_think_provider_credentials.py create mode 100644 src/deepgram/types/aws_bedrock_think_provider_credentials_type.py create mode 100644 src/deepgram/types/aws_bedrock_think_provider_model.py rename src/deepgram/{agent/v1/types/agent_v1update_speak_speak_one_item_provider_aws_polly_credentials.py => types/aws_polly_speak_provider_credentials.py} (57%) create mode 100644 src/deepgram/types/aws_polly_speak_provider_credentials_type.py create mode 100644 src/deepgram/types/aws_polly_speak_provider_engine.py rename src/deepgram/{agent/v1/types/agent_v1update_speak_speak_one_item_provider_aws_polly_voice.py => types/aws_polly_speak_provider_voice.py} (73%) create mode 100644 src/deepgram/types/cartesia_speak_provider_model_id.py rename src/deepgram/{agent/v1/types/agent_v1update_speak_speak_one_item_provider_cartesia_voice.py => types/cartesia_speak_provider_voice.py} (73%) create mode 100644 src/deepgram/types/create_agent_configuration_v1response.py rename src/deepgram/{agent/v1/types/agent_v1update_speak_speak_one_item_provider_deepgram_model.py => types/deepgram_speak_provider_model.py} (96%) rename src/deepgram/{speak/v1/types/speak_v1text_type.py => types/delete_agent_configuration_v1response.py} (54%) rename src/deepgram/{agent/v1/types/max.py => types/delete_agent_variable_v1response.py} (56%) rename src/deepgram/{agent/v1/types/agent_v1update_speak_speak_one_item_provider_eleven_labs_model_id.py => types/eleven_labs_speak_provider_model_id.py} (71%) create mode 100644 src/deepgram/types/google_think_provider_model.py create mode 100644 src/deepgram/types/list_agent_configurations_v1response.py 
create mode 100644 src/deepgram/types/list_agent_variables_v1response.py create mode 100644 src/deepgram/types/open_ai_speak_provider_model.py create mode 100644 src/deepgram/types/open_ai_speak_provider_voice.py create mode 100644 src/deepgram/types/open_ai_think_provider_model.py rename src/deepgram/{agent/v1/types/agent_v1update_speak_speak_one_item_endpoint.py => types/speak_settings_v1endpoint.py} (82%) create mode 100644 src/deepgram/types/speak_settings_v1provider.py rename src/deepgram/{agent/v1/types/agent_v1warning_type.py => types/think_settings_v1context_length.py} (51%) rename src/deepgram/{agent/v1/types/agent_v1settings_agent_think_one_item_endpoint.py => types/think_settings_v1endpoint.py} (80%) rename src/deepgram/{agent/v1/types/agent_v1settings_agent_think_one_item_functions_item.py => types/think_settings_v1functions_item.py} (65%) rename src/deepgram/{agent/v1/types/agent_v1settings_agent_think_one_item_functions_item_endpoint.py => types/think_settings_v1functions_item_endpoint.py} (78%) create mode 100644 src/deepgram/types/think_settings_v1provider.py create mode 100644 src/deepgram/voice_agent/__init__.py create mode 100644 src/deepgram/voice_agent/client.py create mode 100644 src/deepgram/voice_agent/configurations/__init__.py create mode 100644 src/deepgram/voice_agent/configurations/client.py create mode 100644 src/deepgram/voice_agent/configurations/raw_client.py create mode 100644 src/deepgram/voice_agent/raw_client.py create mode 100644 src/deepgram/voice_agent/variables/__init__.py create mode 100644 src/deepgram/voice_agent/variables/client.py create mode 100644 src/deepgram/voice_agent/variables/raw_client.py delete mode 100644 tests/custom/test_websocket_control_messages.py create mode 100644 tests/wire/test_voiceAgent_configurations.py create mode 100644 tests/wire/test_voiceAgent_variables.py diff --git a/.fern/metadata.json b/.fern/metadata.json index c55971bb..00c1530d 100644 --- a/.fern/metadata.json +++ b/.fern/metadata.json 
@@ -1,5 +1,5 @@ { - "cliVersion": "4.46.0", + "cliVersion": "4.67.1", "generatorName": "fernapi/fern-python-sdk", "generatorVersion": "4.62.0", "generatorConfig": { @@ -16,6 +16,6 @@ "skip_validation": true } }, - "originGitCommit": "879c76c78827f323e425c1640f76a6e50d6c68d3", - "sdkVersion": "6.0.2" + "originGitCommit": "aa8e0677bcaea82c02a5934c61d195b35921b33d", + "sdkVersion": "6.1.2" } \ No newline at end of file diff --git a/context7.json b/context7.json deleted file mode 100644 index a2d32f39..00000000 --- a/context7.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "url": "https://context7.com/deepgram/deepgram-python-sdk", - "public_key": "pk_hu7APZeIXQ14hNyaCBm0A" -} diff --git a/poetry.lock b/poetry.lock index a981f03d..ab52d826 100644 --- a/poetry.lock +++ b/poetry.lock @@ -49,140 +49,140 @@ files = [ [[package]] name = "charset-normalizer" -version = "3.4.6" +version = "3.4.7" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
optional = false python-versions = ">=3.7" files = [ - {file = "charset_normalizer-3.4.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2e1d8ca8611099001949d1cdfaefc510cf0f212484fe7c565f735b68c78c3c95"}, - {file = "charset_normalizer-3.4.6-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e25369dc110d58ddf29b949377a93e0716d72a24f62bad72b2b39f155949c1fd"}, - {file = "charset_normalizer-3.4.6-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:259695e2ccc253feb2a016303543d691825e920917e31f894ca1a687982b1de4"}, - {file = "charset_normalizer-3.4.6-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:dda86aba335c902b6149a02a55b38e96287157e609200811837678214ba2b1db"}, - {file = "charset_normalizer-3.4.6-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:51fb3c322c81d20567019778cb5a4a6f2dc1c200b886bc0d636238e364848c89"}, - {file = "charset_normalizer-3.4.6-cp310-cp310-manylinux_2_31_armv7l.whl", hash = "sha256:4482481cb0572180b6fd976a4d5c72a30263e98564da68b86ec91f0fe35e8565"}, - {file = "charset_normalizer-3.4.6-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:39f5068d35621da2881271e5c3205125cc456f54e9030d3f723288c873a71bf9"}, - {file = "charset_normalizer-3.4.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:8bea55c4eef25b0b19a0337dc4e3f9a15b00d569c77211fa8cde38684f234fb7"}, - {file = "charset_normalizer-3.4.6-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:f0cdaecd4c953bfae0b6bb64910aaaca5a424ad9c72d85cb88417bb9814f7550"}, - {file = "charset_normalizer-3.4.6-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:150b8ce8e830eb7ccb029ec9ca36022f756986aaaa7956aad6d9ec90089338c0"}, - {file = "charset_normalizer-3.4.6-cp310-cp310-musllinux_1_2_riscv64.whl", hash = 
"sha256:e68c14b04827dd76dcbd1aeea9e604e3e4b78322d8faf2f8132c7138efa340a8"}, - {file = "charset_normalizer-3.4.6-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:3778fd7d7cd04ae8f54651f4a7a0bd6e39a0cf20f801720a4c21d80e9b7ad6b0"}, - {file = "charset_normalizer-3.4.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:dad6e0f2e481fffdcf776d10ebee25e0ef89f16d691f1e5dee4b586375fdc64b"}, - {file = "charset_normalizer-3.4.6-cp310-cp310-win32.whl", hash = "sha256:74a2e659c7ecbc73562e2a15e05039f1e22c75b7c7618b4b574a3ea9118d1557"}, - {file = "charset_normalizer-3.4.6-cp310-cp310-win_amd64.whl", hash = "sha256:aa9cccf4a44b9b62d8ba8b4dd06c649ba683e4bf04eea606d2e94cfc2d6ff4d6"}, - {file = "charset_normalizer-3.4.6-cp310-cp310-win_arm64.whl", hash = "sha256:e985a16ff513596f217cee86c21371b8cd011c0f6f056d0920aa2d926c544058"}, - {file = "charset_normalizer-3.4.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:82060f995ab5003a2d6e0f4ad29065b7672b6593c8c63559beefe5b443242c3e"}, - {file = "charset_normalizer-3.4.6-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:60c74963d8350241a79cb8feea80e54d518f72c26db618862a8f53e5023deaf9"}, - {file = "charset_normalizer-3.4.6-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f6e4333fb15c83f7d1482a76d45a0818897b3d33f00efd215528ff7c51b8e35d"}, - {file = "charset_normalizer-3.4.6-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:bc72863f4d9aba2e8fd9085e63548a324ba706d2ea2c83b260da08a59b9482de"}, - {file = "charset_normalizer-3.4.6-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9cc4fc6c196d6a8b76629a70ddfcd4635a6898756e2d9cac5565cf0654605d73"}, - {file = "charset_normalizer-3.4.6-cp311-cp311-manylinux_2_31_armv7l.whl", hash = "sha256:0c173ce3a681f309f31b87125fecec7a5d1347261ea11ebbb856fa6006b23c8c"}, - {file = 
"charset_normalizer-3.4.6-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c907cdc8109f6c619e6254212e794d6548373cc40e1ec75e6e3823d9135d29cc"}, - {file = "charset_normalizer-3.4.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:404a1e552cf5b675a87f0651f8b79f5f1e6fd100ee88dc612f89aa16abd4486f"}, - {file = "charset_normalizer-3.4.6-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:e3c701e954abf6fc03a49f7c579cc80c2c6cc52525340ca3186c41d3f33482ef"}, - {file = "charset_normalizer-3.4.6-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:7a6967aaf043bceabab5412ed6bd6bd26603dae84d5cb75bf8d9a74a4959d398"}, - {file = "charset_normalizer-3.4.6-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:5feb91325bbceade6afab43eb3b508c63ee53579fe896c77137ded51c6b6958e"}, - {file = "charset_normalizer-3.4.6-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:f820f24b09e3e779fe84c3c456cb4108a7aa639b0d1f02c28046e11bfcd088ed"}, - {file = "charset_normalizer-3.4.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b35b200d6a71b9839a46b9b7fff66b6638bb52fc9658aa58796b0326595d3021"}, - {file = "charset_normalizer-3.4.6-cp311-cp311-win32.whl", hash = "sha256:9ca4c0b502ab399ef89248a2c84c54954f77a070f28e546a85e91da627d1301e"}, - {file = "charset_normalizer-3.4.6-cp311-cp311-win_amd64.whl", hash = "sha256:a9e68c9d88823b274cf1e72f28cb5dc89c990edf430b0bfd3e2fb0785bfeabf4"}, - {file = "charset_normalizer-3.4.6-cp311-cp311-win_arm64.whl", hash = "sha256:97d0235baafca5f2b09cf332cc275f021e694e8362c6bb9c96fc9a0eb74fc316"}, - {file = "charset_normalizer-3.4.6-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:2ef7fedc7a6ecbe99969cd09632516738a97eeb8bd7258bf8a0f23114c057dab"}, - {file = "charset_normalizer-3.4.6-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a4ea868bc28109052790eb2b52a9ab33f3aa7adc02f96673526ff47419490e21"}, - {file = 
"charset_normalizer-3.4.6-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:836ab36280f21fc1a03c99cd05c6b7af70d2697e374c7af0b61ed271401a72a2"}, - {file = "charset_normalizer-3.4.6-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f1ce721c8a7dfec21fcbdfe04e8f68174183cf4e8188e0645e92aa23985c57ff"}, - {file = "charset_normalizer-3.4.6-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e28d62a8fc7a1fa411c43bd65e346f3bce9716dc51b897fbe930c5987b402d5"}, - {file = "charset_normalizer-3.4.6-cp312-cp312-manylinux_2_31_armv7l.whl", hash = "sha256:530d548084c4a9f7a16ed4a294d459b4f229db50df689bfe92027452452943a0"}, - {file = "charset_normalizer-3.4.6-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:30f445ae60aad5e1f8bdbb3108e39f6fbc09f4ea16c815c66578878325f8f15a"}, - {file = "charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ac2393c73378fea4e52aa56285a3d64be50f1a12395afef9cce47772f60334c2"}, - {file = "charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:90ca27cd8da8118b18a52d5f547859cc1f8354a00cd1e8e5120df3e30d6279e5"}, - {file = "charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8e5a94886bedca0f9b78fecd6afb6629142fd2605aa70a125d49f4edc6037ee6"}, - {file = "charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:695f5c2823691a25f17bc5d5ffe79fa90972cc34b002ac6c843bb8a1720e950d"}, - {file = "charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:231d4da14bcd9301310faf492051bee27df11f2bc7549bc0bb41fef11b82daa2"}, - {file = "charset_normalizer-3.4.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a056d1ad2633548ca18ffa2f85c202cfb48b68615129143915b8dc72a806a923"}, - {file = "charset_normalizer-3.4.6-cp312-cp312-win32.whl", hash = 
"sha256:c2274ca724536f173122f36c98ce188fd24ce3dad886ec2b7af859518ce008a4"}, - {file = "charset_normalizer-3.4.6-cp312-cp312-win_amd64.whl", hash = "sha256:c8ae56368f8cc97c7e40a7ee18e1cedaf8e780cd8bc5ed5ac8b81f238614facb"}, - {file = "charset_normalizer-3.4.6-cp312-cp312-win_arm64.whl", hash = "sha256:899d28f422116b08be5118ef350c292b36fc15ec2daeb9ea987c89281c7bb5c4"}, - {file = "charset_normalizer-3.4.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:11afb56037cbc4b1555a34dd69151e8e069bee82e613a73bef6e714ce733585f"}, - {file = "charset_normalizer-3.4.6-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:423fb7e748a08f854a08a222b983f4df1912b1daedce51a72bd24fe8f26a1843"}, - {file = "charset_normalizer-3.4.6-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:d73beaac5e90173ac3deb9928a74763a6d230f494e4bfb422c217a0ad8e629bf"}, - {file = "charset_normalizer-3.4.6-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d60377dce4511655582e300dc1e5a5f24ba0cb229005a1d5c8d0cb72bb758ab8"}, - {file = "charset_normalizer-3.4.6-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:530e8cebeea0d76bdcf93357aa5e41336f48c3dc709ac52da2bb167c5b8271d9"}, - {file = "charset_normalizer-3.4.6-cp313-cp313-manylinux_2_31_armv7l.whl", hash = "sha256:a26611d9987b230566f24a0a125f17fe0de6a6aff9f25c9f564aaa2721a5fb88"}, - {file = "charset_normalizer-3.4.6-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:34315ff4fc374b285ad7f4a0bf7dcbfe769e1b104230d40f49f700d4ab6bbd84"}, - {file = "charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5f8ddd609f9e1af8c7bd6e2aca279c931aefecd148a14402d4e368f3171769fd"}, - {file = "charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_armv7l.whl", hash = 
"sha256:80d0a5615143c0b3225e5e3ef22c8d5d51f3f72ce0ea6fb84c943546c7b25b6c"}, - {file = "charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:92734d4d8d187a354a556626c221cd1a892a4e0802ccb2af432a1d85ec012194"}, - {file = "charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:613f19aa6e082cf96e17e3ffd89383343d0d589abda756b7764cf78361fd41dc"}, - {file = "charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:2b1a63e8224e401cafe7739f77efd3f9e7f5f2026bda4aead8e59afab537784f"}, - {file = "charset_normalizer-3.4.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6cceb5473417d28edd20c6c984ab6fee6c6267d38d906823ebfe20b03d607dc2"}, - {file = "charset_normalizer-3.4.6-cp313-cp313-win32.whl", hash = "sha256:d7de2637729c67d67cf87614b566626057e95c303bc0a55ffe391f5205e7003d"}, - {file = "charset_normalizer-3.4.6-cp313-cp313-win_amd64.whl", hash = "sha256:572d7c822caf521f0525ba1bce1a622a0b85cf47ffbdae6c9c19e3b5ac3c4389"}, - {file = "charset_normalizer-3.4.6-cp313-cp313-win_arm64.whl", hash = "sha256:a4474d924a47185a06411e0064b803c68be044be2d60e50e8bddcc2649957c1f"}, - {file = "charset_normalizer-3.4.6-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:9cc6e6d9e571d2f863fa77700701dae73ed5f78881efc8b3f9a4398772ff53e8"}, - {file = "charset_normalizer-3.4.6-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ef5960d965e67165d75b7c7ffc60a83ec5abfc5c11b764ec13ea54fbef8b4421"}, - {file = "charset_normalizer-3.4.6-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b3694e3f87f8ac7ce279d4355645b3c878d24d1424581b46282f24b92f5a4ae2"}, - {file = "charset_normalizer-3.4.6-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5d11595abf8dd942a77883a39d81433739b287b6aa71620f15164f8096221b30"}, - {file = 
"charset_normalizer-3.4.6-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7bda6eebafd42133efdca535b04ccb338ab29467b3f7bf79569883676fc628db"}, - {file = "charset_normalizer-3.4.6-cp314-cp314-manylinux_2_31_armv7l.whl", hash = "sha256:bbc8c8650c6e51041ad1be191742b8b421d05bbd3410f43fa2a00c8db87678e8"}, - {file = "charset_normalizer-3.4.6-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:22c6f0c2fbc31e76c3b8a86fba1a56eda6166e238c29cdd3d14befdb4a4e4815"}, - {file = "charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7edbed096e4a4798710ed6bc75dcaa2a21b68b6c356553ac4823c3658d53743a"}, - {file = "charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:7f9019c9cb613f084481bd6a100b12e1547cf2efe362d873c2e31e4035a6fa43"}, - {file = "charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:58c948d0d086229efc484fe2f30c2d382c86720f55cd9bc33591774348ad44e0"}, - {file = "charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:419a9d91bd238052642a51938af8ac05da5b3343becde08d5cdeab9046df9ee1"}, - {file = "charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:5273b9f0b5835ff0350c0828faea623c68bfa65b792720c453e22b25cc72930f"}, - {file = "charset_normalizer-3.4.6-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:0e901eb1049fdb80f5bd11ed5ea1e498ec423102f7a9b9e4645d5b8204ff2815"}, - {file = "charset_normalizer-3.4.6-cp314-cp314-win32.whl", hash = "sha256:b4ff1d35e8c5bd078be89349b6f3a845128e685e751b6ea1169cf2160b344c4d"}, - {file = "charset_normalizer-3.4.6-cp314-cp314-win_amd64.whl", hash = "sha256:74119174722c4349af9708993118581686f343adc1c8c9c007d59be90d077f3f"}, - {file = "charset_normalizer-3.4.6-cp314-cp314-win_arm64.whl", hash = "sha256:e5bcc1a1ae744e0bb59641171ae53743760130600da8db48cbb6e4918e186e4e"}, - {file = "charset_normalizer-3.4.6-cp314-cp314t-macosx_10_15_universal2.whl", 
hash = "sha256:ad8faf8df23f0378c6d527d8b0b15ea4a2e23c89376877c598c4870d1b2c7866"}, - {file = "charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f5ea69428fa1b49573eef0cc44a1d43bebd45ad0c611eb7d7eac760c7ae771bc"}, - {file = "charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:06a7e86163334edfc5d20fe104db92fcd666e5a5df0977cb5680a506fe26cc8e"}, - {file = "charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e1f6e2f00a6b8edb562826e4632e26d063ac10307e80f7461f7de3ad8ef3f077"}, - {file = "charset_normalizer-3.4.6-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:95b52c68d64c1878818687a473a10547b3292e82b6f6fe483808fb1468e2f52f"}, - {file = "charset_normalizer-3.4.6-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:7504e9b7dc05f99a9bbb4525c67a2c155073b44d720470a148b34166a69c054e"}, - {file = "charset_normalizer-3.4.6-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:172985e4ff804a7ad08eebec0a1640ece87ba5041d565fff23c8f99c1f389484"}, - {file = "charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:4be9f4830ba8741527693848403e2c457c16e499100963ec711b1c6f2049b7c7"}, - {file = "charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:79090741d842f564b1b2827c0b82d846405b744d31e84f18d7a7b41c20e473ff"}, - {file = "charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:87725cfb1a4f1f8c2fc9890ae2f42094120f4b44db9360be5d99a4c6b0e03a9e"}, - {file = "charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:fcce033e4021347d80ed9c66dcf1e7b1546319834b74445f561d2e2221de5659"}, - {file = "charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_s390x.whl", hash = 
"sha256:ca0276464d148c72defa8bb4390cce01b4a0e425f3b50d1435aa6d7a18107602"}, - {file = "charset_normalizer-3.4.6-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:197c1a244a274bb016dd8b79204850144ef77fe81c5b797dc389327adb552407"}, - {file = "charset_normalizer-3.4.6-cp314-cp314t-win32.whl", hash = "sha256:2a24157fa36980478dd1770b585c0f30d19e18f4fb0c47c13aa568f871718579"}, - {file = "charset_normalizer-3.4.6-cp314-cp314t-win_amd64.whl", hash = "sha256:cd5e2801c89992ed8c0a3f0293ae83c159a60d9a5d685005383ef4caca77f2c4"}, - {file = "charset_normalizer-3.4.6-cp314-cp314t-win_arm64.whl", hash = "sha256:47955475ac79cc504ef2704b192364e51d0d473ad452caedd0002605f780101c"}, - {file = "charset_normalizer-3.4.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:659a1e1b500fac8f2779dd9e1570464e012f43e580371470b45277a27baa7532"}, - {file = "charset_normalizer-3.4.6-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f61aa92e4aad0be58eb6eb4e0c21acf32cf8065f4b2cae5665da756c4ceef982"}, - {file = "charset_normalizer-3.4.6-cp38-cp38-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f50498891691e0864dc3da965f340fada0771f6142a378083dc4608f4ea513e2"}, - {file = "charset_normalizer-3.4.6-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:bf625105bb9eef28a56a943fec8c8a98aeb80e7d7db99bd3c388137e6eb2d237"}, - {file = "charset_normalizer-3.4.6-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2bd9d128ef93637a5d7a6af25363cf5dec3fa21cf80e68055aad627f280e8afa"}, - {file = "charset_normalizer-3.4.6-cp38-cp38-manylinux_2_31_armv7l.whl", hash = "sha256:d08ec48f0a1c48d75d0356cea971921848fb620fdeba805b28f937e90691209f"}, - {file = "charset_normalizer-3.4.6-cp38-cp38-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:1ed80ff870ca6de33f4d953fda4d55654b9a2b340ff39ab32fa3adbcd718f264"}, - {file = 
"charset_normalizer-3.4.6-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f98059e4fcd3e3e4e2d632b7cf81c2faae96c43c60b569e9c621468082f1d104"}, - {file = "charset_normalizer-3.4.6-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:ab30e5e3e706e3063bc6de96b118688cb10396b70bb9864a430f67df98c61ecc"}, - {file = "charset_normalizer-3.4.6-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:d5f5d1e9def3405f60e3ca8232d56f35c98fb7bf581efcc60051ebf53cb8b611"}, - {file = "charset_normalizer-3.4.6-cp38-cp38-musllinux_1_2_riscv64.whl", hash = "sha256:461598cd852bfa5a61b09cae2b1c02e2efcd166ee5516e243d540ac24bfa68a7"}, - {file = "charset_normalizer-3.4.6-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:71be7e0e01753a89cf024abf7ecb6bca2c81738ead80d43004d9b5e3f1244e64"}, - {file = "charset_normalizer-3.4.6-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:df01808ee470038c3f8dc4f48620df7225c49c2d6639e38f96e6d6ac6e6f7b0e"}, - {file = "charset_normalizer-3.4.6-cp38-cp38-win32.whl", hash = "sha256:69dd852c2f0ad631b8b60cfbe25a28c0058a894de5abb566619c205ce0550eae"}, - {file = "charset_normalizer-3.4.6-cp38-cp38-win_amd64.whl", hash = "sha256:517ad0e93394ac532745129ceabdf2696b609ec9f87863d337140317ebce1c14"}, - {file = "charset_normalizer-3.4.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:31215157227939b4fb3d740cd23fe27be0439afef67b785a1eb78a3ae69cba9e"}, - {file = "charset_normalizer-3.4.6-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ecbbd45615a6885fe3240eb9db73b9e62518b611850fdf8ab08bd56de7ad2b17"}, - {file = "charset_normalizer-3.4.6-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c45a03a4c69820a399f1dda9e1d8fbf3562eda46e7720458180302021b08f778"}, - {file = "charset_normalizer-3.4.6-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e8aeb10fcbe92767f0fa69ad5a72deca50d0dca07fbde97848997d778a50c9fe"}, - {file = 
"charset_normalizer-3.4.6-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:54fae94be3d75f3e573c9a1b5402dc593de19377013c9a0e4285e3d402dd3a2a"}, - {file = "charset_normalizer-3.4.6-cp39-cp39-manylinux_2_31_armv7l.whl", hash = "sha256:2f7fdd9b6e6c529d6a2501a2d36b240109e78a8ceaef5687cfcfa2bbe671d297"}, - {file = "charset_normalizer-3.4.6-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:4d1d02209e06550bdaef34af58e041ad71b88e624f5d825519da3a3308e22687"}, - {file = "charset_normalizer-3.4.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8bc5f0687d796c05b1e28ab0d38a50e6309906ee09375dd3aff6a9c09dd6e8f4"}, - {file = "charset_normalizer-3.4.6-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:ee4ec14bc1680d6b0afab9aea2ef27e26d2024f18b24a2d7155a52b60da7e833"}, - {file = "charset_normalizer-3.4.6-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:d1a2ee9c1499fc8f86f4521f27a973c914b211ffa87322f4ee33bb35392da2c5"}, - {file = "charset_normalizer-3.4.6-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:48696db7f18afb80a068821504296eb0787d9ce239b91ca15059d1d3eaacf13b"}, - {file = "charset_normalizer-3.4.6-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4f41da960b196ea355357285ad1316a00099f22d0929fe168343b99b254729c9"}, - {file = "charset_normalizer-3.4.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:802168e03fba8bbc5ce0d866d589e4b1ca751d06edee69f7f3a19c5a9fe6b597"}, - {file = "charset_normalizer-3.4.6-cp39-cp39-win32.whl", hash = "sha256:8761ac29b6c81574724322a554605608a9960769ea83d2c73e396f3df896ad54"}, - {file = "charset_normalizer-3.4.6-cp39-cp39-win_amd64.whl", hash = "sha256:1cf0a70018692f85172348fe06d3a4b63f94ecb055e13a00c644d368eb82e5b8"}, - {file = "charset_normalizer-3.4.6-cp39-cp39-win_arm64.whl", hash = "sha256:3516bbb8d42169de9e61b8520cbeeeb716f12f4ecfe3fd30a9919aa16c806ca8"}, - {file = "charset_normalizer-3.4.6-py3-none-any.whl", hash = 
"sha256:947cf925bc916d90adba35a64c82aace04fa39b46b52d4630ece166655905a69"}, - {file = "charset_normalizer-3.4.6.tar.gz", hash = "sha256:1ae6b62897110aa7c79ea2f5dd38d1abca6db663687c0b1ad9aed6f6bae3d9d6"}, + {file = "charset_normalizer-3.4.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cdd68a1fb318e290a2077696b7eb7a21a49163c455979c639bf5a5dcdc46617d"}, + {file = "charset_normalizer-3.4.7-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e17b8d5d6a8c47c85e68ca8379def1303fd360c3e22093a807cd34a71cd082b8"}, + {file = "charset_normalizer-3.4.7-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:511ef87c8aec0783e08ac18565a16d435372bc1ac25a91e6ac7f5ef2b0bff790"}, + {file = "charset_normalizer-3.4.7-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:007d05ec7321d12a40227aae9e2bc6dca73f3cb21058999a1df9e193555a9dcc"}, + {file = "charset_normalizer-3.4.7-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cf29836da5119f3c8a8a70667b0ef5fdca3bb12f80fd06487cfa575b3909b393"}, + {file = "charset_normalizer-3.4.7-cp310-cp310-manylinux_2_31_armv7l.whl", hash = "sha256:12d8baf840cc7889b37c7c770f478adea7adce3dcb3944d02ec87508e2dcf153"}, + {file = "charset_normalizer-3.4.7-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:d560742f3c0d62afaccf9f41fe485ed69bd7661a241f86a3ef0f0fb8b1a397af"}, + {file = "charset_normalizer-3.4.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b14b2d9dac08e28bb8046a1a0434b1750eb221c8f5b87a68f4fa11a6f97b5e34"}, + {file = "charset_normalizer-3.4.7-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:bc17a677b21b3502a21f66a8cc64f5bfad4df8a0b8434d661666f8ce90ac3af1"}, + {file = "charset_normalizer-3.4.7-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:750e02e074872a3fad7f233b47734166440af3cdea0add3e95163110816d6752"}, + 
{file = "charset_normalizer-3.4.7-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:4e5163c14bffd570ef2affbfdd77bba66383890797df43dc8b4cc7d6f500bf53"}, + {file = "charset_normalizer-3.4.7-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:6ed74185b2db44f41ef35fd1617c5888e59792da9bbc9190d6c7300617182616"}, + {file = "charset_normalizer-3.4.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:94e1885b270625a9a828c9793b4d52a64445299baa1fea5a173bf1d3dd9a1a5a"}, + {file = "charset_normalizer-3.4.7-cp310-cp310-win32.whl", hash = "sha256:6785f414ae0f3c733c437e0f3929197934f526d19dfaa75e18fdb4f94c6fb374"}, + {file = "charset_normalizer-3.4.7-cp310-cp310-win_amd64.whl", hash = "sha256:6696b7688f54f5af4462118f0bfa7c1621eeb87154f77fa04b9295ce7a8f2943"}, + {file = "charset_normalizer-3.4.7-cp310-cp310-win_arm64.whl", hash = "sha256:66671f93accb62ed07da56613636f3641f1a12c13046ce91ffc923721f23c008"}, + {file = "charset_normalizer-3.4.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7641bb8895e77f921102f72833904dcd9901df5d6d72a2ab8f31d04b7e51e4e7"}, + {file = "charset_normalizer-3.4.7-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:202389074300232baeb53ae2569a60901f7efadd4245cf3a3bf0617d60b439d7"}, + {file = "charset_normalizer-3.4.7-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:30b8d1d8c52a48c2c5690e152c169b673487a2a58de1ec7393196753063fcd5e"}, + {file = "charset_normalizer-3.4.7-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:532bc9bf33a68613fd7d65e4b1c71a6a38d7d42604ecf239c77392e9b4e8998c"}, + {file = "charset_normalizer-3.4.7-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2fe249cb4651fd12605b7288b24751d8bfd46d35f12a20b1ba33dea122e690df"}, + {file = "charset_normalizer-3.4.7-cp311-cp311-manylinux_2_31_armv7l.whl", hash = 
"sha256:65bcd23054beab4d166035cabbc868a09c1a49d1efe458fe8e4361215df40265"}, + {file = "charset_normalizer-3.4.7-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:08e721811161356f97b4059a9ba7bafb23ea5ee2255402c42881c214e173c6b4"}, + {file = "charset_normalizer-3.4.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e060d01aec0a910bdccb8be71faf34e7799ce36950f8294c8bf612cba65a2c9e"}, + {file = "charset_normalizer-3.4.7-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:38c0109396c4cfc574d502df99742a45c72c08eff0a36158b6f04000043dbf38"}, + {file = "charset_normalizer-3.4.7-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:1c2a768fdd44ee4a9339a9b0b130049139b8ce3c01d2ce09f67f5a68048d477c"}, + {file = "charset_normalizer-3.4.7-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:1a87ca9d5df6fe460483d9a5bbf2b18f620cbed41b432e2bddb686228282d10b"}, + {file = "charset_normalizer-3.4.7-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:d635aab80466bc95771bb78d5370e74d36d1fe31467b6b29b8b57b2a3cd7d22c"}, + {file = "charset_normalizer-3.4.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ae196f021b5e7c78e918242d217db021ed2a6ace2bc6ae94c0fc596221c7f58d"}, + {file = "charset_normalizer-3.4.7-cp311-cp311-win32.whl", hash = "sha256:adb2597b428735679446b46c8badf467b4ca5f5056aae4d51a19f9570301b1ad"}, + {file = "charset_normalizer-3.4.7-cp311-cp311-win_amd64.whl", hash = "sha256:8e385e4267ab76874ae30db04c627faaaf0b509e1ccc11a95b3fc3e83f855c00"}, + {file = "charset_normalizer-3.4.7-cp311-cp311-win_arm64.whl", hash = "sha256:d4a48e5b3c2a489fae013b7589308a40146ee081f6f509e047e0e096084ceca1"}, + {file = "charset_normalizer-3.4.7-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:eca9705049ad3c7345d574e3510665cb2cf844c2f2dcfe675332677f081cbd46"}, + {file = "charset_normalizer-3.4.7-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:6178f72c5508bfc5fd446a5905e698c6212932f25bcdd4b47a757a50605a90e2"}, + {file = "charset_normalizer-3.4.7-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e1421b502d83040e6d7fb2fb18dff63957f720da3d77b2fbd3187ceb63755d7b"}, + {file = "charset_normalizer-3.4.7-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:edac0f1ab77644605be2cbba52e6b7f630731fc42b34cb0f634be1a6eface56a"}, + {file = "charset_normalizer-3.4.7-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5649fd1c7bade02f320a462fdefd0b4bd3ce036065836d4f42e0de958038e116"}, + {file = "charset_normalizer-3.4.7-cp312-cp312-manylinux_2_31_armv7l.whl", hash = "sha256:203104ed3e428044fd943bc4bf45fa73c0730391f9621e37fe39ecf477b128cb"}, + {file = "charset_normalizer-3.4.7-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:298930cec56029e05497a76988377cbd7457ba864beeea92ad7e844fe74cd1f1"}, + {file = "charset_normalizer-3.4.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:708838739abf24b2ceb208d0e22403dd018faeef86ddac04319a62ae884c4f15"}, + {file = "charset_normalizer-3.4.7-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:0f7eb884681e3938906ed0434f20c63046eacd0111c4ba96f27b76084cd679f5"}, + {file = "charset_normalizer-3.4.7-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4dc1e73c36828f982bfe79fadf5919923f8a6f4df2860804db9a98c48824ce8d"}, + {file = "charset_normalizer-3.4.7-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:aed52fea0513bac0ccde438c188c8a471c4e0f457c2dd20cdbf6ea7a450046c7"}, + {file = "charset_normalizer-3.4.7-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:fea24543955a6a729c45a73fe90e08c743f0b3334bbf3201e6c4bc1b0c7fa464"}, + {file = "charset_normalizer-3.4.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:bb6d88045545b26da47aa879dd4a89a71d1dce0f0e549b1abcb31dfe4a8eac49"}, + {file = 
"charset_normalizer-3.4.7-cp312-cp312-win32.whl", hash = "sha256:2257141f39fe65a3fdf38aeccae4b953e5f3b3324f4ff0daf9f15b8518666a2c"}, + {file = "charset_normalizer-3.4.7-cp312-cp312-win_amd64.whl", hash = "sha256:5ed6ab538499c8644b8a3e18debabcd7ce684f3fa91cf867521a7a0279cab2d6"}, + {file = "charset_normalizer-3.4.7-cp312-cp312-win_arm64.whl", hash = "sha256:56be790f86bfb2c98fb742ce566dfb4816e5a83384616ab59c49e0604d49c51d"}, + {file = "charset_normalizer-3.4.7-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:f496c9c3cc02230093d8330875c4c3cdfc3b73612a5fd921c65d39cbcef08063"}, + {file = "charset_normalizer-3.4.7-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0ea948db76d31190bf08bd371623927ee1339d5f2a0b4b1b4a4439a65298703c"}, + {file = "charset_normalizer-3.4.7-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a277ab8928b9f299723bc1a2dabb1265911b1a76341f90a510368ca44ad9ab66"}, + {file = "charset_normalizer-3.4.7-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3bec022aec2c514d9cf199522a802bd007cd588ab17ab2525f20f9c34d067c18"}, + {file = "charset_normalizer-3.4.7-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e044c39e41b92c845bc815e5ae4230804e8e7bc29e399b0437d64222d92809dd"}, + {file = "charset_normalizer-3.4.7-cp313-cp313-manylinux_2_31_armv7l.whl", hash = "sha256:f495a1652cf3fbab2eb0639776dad966c2fb874d79d87ca07f9d5f059b8bd215"}, + {file = "charset_normalizer-3.4.7-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e712b419df8ba5e42b226c510472b37bd57b38e897d3eca5e8cfd410a29fa859"}, + {file = "charset_normalizer-3.4.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7804338df6fcc08105c7745f1502ba68d900f45fd770d5bdd5288ddccb8a42d8"}, + {file = "charset_normalizer-3.4.7-cp313-cp313-musllinux_1_2_armv7l.whl", hash = 
"sha256:481551899c856c704d58119b5025793fa6730adda3571971af568f66d2424bb5"}, + {file = "charset_normalizer-3.4.7-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f59099f9b66f0d7145115e6f80dd8b1d847176df89b234a5a6b3f00437aa0832"}, + {file = "charset_normalizer-3.4.7-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:f59ad4c0e8f6bba240a9bb85504faa1ab438237199d4cce5f622761507b8f6a6"}, + {file = "charset_normalizer-3.4.7-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:3dedcc22d73ec993f42055eff4fcfed9318d1eeb9a6606c55892a26964964e48"}, + {file = "charset_normalizer-3.4.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:64f02c6841d7d83f832cd97ccf8eb8a906d06eb95d5276069175c696b024b60a"}, + {file = "charset_normalizer-3.4.7-cp313-cp313-win32.whl", hash = "sha256:4042d5c8f957e15221d423ba781e85d553722fc4113f523f2feb7b188cc34c5e"}, + {file = "charset_normalizer-3.4.7-cp313-cp313-win_amd64.whl", hash = "sha256:3946fa46a0cf3e4c8cb1cc52f56bb536310d34f25f01ca9b6c16afa767dab110"}, + {file = "charset_normalizer-3.4.7-cp313-cp313-win_arm64.whl", hash = "sha256:80d04837f55fc81da168b98de4f4b797ef007fc8a79ab71c6ec9bc4dd662b15b"}, + {file = "charset_normalizer-3.4.7-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:c36c333c39be2dbca264d7803333c896ab8fa7d4d6f0ab7edb7dfd7aea6e98c0"}, + {file = "charset_normalizer-3.4.7-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1c2aed2e5e41f24ea8ef1590b8e848a79b56f3a5564a65ceec43c9d692dc7d8a"}, + {file = "charset_normalizer-3.4.7-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:54523e136b8948060c0fa0bc7b1b50c32c186f2fceee897a495406bb6e311d2b"}, + {file = "charset_normalizer-3.4.7-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:715479b9a2802ecac752a3b0efa2b0b60285cf962ee38414211abdfccc233b41"}, + {file = 
"charset_normalizer-3.4.7-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bd6c2a1c7573c64738d716488d2cdd3c00e340e4835707d8fdb8dc1a66ef164e"}, + {file = "charset_normalizer-3.4.7-cp314-cp314-manylinux_2_31_armv7l.whl", hash = "sha256:c45e9440fb78f8ddabcf714b68f936737a121355bf59f3907f4e17721b9d1aae"}, + {file = "charset_normalizer-3.4.7-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:3534e7dcbdcf757da6b85a0bbf5b6868786d5982dd959b065e65481644817a18"}, + {file = "charset_normalizer-3.4.7-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:e8ac484bf18ce6975760921bb6148041faa8fef0547200386ea0b52b5d27bf7b"}, + {file = "charset_normalizer-3.4.7-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:a5fe03b42827c13cdccd08e6c0247b6a6d4b5e3cdc53fd1749f5896adcdc2356"}, + {file = "charset_normalizer-3.4.7-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:2d6eb928e13016cea4f1f21d1e10c1cebd5a421bc57ddf5b1142ae3f86824fab"}, + {file = "charset_normalizer-3.4.7-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:e74327fb75de8986940def6e8dee4f127cc9752bee7355bb323cc5b2659b6d46"}, + {file = "charset_normalizer-3.4.7-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:d6038d37043bced98a66e68d3aa2b6a35505dc01328cd65217cefe82f25def44"}, + {file = "charset_normalizer-3.4.7-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:7579e913a5339fb8fa133f6bbcfd8e6749696206cf05acdbdca71a1b436d8e72"}, + {file = "charset_normalizer-3.4.7-cp314-cp314-win32.whl", hash = "sha256:5b77459df20e08151cd6f8b9ef8ef1f961ef73d85c21a555c7eed5b79410ec10"}, + {file = "charset_normalizer-3.4.7-cp314-cp314-win_amd64.whl", hash = "sha256:92a0a01ead5e668468e952e4238cccd7c537364eb7d851ab144ab6627dbbe12f"}, + {file = "charset_normalizer-3.4.7-cp314-cp314-win_arm64.whl", hash = "sha256:67f6279d125ca0046a7fd386d01b311c6363844deac3e5b069b514ba3e63c246"}, + {file = "charset_normalizer-3.4.7-cp314-cp314t-macosx_10_15_universal2.whl", 
hash = "sha256:effc3f449787117233702311a1b7d8f59cba9ced946ba727bdc329ec69028e24"}, + {file = "charset_normalizer-3.4.7-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fbccdc05410c9ee21bbf16a35f4c1d16123dcdeb8a1d38f33654fa21d0234f79"}, + {file = "charset_normalizer-3.4.7-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:733784b6d6def852c814bce5f318d25da2ee65dd4839a0718641c696e09a2960"}, + {file = "charset_normalizer-3.4.7-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a89c23ef8d2c6b27fd200a42aa4ac72786e7c60d40efdc76e6011260b6e949c4"}, + {file = "charset_normalizer-3.4.7-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6c114670c45346afedc0d947faf3c7f701051d2518b943679c8ff88befe14f8e"}, + {file = "charset_normalizer-3.4.7-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:a180c5e59792af262bf263b21a3c49353f25945d8d9f70628e73de370d55e1e1"}, + {file = "charset_normalizer-3.4.7-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:3c9a494bc5ec77d43cea229c4f6db1e4d8fe7e1bbffa8b6f0f0032430ff8ab44"}, + {file = "charset_normalizer-3.4.7-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:8d828b6667a32a728a1ad1d93957cdf37489c57b97ae6c4de2860fa749b8fc1e"}, + {file = "charset_normalizer-3.4.7-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:cf1493cd8607bec4d8a7b9b004e699fcf8f9103a9284cc94962cb73d20f9d4a3"}, + {file = "charset_normalizer-3.4.7-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:0c96c3b819b5c3e9e165495db84d41914d6894d55181d2d108cc1a69bfc9cce0"}, + {file = "charset_normalizer-3.4.7-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:752a45dc4a6934060b3b0dab47e04edc3326575f82be64bc4fc293914566503e"}, + {file = "charset_normalizer-3.4.7-cp314-cp314t-musllinux_1_2_s390x.whl", hash = 
"sha256:8778f0c7a52e56f75d12dae53ae320fae900a8b9b4164b981b9c5ce059cd1fcb"}, + {file = "charset_normalizer-3.4.7-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:ce3412fbe1e31eb81ea42f4169ed94861c56e643189e1e75f0041f3fe7020abe"}, + {file = "charset_normalizer-3.4.7-cp314-cp314t-win32.whl", hash = "sha256:c03a41a8784091e67a39648f70c5f97b5b6a37f216896d44d2cdcb82615339a0"}, + {file = "charset_normalizer-3.4.7-cp314-cp314t-win_amd64.whl", hash = "sha256:03853ed82eeebbce3c2abfdbc98c96dc205f32a79627688ac9a27370ea61a49c"}, + {file = "charset_normalizer-3.4.7-cp314-cp314t-win_arm64.whl", hash = "sha256:c35abb8bfff0185efac5878da64c45dafd2b37fb0383add1be155a763c1f083d"}, + {file = "charset_normalizer-3.4.7-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e5f4d355f0a2b1a31bc3edec6795b46324349c9cb25eed068049e4f472fb4259"}, + {file = "charset_normalizer-3.4.7-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:16d971e29578a5e97d7117866d15889a4a07befe0e87e703ed63cd90cb348c01"}, + {file = "charset_normalizer-3.4.7-cp38-cp38-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:dca4bbc466a95ba9c0234ef56d7dd9509f63da22274589ebd4ed7f1f4d4c54e3"}, + {file = "charset_normalizer-3.4.7-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e80c8378d8f3d83cd3164da1ad2df9e37a666cdde7b1cb2298ed0b558064be30"}, + {file = "charset_normalizer-3.4.7-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:36836d6ff945a00b88ba1e4572d721e60b5b8c98c155d465f56ad19d68f23734"}, + {file = "charset_normalizer-3.4.7-cp38-cp38-manylinux_2_31_armv7l.whl", hash = "sha256:bd9b23791fe793e4968dba0c447e12f78e425c59fc0e3b97f6450f4781f3ee60"}, + {file = "charset_normalizer-3.4.7-cp38-cp38-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:aef65cd602a6d0e0ff6f9930fcb1c8fec60dd2cfcb6facaf4bdb0e5873042db0"}, + {file = 
"charset_normalizer-3.4.7-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:82b271f5137d07749f7bf32f70b17ab6eaabedd297e75dce75081a24f76eb545"}, + {file = "charset_normalizer-3.4.7-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:1efde3cae86c8c273f1eb3b287be7d8499420cf2fe7585c41d370d3e790054a5"}, + {file = "charset_normalizer-3.4.7-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:c593052c465475e64bbfe5dbd81680f64a67fdc752c56d7a0ae205dc8aeefe0f"}, + {file = "charset_normalizer-3.4.7-cp38-cp38-musllinux_1_2_riscv64.whl", hash = "sha256:af21eb4409a119e365397b2adbaca4c9ccab56543a65d5dbd9f920d6ac29f686"}, + {file = "charset_normalizer-3.4.7-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:84c018e49c3bf790f9c2771c45e9313a08c2c2a6342b162cd650258b57817706"}, + {file = "charset_normalizer-3.4.7-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:dd915403e231e6b1809fe9b6d9fc55cf8fb5e02765ac625d9cd623342a7905d7"}, + {file = "charset_normalizer-3.4.7-cp38-cp38-win32.whl", hash = "sha256:320ade88cfb846b8cd6b4ddf5ee9e80ee0c1f52401f2456b84ae1ae6a1a5f207"}, + {file = "charset_normalizer-3.4.7-cp38-cp38-win_amd64.whl", hash = "sha256:1dc8b0ea451d6e69735094606991f32867807881400f808a106ee1d963c46a83"}, + {file = "charset_normalizer-3.4.7-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:177a0ba5f0211d488e295aaf82707237e331c24788d8d76c96c5a41594723217"}, + {file = "charset_normalizer-3.4.7-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6e0d51f618228538a3e8f46bd246f87a6cd030565e015803691603f55e12afb5"}, + {file = "charset_normalizer-3.4.7-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:14265bfe1f09498b9d8ec91e9ec9fa52775edf90fcbde092b25f4a33d444fea9"}, + {file = "charset_normalizer-3.4.7-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:87fad7d9ba98c86bcb41b2dc8dbb326619be2562af1f8ff50776a39e55721c5a"}, + {file = 
"charset_normalizer-3.4.7-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f22dec1690b584cea26fade98b2435c132c1b5f68e39f5a0b7627cd7ae31f1dc"}, + {file = "charset_normalizer-3.4.7-cp39-cp39-manylinux_2_31_armv7l.whl", hash = "sha256:d61f00a0869d77422d9b2aba989e2d24afa6ffd552af442e0e58de4f35ea6d00"}, + {file = "charset_normalizer-3.4.7-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:6370e8686f662e6a3941ee48ed4742317cafbe5707e36406e9df792cdb535776"}, + {file = "charset_normalizer-3.4.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a6c5863edfbe888d9eff9c8b8087354e27618d9da76425c119293f11712a6319"}, + {file = "charset_normalizer-3.4.7-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:ed065083d0898c9d5b4bbec7b026fd755ff7454e6e8b73a67f8c744b13986e24"}, + {file = "charset_normalizer-3.4.7-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:2cd4a60d0e2fb04537162c62bbbb4182f53541fe0ede35cdf270a1c1e723cc42"}, + {file = "charset_normalizer-3.4.7-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:813c0e0132266c08eb87469a642cb30aaff57c5f426255419572aaeceeaa7bf4"}, + {file = "charset_normalizer-3.4.7-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:07d9e39b01743c3717745f4c530a6349eadbfa043c7577eef86c502c15df2c67"}, + {file = "charset_normalizer-3.4.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c0f081d69a6e58272819b70288d3221a6ee64b98df852631c80f293514d3b274"}, + {file = "charset_normalizer-3.4.7-cp39-cp39-win32.whl", hash = "sha256:8751d2787c9131302398b11e6c8068053dcb55d5a8964e114b6e196cf16cb366"}, + {file = "charset_normalizer-3.4.7-cp39-cp39-win_amd64.whl", hash = "sha256:12a6fff75f6bc66711b73a2f0addfc4c8c15a20e805146a02d147a318962c444"}, + {file = "charset_normalizer-3.4.7-cp39-cp39-win_arm64.whl", hash = "sha256:bb8cc7534f51d9a017b93e3e85b260924f909601c3df002bcdb58ddb4dc41a5c"}, + {file = "charset_normalizer-3.4.7-py3-none-any.whl", hash = 
"sha256:3dce51d0f5e7951f8bb4900c257dad282f49190fdbebecd4ba99bcc41fef404d"}, + {file = "charset_normalizer-3.4.7.tar.gz", hash = "sha256:ae89db9e5f98a11a4bf50407d4363e7b09b31e55bc117b4f7d80aab97ba009e5"}, ] [[package]] diff --git a/pyproject.toml b/pyproject.toml index 12976d24..d5a47bea 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ dynamic = ["version"] [tool.poetry] name = "deepgram-sdk" -version = "6.1.1" +version = "6.1.2" description = "" readme = "README.md" authors = [] diff --git a/src/deepgram/__init__.py b/src/deepgram/__init__.py index 20dd61db..63bf6069 100644 --- a/src/deepgram/__init__.py +++ b/src/deepgram/__init__.py @@ -7,30 +7,37 @@ if typing.TYPE_CHECKING: from .types import ( + AgentConfigurationV1, AgentThinkModelsV1Response, AgentThinkModelsV1ResponseModelsItem, AgentThinkModelsV1ResponseModelsItemId, - AgentThinkModelsV1ResponseModelsItemIdProvider, AgentThinkModelsV1ResponseModelsItemOne, AgentThinkModelsV1ResponseModelsItemOneId, - AgentThinkModelsV1ResponseModelsItemOneProvider, AgentThinkModelsV1ResponseModelsItemThree, - AgentThinkModelsV1ResponseModelsItemThreeId, - AgentThinkModelsV1ResponseModelsItemThreeProvider, AgentThinkModelsV1ResponseModelsItemTwo, AgentThinkModelsV1ResponseModelsItemTwoId, - AgentThinkModelsV1ResponseModelsItemTwoProvider, AgentThinkModelsV1ResponseModelsItemZero, AgentThinkModelsV1ResponseModelsItemZeroId, - AgentThinkModelsV1ResponseModelsItemZeroProvider, + AgentVariableV1, Anthropic, + AnthropicThinkProviderModel, AwsBedrockThinkProvider, + AwsBedrockThinkProviderCredentials, + AwsBedrockThinkProviderCredentialsType, + AwsBedrockThinkProviderModel, AwsPollySpeakProvider, + AwsPollySpeakProviderCredentials, + AwsPollySpeakProviderCredentialsType, + AwsPollySpeakProviderEngine, + AwsPollySpeakProviderVoice, BillingBreakdownV1Response, BillingBreakdownV1ResponseResolution, BillingBreakdownV1ResponseResultsItem, BillingBreakdownV1ResponseResultsItemGrouping, Cartesia, + 
CartesiaSpeakProviderModelId, + CartesiaSpeakProviderVoice, + CreateAgentConfigurationV1Response, CreateKeyV1RequestOne, CreateKeyV1Response, CreateProjectDistributionCredentialsV1Response, @@ -38,11 +45,15 @@ CreateProjectDistributionCredentialsV1ResponseMember, CreateProjectInviteV1Response, Deepgram, + DeepgramSpeakProviderModel, + DeleteAgentConfigurationV1Response, + DeleteAgentVariableV1Response, DeleteProjectInviteV1Response, DeleteProjectKeyV1Response, DeleteProjectMemberV1Response, DeleteProjectV1Response, ElevenLabsSpeakProvider, + ElevenLabsSpeakProviderModelId, ErrorResponse, ErrorResponseLegacyError, ErrorResponseModernError, @@ -62,9 +73,12 @@ GetProjectRequestV1Response, GetProjectV1Response, Google, + GoogleThinkProviderModel, GrantV1Response, Groq, LeaveProjectV1Response, + ListAgentConfigurationsV1Response, + ListAgentVariablesV1Response, ListBillingFieldsV1Response, ListBillingFieldsV1ResponseDeploymentsItem, ListModelsV1Response, @@ -154,7 +168,10 @@ ListenV2SampleRate, ListenV2Tag, OpenAiSpeakProvider, + OpenAiSpeakProviderModel, + OpenAiSpeakProviderVoice, OpenAiThinkProvider, + OpenAiThinkProviderModel, ProjectRequestResponse, ReadV1Request, ReadV1RequestText, @@ -184,12 +201,29 @@ SharedTopicsResultsTopicsSegmentsItem, SharedTopicsResultsTopicsSegmentsItemTopicsItem, SpeakSettingsV1, + SpeakSettingsV1Endpoint, + SpeakSettingsV1Provider, + SpeakSettingsV1Provider_AwsPolly, + SpeakSettingsV1Provider_Cartesia, + SpeakSettingsV1Provider_Deepgram, + SpeakSettingsV1Provider_ElevenLabs, + SpeakSettingsV1Provider_OpenAi, SpeakV1Encoding, SpeakV1MipOptOut, SpeakV1Model, SpeakV1Response, SpeakV1SampleRate, ThinkSettingsV1, + ThinkSettingsV1ContextLength, + ThinkSettingsV1Endpoint, + ThinkSettingsV1FunctionsItem, + ThinkSettingsV1FunctionsItemEndpoint, + ThinkSettingsV1Provider, + ThinkSettingsV1Provider_Anthropic, + ThinkSettingsV1Provider_AwsBedrock, + ThinkSettingsV1Provider_Google, + ThinkSettingsV1Provider_Groq, + ThinkSettingsV1Provider_OpenAi, 
UpdateProjectMemberScopesV1Response, UpdateProjectV1Response, UsageBreakdownV1Response, @@ -202,10 +236,11 @@ UsageV1ResponseResolution, ) from .errors import BadRequestError - from . import agent, auth, listen, manage, read, self_hosted, speak + from . import agent, auth, listen, manage, read, self_hosted, speak, voice_agent from .client import AsyncDeepgramClient, DeepgramClient from .environment import DeepgramClientEnvironment from .requests import ( + AgentConfigurationV1Params, AgentThinkModelsV1ResponseModelsItemIdParams, AgentThinkModelsV1ResponseModelsItemOneParams, AgentThinkModelsV1ResponseModelsItemParams, @@ -213,19 +248,30 @@ AgentThinkModelsV1ResponseModelsItemTwoParams, AgentThinkModelsV1ResponseModelsItemZeroParams, AgentThinkModelsV1ResponseParams, + AgentVariableV1Params, + AnthropicParams, + AwsBedrockThinkProviderCredentialsParams, + AwsBedrockThinkProviderParams, + AwsPollySpeakProviderCredentialsParams, + AwsPollySpeakProviderParams, BillingBreakdownV1ResponseParams, BillingBreakdownV1ResponseResolutionParams, BillingBreakdownV1ResponseResultsItemGroupingParams, BillingBreakdownV1ResponseResultsItemParams, + CartesiaParams, + CartesiaSpeakProviderVoiceParams, + CreateAgentConfigurationV1ResponseParams, CreateKeyV1ResponseParams, CreateProjectDistributionCredentialsV1ResponseDistributionCredentialsParams, CreateProjectDistributionCredentialsV1ResponseMemberParams, CreateProjectDistributionCredentialsV1ResponseParams, CreateProjectInviteV1ResponseParams, + DeepgramParams, DeleteProjectInviteV1ResponseParams, DeleteProjectKeyV1ResponseParams, DeleteProjectMemberV1ResponseParams, DeleteProjectV1ResponseParams, + ElevenLabsSpeakProviderParams, ErrorResponseLegacyErrorParams, ErrorResponseModernErrorParams, ErrorResponseParams, @@ -243,8 +289,12 @@ GetProjectKeyV1ResponseParams, GetProjectRequestV1ResponseParams, GetProjectV1ResponseParams, + GoogleParams, GrantV1ResponseParams, + GroqParams, LeaveProjectV1ResponseParams, + 
ListAgentConfigurationsV1ResponseParams, + ListAgentVariablesV1ResponseParams, ListBillingFieldsV1ResponseParams, ListModelsV1ResponseParams, ListModelsV1ResponseSttModelsParams, @@ -295,6 +345,8 @@ ListenV1ResponseResultsUtterancesItemWordsItemParams, ListenV1ResponseResultsUtterancesParams, ListenV2KeytermParams, + OpenAiSpeakProviderParams, + OpenAiThinkProviderParams, ProjectRequestResponseParams, ReadV1RequestParams, ReadV1RequestTextParams, @@ -323,7 +375,25 @@ SharedTopicsResultsTopicsParams, SharedTopicsResultsTopicsSegmentsItemParams, SharedTopicsResultsTopicsSegmentsItemTopicsItemParams, + SpeakSettingsV1EndpointParams, + SpeakSettingsV1Params, + SpeakSettingsV1ProviderParams, + SpeakSettingsV1Provider_AwsPollyParams, + SpeakSettingsV1Provider_CartesiaParams, + SpeakSettingsV1Provider_DeepgramParams, + SpeakSettingsV1Provider_ElevenLabsParams, + SpeakSettingsV1Provider_OpenAiParams, + ThinkSettingsV1ContextLengthParams, + ThinkSettingsV1EndpointParams, + ThinkSettingsV1FunctionsItemEndpointParams, + ThinkSettingsV1FunctionsItemParams, ThinkSettingsV1Params, + ThinkSettingsV1ProviderParams, + ThinkSettingsV1Provider_AnthropicParams, + ThinkSettingsV1Provider_AwsBedrockParams, + ThinkSettingsV1Provider_GoogleParams, + ThinkSettingsV1Provider_GroqParams, + ThinkSettingsV1Provider_OpenAiParams, UpdateProjectMemberScopesV1ResponseParams, UpdateProjectV1ResponseParams, UsageBreakdownV1ResponseParams, @@ -337,33 +407,44 @@ ) from .version import __version__ _dynamic_imports: typing.Dict[str, str] = { + "AgentConfigurationV1": ".types", + "AgentConfigurationV1Params": ".requests", "AgentThinkModelsV1Response": ".types", "AgentThinkModelsV1ResponseModelsItem": ".types", "AgentThinkModelsV1ResponseModelsItemId": ".types", "AgentThinkModelsV1ResponseModelsItemIdParams": ".requests", - "AgentThinkModelsV1ResponseModelsItemIdProvider": ".types", "AgentThinkModelsV1ResponseModelsItemOne": ".types", "AgentThinkModelsV1ResponseModelsItemOneId": ".types", 
"AgentThinkModelsV1ResponseModelsItemOneParams": ".requests", - "AgentThinkModelsV1ResponseModelsItemOneProvider": ".types", "AgentThinkModelsV1ResponseModelsItemParams": ".requests", "AgentThinkModelsV1ResponseModelsItemThree": ".types", - "AgentThinkModelsV1ResponseModelsItemThreeId": ".types", "AgentThinkModelsV1ResponseModelsItemThreeParams": ".requests", - "AgentThinkModelsV1ResponseModelsItemThreeProvider": ".types", "AgentThinkModelsV1ResponseModelsItemTwo": ".types", "AgentThinkModelsV1ResponseModelsItemTwoId": ".types", "AgentThinkModelsV1ResponseModelsItemTwoParams": ".requests", - "AgentThinkModelsV1ResponseModelsItemTwoProvider": ".types", "AgentThinkModelsV1ResponseModelsItemZero": ".types", "AgentThinkModelsV1ResponseModelsItemZeroId": ".types", "AgentThinkModelsV1ResponseModelsItemZeroParams": ".requests", - "AgentThinkModelsV1ResponseModelsItemZeroProvider": ".types", "AgentThinkModelsV1ResponseParams": ".requests", + "AgentVariableV1": ".types", + "AgentVariableV1Params": ".requests", "Anthropic": ".types", + "AnthropicParams": ".requests", + "AnthropicThinkProviderModel": ".types", "AsyncDeepgramClient": ".client", "AwsBedrockThinkProvider": ".types", + "AwsBedrockThinkProviderCredentials": ".types", + "AwsBedrockThinkProviderCredentialsParams": ".requests", + "AwsBedrockThinkProviderCredentialsType": ".types", + "AwsBedrockThinkProviderModel": ".types", + "AwsBedrockThinkProviderParams": ".requests", "AwsPollySpeakProvider": ".types", + "AwsPollySpeakProviderCredentials": ".types", + "AwsPollySpeakProviderCredentialsParams": ".requests", + "AwsPollySpeakProviderCredentialsType": ".types", + "AwsPollySpeakProviderEngine": ".types", + "AwsPollySpeakProviderParams": ".requests", + "AwsPollySpeakProviderVoice": ".types", "BadRequestError": ".errors", "BillingBreakdownV1Response": ".types", "BillingBreakdownV1ResponseParams": ".requests", @@ -374,6 +455,12 @@ "BillingBreakdownV1ResponseResultsItemGroupingParams": ".requests", 
"BillingBreakdownV1ResponseResultsItemParams": ".requests", "Cartesia": ".types", + "CartesiaParams": ".requests", + "CartesiaSpeakProviderModelId": ".types", + "CartesiaSpeakProviderVoice": ".types", + "CartesiaSpeakProviderVoiceParams": ".requests", + "CreateAgentConfigurationV1Response": ".types", + "CreateAgentConfigurationV1ResponseParams": ".requests", "CreateKeyV1RequestOne": ".types", "CreateKeyV1Response": ".types", "CreateKeyV1ResponseParams": ".requests", @@ -388,6 +475,10 @@ "Deepgram": ".types", "DeepgramClient": ".client", "DeepgramClientEnvironment": ".environment", + "DeepgramParams": ".requests", + "DeepgramSpeakProviderModel": ".types", + "DeleteAgentConfigurationV1Response": ".types", + "DeleteAgentVariableV1Response": ".types", "DeleteProjectInviteV1Response": ".types", "DeleteProjectInviteV1ResponseParams": ".requests", "DeleteProjectKeyV1Response": ".types", @@ -397,6 +488,8 @@ "DeleteProjectV1Response": ".types", "DeleteProjectV1ResponseParams": ".requests", "ElevenLabsSpeakProvider": ".types", + "ElevenLabsSpeakProviderModelId": ".types", + "ElevenLabsSpeakProviderParams": ".requests", "ErrorResponse": ".types", "ErrorResponseLegacyError": ".types", "ErrorResponseLegacyErrorParams": ".requests", @@ -433,11 +526,18 @@ "GetProjectV1Response": ".types", "GetProjectV1ResponseParams": ".requests", "Google": ".types", + "GoogleParams": ".requests", + "GoogleThinkProviderModel": ".types", "GrantV1Response": ".types", "GrantV1ResponseParams": ".requests", "Groq": ".types", + "GroqParams": ".requests", "LeaveProjectV1Response": ".types", "LeaveProjectV1ResponseParams": ".requests", + "ListAgentConfigurationsV1Response": ".types", + "ListAgentConfigurationsV1ResponseParams": ".requests", + "ListAgentVariablesV1Response": ".types", + "ListAgentVariablesV1ResponseParams": ".requests", "ListBillingFieldsV1Response": ".types", "ListBillingFieldsV1ResponseDeploymentsItem": ".types", "ListBillingFieldsV1ResponseParams": ".requests", @@ -577,7 +677,12 @@ 
"ListenV2SampleRate": ".types", "ListenV2Tag": ".types", "OpenAiSpeakProvider": ".types", + "OpenAiSpeakProviderModel": ".types", + "OpenAiSpeakProviderParams": ".requests", + "OpenAiSpeakProviderVoice": ".types", "OpenAiThinkProvider": ".types", + "OpenAiThinkProviderModel": ".types", + "OpenAiThinkProviderParams": ".requests", "ProjectRequestResponse": ".types", "ProjectRequestResponseParams": ".requests", "ReadV1Request": ".types", @@ -635,13 +740,48 @@ "SharedTopicsResultsTopicsSegmentsItemTopicsItem": ".types", "SharedTopicsResultsTopicsSegmentsItemTopicsItemParams": ".requests", "SpeakSettingsV1": ".types", + "SpeakSettingsV1Endpoint": ".types", + "SpeakSettingsV1EndpointParams": ".requests", + "SpeakSettingsV1Params": ".requests", + "SpeakSettingsV1Provider": ".types", + "SpeakSettingsV1ProviderParams": ".requests", + "SpeakSettingsV1Provider_AwsPolly": ".types", + "SpeakSettingsV1Provider_AwsPollyParams": ".requests", + "SpeakSettingsV1Provider_Cartesia": ".types", + "SpeakSettingsV1Provider_CartesiaParams": ".requests", + "SpeakSettingsV1Provider_Deepgram": ".types", + "SpeakSettingsV1Provider_DeepgramParams": ".requests", + "SpeakSettingsV1Provider_ElevenLabs": ".types", + "SpeakSettingsV1Provider_ElevenLabsParams": ".requests", + "SpeakSettingsV1Provider_OpenAi": ".types", + "SpeakSettingsV1Provider_OpenAiParams": ".requests", "SpeakV1Encoding": ".types", "SpeakV1MipOptOut": ".types", "SpeakV1Model": ".types", "SpeakV1Response": ".types", "SpeakV1SampleRate": ".types", "ThinkSettingsV1": ".types", + "ThinkSettingsV1ContextLength": ".types", + "ThinkSettingsV1ContextLengthParams": ".requests", + "ThinkSettingsV1Endpoint": ".types", + "ThinkSettingsV1EndpointParams": ".requests", + "ThinkSettingsV1FunctionsItem": ".types", + "ThinkSettingsV1FunctionsItemEndpoint": ".types", + "ThinkSettingsV1FunctionsItemEndpointParams": ".requests", + "ThinkSettingsV1FunctionsItemParams": ".requests", "ThinkSettingsV1Params": ".requests", + "ThinkSettingsV1Provider": 
".types", + "ThinkSettingsV1ProviderParams": ".requests", + "ThinkSettingsV1Provider_Anthropic": ".types", + "ThinkSettingsV1Provider_AnthropicParams": ".requests", + "ThinkSettingsV1Provider_AwsBedrock": ".types", + "ThinkSettingsV1Provider_AwsBedrockParams": ".requests", + "ThinkSettingsV1Provider_Google": ".types", + "ThinkSettingsV1Provider_GoogleParams": ".requests", + "ThinkSettingsV1Provider_Groq": ".types", + "ThinkSettingsV1Provider_GroqParams": ".requests", + "ThinkSettingsV1Provider_OpenAi": ".types", + "ThinkSettingsV1Provider_OpenAiParams": ".requests", "UpdateProjectMemberScopesV1Response": ".types", "UpdateProjectMemberScopesV1ResponseParams": ".requests", "UpdateProjectV1Response": ".types", @@ -670,6 +810,7 @@ "read": ".read", "self_hosted": ".self_hosted", "speak": ".speak", + "voice_agent": ".voice_agent", } @@ -695,33 +836,44 @@ def __dir__(): __all__ = [ + "AgentConfigurationV1", + "AgentConfigurationV1Params", "AgentThinkModelsV1Response", "AgentThinkModelsV1ResponseModelsItem", "AgentThinkModelsV1ResponseModelsItemId", "AgentThinkModelsV1ResponseModelsItemIdParams", - "AgentThinkModelsV1ResponseModelsItemIdProvider", "AgentThinkModelsV1ResponseModelsItemOne", "AgentThinkModelsV1ResponseModelsItemOneId", "AgentThinkModelsV1ResponseModelsItemOneParams", - "AgentThinkModelsV1ResponseModelsItemOneProvider", "AgentThinkModelsV1ResponseModelsItemParams", "AgentThinkModelsV1ResponseModelsItemThree", - "AgentThinkModelsV1ResponseModelsItemThreeId", "AgentThinkModelsV1ResponseModelsItemThreeParams", - "AgentThinkModelsV1ResponseModelsItemThreeProvider", "AgentThinkModelsV1ResponseModelsItemTwo", "AgentThinkModelsV1ResponseModelsItemTwoId", "AgentThinkModelsV1ResponseModelsItemTwoParams", - "AgentThinkModelsV1ResponseModelsItemTwoProvider", "AgentThinkModelsV1ResponseModelsItemZero", "AgentThinkModelsV1ResponseModelsItemZeroId", "AgentThinkModelsV1ResponseModelsItemZeroParams", - "AgentThinkModelsV1ResponseModelsItemZeroProvider", 
"AgentThinkModelsV1ResponseParams", + "AgentVariableV1", + "AgentVariableV1Params", "Anthropic", + "AnthropicParams", + "AnthropicThinkProviderModel", "AsyncDeepgramClient", "AwsBedrockThinkProvider", + "AwsBedrockThinkProviderCredentials", + "AwsBedrockThinkProviderCredentialsParams", + "AwsBedrockThinkProviderCredentialsType", + "AwsBedrockThinkProviderModel", + "AwsBedrockThinkProviderParams", "AwsPollySpeakProvider", + "AwsPollySpeakProviderCredentials", + "AwsPollySpeakProviderCredentialsParams", + "AwsPollySpeakProviderCredentialsType", + "AwsPollySpeakProviderEngine", + "AwsPollySpeakProviderParams", + "AwsPollySpeakProviderVoice", "BadRequestError", "BillingBreakdownV1Response", "BillingBreakdownV1ResponseParams", @@ -732,6 +884,12 @@ def __dir__(): "BillingBreakdownV1ResponseResultsItemGroupingParams", "BillingBreakdownV1ResponseResultsItemParams", "Cartesia", + "CartesiaParams", + "CartesiaSpeakProviderModelId", + "CartesiaSpeakProviderVoice", + "CartesiaSpeakProviderVoiceParams", + "CreateAgentConfigurationV1Response", + "CreateAgentConfigurationV1ResponseParams", "CreateKeyV1RequestOne", "CreateKeyV1Response", "CreateKeyV1ResponseParams", @@ -746,6 +904,10 @@ def __dir__(): "Deepgram", "DeepgramClient", "DeepgramClientEnvironment", + "DeepgramParams", + "DeepgramSpeakProviderModel", + "DeleteAgentConfigurationV1Response", + "DeleteAgentVariableV1Response", "DeleteProjectInviteV1Response", "DeleteProjectInviteV1ResponseParams", "DeleteProjectKeyV1Response", @@ -755,6 +917,8 @@ def __dir__(): "DeleteProjectV1Response", "DeleteProjectV1ResponseParams", "ElevenLabsSpeakProvider", + "ElevenLabsSpeakProviderModelId", + "ElevenLabsSpeakProviderParams", "ErrorResponse", "ErrorResponseLegacyError", "ErrorResponseLegacyErrorParams", @@ -791,11 +955,18 @@ def __dir__(): "GetProjectV1Response", "GetProjectV1ResponseParams", "Google", + "GoogleParams", + "GoogleThinkProviderModel", "GrantV1Response", "GrantV1ResponseParams", "Groq", + "GroqParams", 
"LeaveProjectV1Response", "LeaveProjectV1ResponseParams", + "ListAgentConfigurationsV1Response", + "ListAgentConfigurationsV1ResponseParams", + "ListAgentVariablesV1Response", + "ListAgentVariablesV1ResponseParams", "ListBillingFieldsV1Response", "ListBillingFieldsV1ResponseDeploymentsItem", "ListBillingFieldsV1ResponseParams", @@ -935,7 +1106,12 @@ def __dir__(): "ListenV2SampleRate", "ListenV2Tag", "OpenAiSpeakProvider", + "OpenAiSpeakProviderModel", + "OpenAiSpeakProviderParams", + "OpenAiSpeakProviderVoice", "OpenAiThinkProvider", + "OpenAiThinkProviderModel", + "OpenAiThinkProviderParams", "ProjectRequestResponse", "ProjectRequestResponseParams", "ReadV1Request", @@ -993,13 +1169,48 @@ def __dir__(): "SharedTopicsResultsTopicsSegmentsItemTopicsItem", "SharedTopicsResultsTopicsSegmentsItemTopicsItemParams", "SpeakSettingsV1", + "SpeakSettingsV1Endpoint", + "SpeakSettingsV1EndpointParams", + "SpeakSettingsV1Params", + "SpeakSettingsV1Provider", + "SpeakSettingsV1ProviderParams", + "SpeakSettingsV1Provider_AwsPolly", + "SpeakSettingsV1Provider_AwsPollyParams", + "SpeakSettingsV1Provider_Cartesia", + "SpeakSettingsV1Provider_CartesiaParams", + "SpeakSettingsV1Provider_Deepgram", + "SpeakSettingsV1Provider_DeepgramParams", + "SpeakSettingsV1Provider_ElevenLabs", + "SpeakSettingsV1Provider_ElevenLabsParams", + "SpeakSettingsV1Provider_OpenAi", + "SpeakSettingsV1Provider_OpenAiParams", "SpeakV1Encoding", "SpeakV1MipOptOut", "SpeakV1Model", "SpeakV1Response", "SpeakV1SampleRate", "ThinkSettingsV1", + "ThinkSettingsV1ContextLength", + "ThinkSettingsV1ContextLengthParams", + "ThinkSettingsV1Endpoint", + "ThinkSettingsV1EndpointParams", + "ThinkSettingsV1FunctionsItem", + "ThinkSettingsV1FunctionsItemEndpoint", + "ThinkSettingsV1FunctionsItemEndpointParams", + "ThinkSettingsV1FunctionsItemParams", "ThinkSettingsV1Params", + "ThinkSettingsV1Provider", + "ThinkSettingsV1ProviderParams", + "ThinkSettingsV1Provider_Anthropic", + "ThinkSettingsV1Provider_AnthropicParams", + 
"ThinkSettingsV1Provider_AwsBedrock", + "ThinkSettingsV1Provider_AwsBedrockParams", + "ThinkSettingsV1Provider_Google", + "ThinkSettingsV1Provider_GoogleParams", + "ThinkSettingsV1Provider_Groq", + "ThinkSettingsV1Provider_GroqParams", + "ThinkSettingsV1Provider_OpenAi", + "ThinkSettingsV1Provider_OpenAiParams", "UpdateProjectMemberScopesV1Response", "UpdateProjectMemberScopesV1ResponseParams", "UpdateProjectV1Response", @@ -1028,4 +1239,5 @@ def __dir__(): "read", "self_hosted", "speak", + "voice_agent", ] diff --git a/src/deepgram/agent/__init__.py b/src/deepgram/agent/__init__.py index 5b5014f4..cd2e01da 100644 --- a/src/deepgram/agent/__init__.py +++ b/src/deepgram/agent/__init__.py @@ -10,46 +10,33 @@ from .v1 import ( AgentV1AgentAudioDone, AgentV1AgentAudioDoneParams, - AgentV1AgentAudioDoneType, AgentV1AgentStartedSpeaking, AgentV1AgentStartedSpeakingParams, - AgentV1AgentStartedSpeakingType, AgentV1AgentThinking, AgentV1AgentThinkingParams, - AgentV1AgentThinkingType, AgentV1ConversationText, AgentV1ConversationTextParams, AgentV1ConversationTextRole, - AgentV1ConversationTextType, AgentV1Error, AgentV1ErrorParams, - AgentV1ErrorType, AgentV1FunctionCallRequest, AgentV1FunctionCallRequestFunctionsItem, AgentV1FunctionCallRequestFunctionsItemParams, AgentV1FunctionCallRequestParams, - AgentV1FunctionCallRequestType, AgentV1InjectAgentMessage, AgentV1InjectAgentMessageParams, - AgentV1InjectAgentMessageType, AgentV1InjectUserMessage, AgentV1InjectUserMessageParams, - AgentV1InjectUserMessageType, AgentV1InjectionRefused, AgentV1InjectionRefusedParams, - AgentV1InjectionRefusedType, AgentV1KeepAlive, AgentV1KeepAliveParams, - AgentV1KeepAliveType, AgentV1PromptUpdated, AgentV1PromptUpdatedParams, - AgentV1PromptUpdatedType, AgentV1ReceiveFunctionCallResponse, AgentV1ReceiveFunctionCallResponseParams, - AgentV1ReceiveFunctionCallResponseType, AgentV1SendFunctionCallResponse, AgentV1SendFunctionCallResponseParams, - AgentV1SendFunctionCallResponseType, 
AgentV1Settings, AgentV1SettingsAgent, AgentV1SettingsAgentContext, @@ -57,12 +44,10 @@ AgentV1SettingsAgentContextMessagesItemContent, AgentV1SettingsAgentContextMessagesItemContentParams, AgentV1SettingsAgentContextMessagesItemContentRole, - AgentV1SettingsAgentContextMessagesItemContentType, AgentV1SettingsAgentContextMessagesItemFunctionCalls, AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem, AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItemParams, AgentV1SettingsAgentContextMessagesItemFunctionCallsParams, - AgentV1SettingsAgentContextMessagesItemFunctionCallsType, AgentV1SettingsAgentContextMessagesItemParams, AgentV1SettingsAgentContextParams, AgentV1SettingsAgentListen, @@ -71,106 +56,19 @@ AgentV1SettingsAgentListenProviderParams, AgentV1SettingsAgentListenProviderV1, AgentV1SettingsAgentListenProviderV1Params, - AgentV1SettingsAgentListenProviderV1Type, AgentV1SettingsAgentListenProviderV2, AgentV1SettingsAgentListenProviderV2Params, - AgentV1SettingsAgentListenProviderV2Type, AgentV1SettingsAgentListenProvider_V1, AgentV1SettingsAgentListenProvider_V1Params, AgentV1SettingsAgentListenProvider_V2, AgentV1SettingsAgentListenProvider_V2Params, AgentV1SettingsAgentParams, AgentV1SettingsAgentSpeak, - AgentV1SettingsAgentSpeakEndpoint, - AgentV1SettingsAgentSpeakEndpointEndpoint, - AgentV1SettingsAgentSpeakEndpointEndpointParams, - AgentV1SettingsAgentSpeakEndpointParams, - AgentV1SettingsAgentSpeakEndpointProvider, - AgentV1SettingsAgentSpeakEndpointProviderAwsPolly, - AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials, - AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams, - AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType, - AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine, - AgentV1SettingsAgentSpeakEndpointProviderAwsPollyParams, - AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice, - AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId, - 
AgentV1SettingsAgentSpeakEndpointProviderCartesiaVersion, - AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice, - AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams, - AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel, - AgentV1SettingsAgentSpeakEndpointProviderDeepgramVersion, - AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId, - AgentV1SettingsAgentSpeakEndpointProviderElevenLabsVersion, - AgentV1SettingsAgentSpeakEndpointProviderOpenAi, - AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel, - AgentV1SettingsAgentSpeakEndpointProviderOpenAiParams, - AgentV1SettingsAgentSpeakEndpointProviderOpenAiVersion, - AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice, - AgentV1SettingsAgentSpeakEndpointProviderParams, - AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly, - AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams, - AgentV1SettingsAgentSpeakEndpointProvider_Cartesia, - AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams, - AgentV1SettingsAgentSpeakEndpointProvider_Deepgram, - AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams, - AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs, - AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams, - AgentV1SettingsAgentSpeakEndpointProvider_OpenAi, - AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams, - AgentV1SettingsAgentSpeakOneItem, - AgentV1SettingsAgentSpeakOneItemEndpoint, - AgentV1SettingsAgentSpeakOneItemEndpointParams, - AgentV1SettingsAgentSpeakOneItemParams, - AgentV1SettingsAgentSpeakOneItemProvider, - AgentV1SettingsAgentSpeakOneItemProviderAwsPolly, - AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentials, - AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsParams, - AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsType, - AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine, - AgentV1SettingsAgentSpeakOneItemProviderAwsPollyParams, - AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice, - 
AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId, - AgentV1SettingsAgentSpeakOneItemProviderCartesiaVersion, - AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoice, - AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoiceParams, - AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel, - AgentV1SettingsAgentSpeakOneItemProviderDeepgramVersion, - AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId, - AgentV1SettingsAgentSpeakOneItemProviderElevenLabsVersion, - AgentV1SettingsAgentSpeakOneItemProviderOpenAi, - AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel, - AgentV1SettingsAgentSpeakOneItemProviderOpenAiParams, - AgentV1SettingsAgentSpeakOneItemProviderOpenAiVersion, - AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice, - AgentV1SettingsAgentSpeakOneItemProviderParams, - AgentV1SettingsAgentSpeakOneItemProvider_AwsPolly, - AgentV1SettingsAgentSpeakOneItemProvider_AwsPollyParams, - AgentV1SettingsAgentSpeakOneItemProvider_Cartesia, - AgentV1SettingsAgentSpeakOneItemProvider_CartesiaParams, - AgentV1SettingsAgentSpeakOneItemProvider_Deepgram, - AgentV1SettingsAgentSpeakOneItemProvider_DeepgramParams, - AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabs, - AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabsParams, - AgentV1SettingsAgentSpeakOneItemProvider_OpenAi, - AgentV1SettingsAgentSpeakOneItemProvider_OpenAiParams, AgentV1SettingsAgentSpeakParams, AgentV1SettingsAgentThink, - AgentV1SettingsAgentThinkOneItem, - AgentV1SettingsAgentThinkOneItemContextLength, - AgentV1SettingsAgentThinkOneItemContextLengthParams, - AgentV1SettingsAgentThinkOneItemEndpoint, - AgentV1SettingsAgentThinkOneItemEndpointParams, - AgentV1SettingsAgentThinkOneItemFunctionsItem, - AgentV1SettingsAgentThinkOneItemFunctionsItemEndpoint, - AgentV1SettingsAgentThinkOneItemFunctionsItemEndpointParams, - AgentV1SettingsAgentThinkOneItemFunctionsItemParams, - AgentV1SettingsAgentThinkOneItemParams, - AgentV1SettingsAgentThinkOneItemProvider, - 
AgentV1SettingsAgentThinkOneItemProviderParams, AgentV1SettingsAgentThinkParams, AgentV1SettingsApplied, AgentV1SettingsAppliedParams, - AgentV1SettingsAppliedType, AgentV1SettingsAudio, AgentV1SettingsAudioInput, AgentV1SettingsAudioInputEncoding, @@ -182,162 +80,57 @@ AgentV1SettingsFlags, AgentV1SettingsFlagsParams, AgentV1SettingsParams, - AgentV1SettingsType, AgentV1SpeakUpdated, AgentV1SpeakUpdatedParams, - AgentV1SpeakUpdatedType, + AgentV1ThinkUpdated, + AgentV1ThinkUpdatedParams, AgentV1UpdatePrompt, AgentV1UpdatePromptParams, - AgentV1UpdatePromptType, AgentV1UpdateSpeak, AgentV1UpdateSpeakParams, AgentV1UpdateSpeakSpeak, - AgentV1UpdateSpeakSpeakEndpoint, - AgentV1UpdateSpeakSpeakEndpointEndpoint, - AgentV1UpdateSpeakSpeakEndpointEndpointParams, - AgentV1UpdateSpeakSpeakEndpointParams, - AgentV1UpdateSpeakSpeakEndpointProvider, - AgentV1UpdateSpeakSpeakEndpointProviderAwsPolly, - AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentials, - AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentialsParams, - AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentialsType, - AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyEngine, - AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyParams, - AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyVoice, - AgentV1UpdateSpeakSpeakEndpointProviderCartesia, - AgentV1UpdateSpeakSpeakEndpointProviderCartesiaModelId, - AgentV1UpdateSpeakSpeakEndpointProviderCartesiaParams, - AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVersion, - AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVoice, - AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVoiceParams, - AgentV1UpdateSpeakSpeakEndpointProviderDeepgram, - AgentV1UpdateSpeakSpeakEndpointProviderDeepgramModel, - AgentV1UpdateSpeakSpeakEndpointProviderDeepgramParams, - AgentV1UpdateSpeakSpeakEndpointProviderDeepgramVersion, - AgentV1UpdateSpeakSpeakEndpointProviderElevenLabs, - AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsModelId, - 
AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsParams, - AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsVersion, - AgentV1UpdateSpeakSpeakEndpointProviderOpenAi, - AgentV1UpdateSpeakSpeakEndpointProviderOpenAiModel, - AgentV1UpdateSpeakSpeakEndpointProviderOpenAiParams, - AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVersion, - AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVoice, - AgentV1UpdateSpeakSpeakEndpointProviderParams, - AgentV1UpdateSpeakSpeakEndpointProvider_AwsPolly, - AgentV1UpdateSpeakSpeakEndpointProvider_AwsPollyParams, - AgentV1UpdateSpeakSpeakEndpointProvider_Cartesia, - AgentV1UpdateSpeakSpeakEndpointProvider_CartesiaParams, - AgentV1UpdateSpeakSpeakEndpointProvider_Deepgram, - AgentV1UpdateSpeakSpeakEndpointProvider_DeepgramParams, - AgentV1UpdateSpeakSpeakEndpointProvider_ElevenLabs, - AgentV1UpdateSpeakSpeakEndpointProvider_ElevenLabsParams, - AgentV1UpdateSpeakSpeakEndpointProvider_OpenAi, - AgentV1UpdateSpeakSpeakEndpointProvider_OpenAiParams, - AgentV1UpdateSpeakSpeakOneItem, - AgentV1UpdateSpeakSpeakOneItemEndpoint, - AgentV1UpdateSpeakSpeakOneItemEndpointParams, - AgentV1UpdateSpeakSpeakOneItemParams, - AgentV1UpdateSpeakSpeakOneItemProvider, - AgentV1UpdateSpeakSpeakOneItemProviderAwsPolly, - AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentials, - AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentialsParams, - AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentialsType, - AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyEngine, - AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyParams, - AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyVoice, - AgentV1UpdateSpeakSpeakOneItemProviderCartesia, - AgentV1UpdateSpeakSpeakOneItemProviderCartesiaModelId, - AgentV1UpdateSpeakSpeakOneItemProviderCartesiaParams, - AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVersion, - AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVoice, - AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVoiceParams, - AgentV1UpdateSpeakSpeakOneItemProviderDeepgram, - 
AgentV1UpdateSpeakSpeakOneItemProviderDeepgramModel, - AgentV1UpdateSpeakSpeakOneItemProviderDeepgramParams, - AgentV1UpdateSpeakSpeakOneItemProviderDeepgramVersion, - AgentV1UpdateSpeakSpeakOneItemProviderElevenLabs, - AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsModelId, - AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsParams, - AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsVersion, - AgentV1UpdateSpeakSpeakOneItemProviderOpenAi, - AgentV1UpdateSpeakSpeakOneItemProviderOpenAiModel, - AgentV1UpdateSpeakSpeakOneItemProviderOpenAiParams, - AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVersion, - AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVoice, - AgentV1UpdateSpeakSpeakOneItemProviderParams, - AgentV1UpdateSpeakSpeakOneItemProvider_AwsPolly, - AgentV1UpdateSpeakSpeakOneItemProvider_AwsPollyParams, - AgentV1UpdateSpeakSpeakOneItemProvider_Cartesia, - AgentV1UpdateSpeakSpeakOneItemProvider_CartesiaParams, - AgentV1UpdateSpeakSpeakOneItemProvider_Deepgram, - AgentV1UpdateSpeakSpeakOneItemProvider_DeepgramParams, - AgentV1UpdateSpeakSpeakOneItemProvider_ElevenLabs, - AgentV1UpdateSpeakSpeakOneItemProvider_ElevenLabsParams, - AgentV1UpdateSpeakSpeakOneItemProvider_OpenAi, - AgentV1UpdateSpeakSpeakOneItemProvider_OpenAiParams, AgentV1UpdateSpeakSpeakParams, - AgentV1UpdateSpeakType, + AgentV1UpdateThink, + AgentV1UpdateThinkParams, + AgentV1UpdateThinkThink, + AgentV1UpdateThinkThinkParams, AgentV1UserStartedSpeaking, AgentV1UserStartedSpeakingParams, - AgentV1UserStartedSpeakingType, AgentV1Warning, AgentV1WarningParams, - AgentV1WarningType, AgentV1Welcome, AgentV1WelcomeParams, - AgentV1WelcomeType, - Cartesia, - CartesiaParams, - Deepgram, - DeepgramParams, - ElevenLabs, - ElevenLabsParams, - Max, ) _dynamic_imports: typing.Dict[str, str] = { "AgentV1AgentAudioDone": ".v1", "AgentV1AgentAudioDoneParams": ".v1", - "AgentV1AgentAudioDoneType": ".v1", "AgentV1AgentStartedSpeaking": ".v1", "AgentV1AgentStartedSpeakingParams": ".v1", - 
"AgentV1AgentStartedSpeakingType": ".v1", "AgentV1AgentThinking": ".v1", "AgentV1AgentThinkingParams": ".v1", - "AgentV1AgentThinkingType": ".v1", "AgentV1ConversationText": ".v1", "AgentV1ConversationTextParams": ".v1", "AgentV1ConversationTextRole": ".v1", - "AgentV1ConversationTextType": ".v1", "AgentV1Error": ".v1", "AgentV1ErrorParams": ".v1", - "AgentV1ErrorType": ".v1", "AgentV1FunctionCallRequest": ".v1", "AgentV1FunctionCallRequestFunctionsItem": ".v1", "AgentV1FunctionCallRequestFunctionsItemParams": ".v1", "AgentV1FunctionCallRequestParams": ".v1", - "AgentV1FunctionCallRequestType": ".v1", "AgentV1InjectAgentMessage": ".v1", "AgentV1InjectAgentMessageParams": ".v1", - "AgentV1InjectAgentMessageType": ".v1", "AgentV1InjectUserMessage": ".v1", "AgentV1InjectUserMessageParams": ".v1", - "AgentV1InjectUserMessageType": ".v1", "AgentV1InjectionRefused": ".v1", "AgentV1InjectionRefusedParams": ".v1", - "AgentV1InjectionRefusedType": ".v1", "AgentV1KeepAlive": ".v1", "AgentV1KeepAliveParams": ".v1", - "AgentV1KeepAliveType": ".v1", "AgentV1PromptUpdated": ".v1", "AgentV1PromptUpdatedParams": ".v1", - "AgentV1PromptUpdatedType": ".v1", "AgentV1ReceiveFunctionCallResponse": ".v1", "AgentV1ReceiveFunctionCallResponseParams": ".v1", - "AgentV1ReceiveFunctionCallResponseType": ".v1", "AgentV1SendFunctionCallResponse": ".v1", "AgentV1SendFunctionCallResponseParams": ".v1", - "AgentV1SendFunctionCallResponseType": ".v1", "AgentV1Settings": ".v1", "AgentV1SettingsAgent": ".v1", "AgentV1SettingsAgentContext": ".v1", @@ -345,12 +138,10 @@ "AgentV1SettingsAgentContextMessagesItemContent": ".v1", "AgentV1SettingsAgentContextMessagesItemContentParams": ".v1", "AgentV1SettingsAgentContextMessagesItemContentRole": ".v1", - "AgentV1SettingsAgentContextMessagesItemContentType": ".v1", "AgentV1SettingsAgentContextMessagesItemFunctionCalls": ".v1", "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem": ".v1", 
"AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItemParams": ".v1", "AgentV1SettingsAgentContextMessagesItemFunctionCallsParams": ".v1", - "AgentV1SettingsAgentContextMessagesItemFunctionCallsType": ".v1", "AgentV1SettingsAgentContextMessagesItemParams": ".v1", "AgentV1SettingsAgentContextParams": ".v1", "AgentV1SettingsAgentListen": ".v1", @@ -359,106 +150,19 @@ "AgentV1SettingsAgentListenProviderParams": ".v1", "AgentV1SettingsAgentListenProviderV1": ".v1", "AgentV1SettingsAgentListenProviderV1Params": ".v1", - "AgentV1SettingsAgentListenProviderV1Type": ".v1", "AgentV1SettingsAgentListenProviderV2": ".v1", "AgentV1SettingsAgentListenProviderV2Params": ".v1", - "AgentV1SettingsAgentListenProviderV2Type": ".v1", "AgentV1SettingsAgentListenProvider_V1": ".v1", "AgentV1SettingsAgentListenProvider_V1Params": ".v1", "AgentV1SettingsAgentListenProvider_V2": ".v1", "AgentV1SettingsAgentListenProvider_V2Params": ".v1", "AgentV1SettingsAgentParams": ".v1", "AgentV1SettingsAgentSpeak": ".v1", - "AgentV1SettingsAgentSpeakEndpoint": ".v1", - "AgentV1SettingsAgentSpeakEndpointEndpoint": ".v1", - "AgentV1SettingsAgentSpeakEndpointEndpointParams": ".v1", - "AgentV1SettingsAgentSpeakEndpointParams": ".v1", - "AgentV1SettingsAgentSpeakEndpointProvider": ".v1", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPolly": ".v1", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials": ".v1", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams": ".v1", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType": ".v1", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine": ".v1", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyParams": ".v1", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice": ".v1", - "AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId": ".v1", - "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVersion": ".v1", - "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice": ".v1", - 
"AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams": ".v1", - "AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel": ".v1", - "AgentV1SettingsAgentSpeakEndpointProviderDeepgramVersion": ".v1", - "AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId": ".v1", - "AgentV1SettingsAgentSpeakEndpointProviderElevenLabsVersion": ".v1", - "AgentV1SettingsAgentSpeakEndpointProviderOpenAi": ".v1", - "AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel": ".v1", - "AgentV1SettingsAgentSpeakEndpointProviderOpenAiParams": ".v1", - "AgentV1SettingsAgentSpeakEndpointProviderOpenAiVersion": ".v1", - "AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice": ".v1", - "AgentV1SettingsAgentSpeakEndpointProviderParams": ".v1", - "AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly": ".v1", - "AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams": ".v1", - "AgentV1SettingsAgentSpeakEndpointProvider_Cartesia": ".v1", - "AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams": ".v1", - "AgentV1SettingsAgentSpeakEndpointProvider_Deepgram": ".v1", - "AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams": ".v1", - "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs": ".v1", - "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams": ".v1", - "AgentV1SettingsAgentSpeakEndpointProvider_OpenAi": ".v1", - "AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams": ".v1", - "AgentV1SettingsAgentSpeakOneItem": ".v1", - "AgentV1SettingsAgentSpeakOneItemEndpoint": ".v1", - "AgentV1SettingsAgentSpeakOneItemEndpointParams": ".v1", - "AgentV1SettingsAgentSpeakOneItemParams": ".v1", - "AgentV1SettingsAgentSpeakOneItemProvider": ".v1", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPolly": ".v1", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentials": ".v1", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsParams": ".v1", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsType": ".v1", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine": 
".v1", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyParams": ".v1", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice": ".v1", - "AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId": ".v1", - "AgentV1SettingsAgentSpeakOneItemProviderCartesiaVersion": ".v1", - "AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoice": ".v1", - "AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoiceParams": ".v1", - "AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel": ".v1", - "AgentV1SettingsAgentSpeakOneItemProviderDeepgramVersion": ".v1", - "AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId": ".v1", - "AgentV1SettingsAgentSpeakOneItemProviderElevenLabsVersion": ".v1", - "AgentV1SettingsAgentSpeakOneItemProviderOpenAi": ".v1", - "AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel": ".v1", - "AgentV1SettingsAgentSpeakOneItemProviderOpenAiParams": ".v1", - "AgentV1SettingsAgentSpeakOneItemProviderOpenAiVersion": ".v1", - "AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice": ".v1", - "AgentV1SettingsAgentSpeakOneItemProviderParams": ".v1", - "AgentV1SettingsAgentSpeakOneItemProvider_AwsPolly": ".v1", - "AgentV1SettingsAgentSpeakOneItemProvider_AwsPollyParams": ".v1", - "AgentV1SettingsAgentSpeakOneItemProvider_Cartesia": ".v1", - "AgentV1SettingsAgentSpeakOneItemProvider_CartesiaParams": ".v1", - "AgentV1SettingsAgentSpeakOneItemProvider_Deepgram": ".v1", - "AgentV1SettingsAgentSpeakOneItemProvider_DeepgramParams": ".v1", - "AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabs": ".v1", - "AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabsParams": ".v1", - "AgentV1SettingsAgentSpeakOneItemProvider_OpenAi": ".v1", - "AgentV1SettingsAgentSpeakOneItemProvider_OpenAiParams": ".v1", "AgentV1SettingsAgentSpeakParams": ".v1", "AgentV1SettingsAgentThink": ".v1", - "AgentV1SettingsAgentThinkOneItem": ".v1", - "AgentV1SettingsAgentThinkOneItemContextLength": ".v1", - "AgentV1SettingsAgentThinkOneItemContextLengthParams": ".v1", - 
"AgentV1SettingsAgentThinkOneItemEndpoint": ".v1", - "AgentV1SettingsAgentThinkOneItemEndpointParams": ".v1", - "AgentV1SettingsAgentThinkOneItemFunctionsItem": ".v1", - "AgentV1SettingsAgentThinkOneItemFunctionsItemEndpoint": ".v1", - "AgentV1SettingsAgentThinkOneItemFunctionsItemEndpointParams": ".v1", - "AgentV1SettingsAgentThinkOneItemFunctionsItemParams": ".v1", - "AgentV1SettingsAgentThinkOneItemParams": ".v1", - "AgentV1SettingsAgentThinkOneItemProvider": ".v1", - "AgentV1SettingsAgentThinkOneItemProviderParams": ".v1", "AgentV1SettingsAgentThinkParams": ".v1", "AgentV1SettingsApplied": ".v1", "AgentV1SettingsAppliedParams": ".v1", - "AgentV1SettingsAppliedType": ".v1", "AgentV1SettingsAudio": ".v1", "AgentV1SettingsAudioInput": ".v1", "AgentV1SettingsAudioInputEncoding": ".v1", @@ -470,118 +174,26 @@ "AgentV1SettingsFlags": ".v1", "AgentV1SettingsFlagsParams": ".v1", "AgentV1SettingsParams": ".v1", - "AgentV1SettingsType": ".v1", "AgentV1SpeakUpdated": ".v1", "AgentV1SpeakUpdatedParams": ".v1", - "AgentV1SpeakUpdatedType": ".v1", + "AgentV1ThinkUpdated": ".v1", + "AgentV1ThinkUpdatedParams": ".v1", "AgentV1UpdatePrompt": ".v1", "AgentV1UpdatePromptParams": ".v1", - "AgentV1UpdatePromptType": ".v1", "AgentV1UpdateSpeak": ".v1", "AgentV1UpdateSpeakParams": ".v1", "AgentV1UpdateSpeakSpeak": ".v1", - "AgentV1UpdateSpeakSpeakEndpoint": ".v1", - "AgentV1UpdateSpeakSpeakEndpointEndpoint": ".v1", - "AgentV1UpdateSpeakSpeakEndpointEndpointParams": ".v1", - "AgentV1UpdateSpeakSpeakEndpointParams": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProvider": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPolly": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentials": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentialsParams": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentialsType": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyEngine": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyParams": ".v1", - 
"AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyVoice": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesia": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesiaModelId": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesiaParams": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVersion": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVoice": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVoiceParams": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProviderDeepgram": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProviderDeepgramModel": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProviderDeepgramParams": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProviderDeepgramVersion": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProviderElevenLabs": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsModelId": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsParams": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsVersion": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProviderOpenAi": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProviderOpenAiModel": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProviderOpenAiParams": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVersion": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVoice": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProviderParams": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProvider_AwsPolly": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProvider_AwsPollyParams": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProvider_Cartesia": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProvider_CartesiaParams": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProvider_Deepgram": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProvider_DeepgramParams": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProvider_ElevenLabs": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProvider_ElevenLabsParams": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProvider_OpenAi": ".v1", - "AgentV1UpdateSpeakSpeakEndpointProvider_OpenAiParams": ".v1", - 
"AgentV1UpdateSpeakSpeakOneItem": ".v1", - "AgentV1UpdateSpeakSpeakOneItemEndpoint": ".v1", - "AgentV1UpdateSpeakSpeakOneItemEndpointParams": ".v1", - "AgentV1UpdateSpeakSpeakOneItemParams": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProvider": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPolly": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentials": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentialsParams": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentialsType": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyEngine": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyParams": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyVoice": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesia": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesiaModelId": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesiaParams": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVersion": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVoice": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVoiceParams": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProviderDeepgram": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProviderDeepgramModel": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProviderDeepgramParams": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProviderDeepgramVersion": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProviderElevenLabs": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsModelId": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsParams": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsVersion": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProviderOpenAi": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProviderOpenAiModel": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProviderOpenAiParams": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVersion": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVoice": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProviderParams": ".v1", - 
"AgentV1UpdateSpeakSpeakOneItemProvider_AwsPolly": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProvider_AwsPollyParams": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProvider_Cartesia": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProvider_CartesiaParams": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProvider_Deepgram": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProvider_DeepgramParams": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProvider_ElevenLabs": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProvider_ElevenLabsParams": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProvider_OpenAi": ".v1", - "AgentV1UpdateSpeakSpeakOneItemProvider_OpenAiParams": ".v1", "AgentV1UpdateSpeakSpeakParams": ".v1", - "AgentV1UpdateSpeakType": ".v1", + "AgentV1UpdateThink": ".v1", + "AgentV1UpdateThinkParams": ".v1", + "AgentV1UpdateThinkThink": ".v1", + "AgentV1UpdateThinkThinkParams": ".v1", "AgentV1UserStartedSpeaking": ".v1", "AgentV1UserStartedSpeakingParams": ".v1", - "AgentV1UserStartedSpeakingType": ".v1", "AgentV1Warning": ".v1", "AgentV1WarningParams": ".v1", - "AgentV1WarningType": ".v1", "AgentV1Welcome": ".v1", "AgentV1WelcomeParams": ".v1", - "AgentV1WelcomeType": ".v1", - "Cartesia": ".v1", - "CartesiaParams": ".v1", - "Deepgram": ".v1", - "DeepgramParams": ".v1", - "ElevenLabs": ".v1", - "ElevenLabsParams": ".v1", - "Max": ".v1", "v1": ".v1", } @@ -610,46 +222,33 @@ def __dir__(): __all__ = [ "AgentV1AgentAudioDone", "AgentV1AgentAudioDoneParams", - "AgentV1AgentAudioDoneType", "AgentV1AgentStartedSpeaking", "AgentV1AgentStartedSpeakingParams", - "AgentV1AgentStartedSpeakingType", "AgentV1AgentThinking", "AgentV1AgentThinkingParams", - "AgentV1AgentThinkingType", "AgentV1ConversationText", "AgentV1ConversationTextParams", "AgentV1ConversationTextRole", - "AgentV1ConversationTextType", "AgentV1Error", "AgentV1ErrorParams", - "AgentV1ErrorType", "AgentV1FunctionCallRequest", "AgentV1FunctionCallRequestFunctionsItem", "AgentV1FunctionCallRequestFunctionsItemParams", "AgentV1FunctionCallRequestParams", - 
"AgentV1FunctionCallRequestType", "AgentV1InjectAgentMessage", "AgentV1InjectAgentMessageParams", - "AgentV1InjectAgentMessageType", "AgentV1InjectUserMessage", "AgentV1InjectUserMessageParams", - "AgentV1InjectUserMessageType", "AgentV1InjectionRefused", "AgentV1InjectionRefusedParams", - "AgentV1InjectionRefusedType", "AgentV1KeepAlive", "AgentV1KeepAliveParams", - "AgentV1KeepAliveType", "AgentV1PromptUpdated", "AgentV1PromptUpdatedParams", - "AgentV1PromptUpdatedType", "AgentV1ReceiveFunctionCallResponse", "AgentV1ReceiveFunctionCallResponseParams", - "AgentV1ReceiveFunctionCallResponseType", "AgentV1SendFunctionCallResponse", "AgentV1SendFunctionCallResponseParams", - "AgentV1SendFunctionCallResponseType", "AgentV1Settings", "AgentV1SettingsAgent", "AgentV1SettingsAgentContext", @@ -657,12 +256,10 @@ def __dir__(): "AgentV1SettingsAgentContextMessagesItemContent", "AgentV1SettingsAgentContextMessagesItemContentParams", "AgentV1SettingsAgentContextMessagesItemContentRole", - "AgentV1SettingsAgentContextMessagesItemContentType", "AgentV1SettingsAgentContextMessagesItemFunctionCalls", "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem", "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItemParams", "AgentV1SettingsAgentContextMessagesItemFunctionCallsParams", - "AgentV1SettingsAgentContextMessagesItemFunctionCallsType", "AgentV1SettingsAgentContextMessagesItemParams", "AgentV1SettingsAgentContextParams", "AgentV1SettingsAgentListen", @@ -671,106 +268,19 @@ def __dir__(): "AgentV1SettingsAgentListenProviderParams", "AgentV1SettingsAgentListenProviderV1", "AgentV1SettingsAgentListenProviderV1Params", - "AgentV1SettingsAgentListenProviderV1Type", "AgentV1SettingsAgentListenProviderV2", "AgentV1SettingsAgentListenProviderV2Params", - "AgentV1SettingsAgentListenProviderV2Type", "AgentV1SettingsAgentListenProvider_V1", "AgentV1SettingsAgentListenProvider_V1Params", "AgentV1SettingsAgentListenProvider_V2", 
"AgentV1SettingsAgentListenProvider_V2Params", "AgentV1SettingsAgentParams", "AgentV1SettingsAgentSpeak", - "AgentV1SettingsAgentSpeakEndpoint", - "AgentV1SettingsAgentSpeakEndpointEndpoint", - "AgentV1SettingsAgentSpeakEndpointEndpointParams", - "AgentV1SettingsAgentSpeakEndpointParams", - "AgentV1SettingsAgentSpeakEndpointProvider", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPolly", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyParams", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice", - "AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId", - "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVersion", - "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice", - "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams", - "AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel", - "AgentV1SettingsAgentSpeakEndpointProviderDeepgramVersion", - "AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId", - "AgentV1SettingsAgentSpeakEndpointProviderElevenLabsVersion", - "AgentV1SettingsAgentSpeakEndpointProviderOpenAi", - "AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel", - "AgentV1SettingsAgentSpeakEndpointProviderOpenAiParams", - "AgentV1SettingsAgentSpeakEndpointProviderOpenAiVersion", - "AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice", - "AgentV1SettingsAgentSpeakEndpointProviderParams", - "AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly", - "AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams", - "AgentV1SettingsAgentSpeakEndpointProvider_Cartesia", - "AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams", - "AgentV1SettingsAgentSpeakEndpointProvider_Deepgram", - "AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams", - 
"AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs", - "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams", - "AgentV1SettingsAgentSpeakEndpointProvider_OpenAi", - "AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams", - "AgentV1SettingsAgentSpeakOneItem", - "AgentV1SettingsAgentSpeakOneItemEndpoint", - "AgentV1SettingsAgentSpeakOneItemEndpointParams", - "AgentV1SettingsAgentSpeakOneItemParams", - "AgentV1SettingsAgentSpeakOneItemProvider", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPolly", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentials", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsParams", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsType", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyParams", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice", - "AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId", - "AgentV1SettingsAgentSpeakOneItemProviderCartesiaVersion", - "AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoice", - "AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoiceParams", - "AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel", - "AgentV1SettingsAgentSpeakOneItemProviderDeepgramVersion", - "AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId", - "AgentV1SettingsAgentSpeakOneItemProviderElevenLabsVersion", - "AgentV1SettingsAgentSpeakOneItemProviderOpenAi", - "AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel", - "AgentV1SettingsAgentSpeakOneItemProviderOpenAiParams", - "AgentV1SettingsAgentSpeakOneItemProviderOpenAiVersion", - "AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice", - "AgentV1SettingsAgentSpeakOneItemProviderParams", - "AgentV1SettingsAgentSpeakOneItemProvider_AwsPolly", - "AgentV1SettingsAgentSpeakOneItemProvider_AwsPollyParams", - "AgentV1SettingsAgentSpeakOneItemProvider_Cartesia", - "AgentV1SettingsAgentSpeakOneItemProvider_CartesiaParams", - 
"AgentV1SettingsAgentSpeakOneItemProvider_Deepgram", - "AgentV1SettingsAgentSpeakOneItemProvider_DeepgramParams", - "AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabs", - "AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabsParams", - "AgentV1SettingsAgentSpeakOneItemProvider_OpenAi", - "AgentV1SettingsAgentSpeakOneItemProvider_OpenAiParams", "AgentV1SettingsAgentSpeakParams", "AgentV1SettingsAgentThink", - "AgentV1SettingsAgentThinkOneItem", - "AgentV1SettingsAgentThinkOneItemContextLength", - "AgentV1SettingsAgentThinkOneItemContextLengthParams", - "AgentV1SettingsAgentThinkOneItemEndpoint", - "AgentV1SettingsAgentThinkOneItemEndpointParams", - "AgentV1SettingsAgentThinkOneItemFunctionsItem", - "AgentV1SettingsAgentThinkOneItemFunctionsItemEndpoint", - "AgentV1SettingsAgentThinkOneItemFunctionsItemEndpointParams", - "AgentV1SettingsAgentThinkOneItemFunctionsItemParams", - "AgentV1SettingsAgentThinkOneItemParams", - "AgentV1SettingsAgentThinkOneItemProvider", - "AgentV1SettingsAgentThinkOneItemProviderParams", "AgentV1SettingsAgentThinkParams", "AgentV1SettingsApplied", "AgentV1SettingsAppliedParams", - "AgentV1SettingsAppliedType", "AgentV1SettingsAudio", "AgentV1SettingsAudioInput", "AgentV1SettingsAudioInputEncoding", @@ -782,117 +292,25 @@ def __dir__(): "AgentV1SettingsFlags", "AgentV1SettingsFlagsParams", "AgentV1SettingsParams", - "AgentV1SettingsType", "AgentV1SpeakUpdated", "AgentV1SpeakUpdatedParams", - "AgentV1SpeakUpdatedType", + "AgentV1ThinkUpdated", + "AgentV1ThinkUpdatedParams", "AgentV1UpdatePrompt", "AgentV1UpdatePromptParams", - "AgentV1UpdatePromptType", "AgentV1UpdateSpeak", "AgentV1UpdateSpeakParams", "AgentV1UpdateSpeakSpeak", - "AgentV1UpdateSpeakSpeakEndpoint", - "AgentV1UpdateSpeakSpeakEndpointEndpoint", - "AgentV1UpdateSpeakSpeakEndpointEndpointParams", - "AgentV1UpdateSpeakSpeakEndpointParams", - "AgentV1UpdateSpeakSpeakEndpointProvider", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPolly", - 
"AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentials", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentialsParams", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentialsType", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyEngine", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyParams", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyVoice", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesia", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesiaModelId", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesiaParams", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVersion", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVoice", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVoiceParams", - "AgentV1UpdateSpeakSpeakEndpointProviderDeepgram", - "AgentV1UpdateSpeakSpeakEndpointProviderDeepgramModel", - "AgentV1UpdateSpeakSpeakEndpointProviderDeepgramParams", - "AgentV1UpdateSpeakSpeakEndpointProviderDeepgramVersion", - "AgentV1UpdateSpeakSpeakEndpointProviderElevenLabs", - "AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsModelId", - "AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsParams", - "AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsVersion", - "AgentV1UpdateSpeakSpeakEndpointProviderOpenAi", - "AgentV1UpdateSpeakSpeakEndpointProviderOpenAiModel", - "AgentV1UpdateSpeakSpeakEndpointProviderOpenAiParams", - "AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVersion", - "AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVoice", - "AgentV1UpdateSpeakSpeakEndpointProviderParams", - "AgentV1UpdateSpeakSpeakEndpointProvider_AwsPolly", - "AgentV1UpdateSpeakSpeakEndpointProvider_AwsPollyParams", - "AgentV1UpdateSpeakSpeakEndpointProvider_Cartesia", - "AgentV1UpdateSpeakSpeakEndpointProvider_CartesiaParams", - "AgentV1UpdateSpeakSpeakEndpointProvider_Deepgram", - "AgentV1UpdateSpeakSpeakEndpointProvider_DeepgramParams", - "AgentV1UpdateSpeakSpeakEndpointProvider_ElevenLabs", - "AgentV1UpdateSpeakSpeakEndpointProvider_ElevenLabsParams", - 
"AgentV1UpdateSpeakSpeakEndpointProvider_OpenAi", - "AgentV1UpdateSpeakSpeakEndpointProvider_OpenAiParams", - "AgentV1UpdateSpeakSpeakOneItem", - "AgentV1UpdateSpeakSpeakOneItemEndpoint", - "AgentV1UpdateSpeakSpeakOneItemEndpointParams", - "AgentV1UpdateSpeakSpeakOneItemParams", - "AgentV1UpdateSpeakSpeakOneItemProvider", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPolly", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentials", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentialsParams", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentialsType", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyEngine", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyParams", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyVoice", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesia", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesiaModelId", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesiaParams", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVersion", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVoice", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVoiceParams", - "AgentV1UpdateSpeakSpeakOneItemProviderDeepgram", - "AgentV1UpdateSpeakSpeakOneItemProviderDeepgramModel", - "AgentV1UpdateSpeakSpeakOneItemProviderDeepgramParams", - "AgentV1UpdateSpeakSpeakOneItemProviderDeepgramVersion", - "AgentV1UpdateSpeakSpeakOneItemProviderElevenLabs", - "AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsModelId", - "AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsParams", - "AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsVersion", - "AgentV1UpdateSpeakSpeakOneItemProviderOpenAi", - "AgentV1UpdateSpeakSpeakOneItemProviderOpenAiModel", - "AgentV1UpdateSpeakSpeakOneItemProviderOpenAiParams", - "AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVersion", - "AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVoice", - "AgentV1UpdateSpeakSpeakOneItemProviderParams", - "AgentV1UpdateSpeakSpeakOneItemProvider_AwsPolly", - "AgentV1UpdateSpeakSpeakOneItemProvider_AwsPollyParams", - 
"AgentV1UpdateSpeakSpeakOneItemProvider_Cartesia", - "AgentV1UpdateSpeakSpeakOneItemProvider_CartesiaParams", - "AgentV1UpdateSpeakSpeakOneItemProvider_Deepgram", - "AgentV1UpdateSpeakSpeakOneItemProvider_DeepgramParams", - "AgentV1UpdateSpeakSpeakOneItemProvider_ElevenLabs", - "AgentV1UpdateSpeakSpeakOneItemProvider_ElevenLabsParams", - "AgentV1UpdateSpeakSpeakOneItemProvider_OpenAi", - "AgentV1UpdateSpeakSpeakOneItemProvider_OpenAiParams", "AgentV1UpdateSpeakSpeakParams", - "AgentV1UpdateSpeakType", + "AgentV1UpdateThink", + "AgentV1UpdateThinkParams", + "AgentV1UpdateThinkThink", + "AgentV1UpdateThinkThinkParams", "AgentV1UserStartedSpeaking", "AgentV1UserStartedSpeakingParams", - "AgentV1UserStartedSpeakingType", "AgentV1Warning", "AgentV1WarningParams", - "AgentV1WarningType", "AgentV1Welcome", "AgentV1WelcomeParams", - "AgentV1WelcomeType", - "Cartesia", - "CartesiaParams", - "Deepgram", - "DeepgramParams", - "ElevenLabs", - "ElevenLabsParams", - "Max", "v1", ] diff --git a/src/deepgram/agent/v1/__init__.py b/src/deepgram/agent/v1/__init__.py index c4fe1e2e..a6de165a 100644 --- a/src/deepgram/agent/v1/__init__.py +++ b/src/deepgram/agent/v1/__init__.py @@ -8,187 +8,53 @@ if typing.TYPE_CHECKING: from .types import ( AgentV1AgentAudioDone, - AgentV1AgentAudioDoneType, AgentV1AgentStartedSpeaking, - AgentV1AgentStartedSpeakingType, AgentV1AgentThinking, - AgentV1AgentThinkingType, AgentV1ConversationText, AgentV1ConversationTextRole, - AgentV1ConversationTextType, AgentV1Error, - AgentV1ErrorType, AgentV1FunctionCallRequest, AgentV1FunctionCallRequestFunctionsItem, - AgentV1FunctionCallRequestType, AgentV1InjectAgentMessage, - AgentV1InjectAgentMessageType, AgentV1InjectUserMessage, - AgentV1InjectUserMessageType, AgentV1InjectionRefused, - AgentV1InjectionRefusedType, AgentV1KeepAlive, - AgentV1KeepAliveType, AgentV1PromptUpdated, - AgentV1PromptUpdatedType, AgentV1ReceiveFunctionCallResponse, - AgentV1ReceiveFunctionCallResponseType, 
AgentV1SendFunctionCallResponse, - AgentV1SendFunctionCallResponseType, AgentV1Settings, AgentV1SettingsAgent, AgentV1SettingsAgentContext, AgentV1SettingsAgentContextMessagesItem, AgentV1SettingsAgentContextMessagesItemContent, AgentV1SettingsAgentContextMessagesItemContentRole, - AgentV1SettingsAgentContextMessagesItemContentType, AgentV1SettingsAgentContextMessagesItemFunctionCalls, AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem, - AgentV1SettingsAgentContextMessagesItemFunctionCallsType, AgentV1SettingsAgentListen, AgentV1SettingsAgentListenProvider, AgentV1SettingsAgentListenProviderV1, - AgentV1SettingsAgentListenProviderV1Type, AgentV1SettingsAgentListenProviderV2, - AgentV1SettingsAgentListenProviderV2Type, AgentV1SettingsAgentListenProvider_V1, AgentV1SettingsAgentListenProvider_V2, AgentV1SettingsAgentSpeak, - AgentV1SettingsAgentSpeakEndpoint, - AgentV1SettingsAgentSpeakEndpointEndpoint, - AgentV1SettingsAgentSpeakEndpointProvider, - AgentV1SettingsAgentSpeakEndpointProviderAwsPolly, - AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials, - AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType, - AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine, - AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice, - AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId, - AgentV1SettingsAgentSpeakEndpointProviderCartesiaVersion, - AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice, - AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel, - AgentV1SettingsAgentSpeakEndpointProviderDeepgramVersion, - AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId, - AgentV1SettingsAgentSpeakEndpointProviderElevenLabsVersion, - AgentV1SettingsAgentSpeakEndpointProviderOpenAi, - AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel, - AgentV1SettingsAgentSpeakEndpointProviderOpenAiVersion, - AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice, - AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly, - 
AgentV1SettingsAgentSpeakEndpointProvider_Cartesia, - AgentV1SettingsAgentSpeakEndpointProvider_Deepgram, - AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs, - AgentV1SettingsAgentSpeakEndpointProvider_OpenAi, - AgentV1SettingsAgentSpeakOneItem, - AgentV1SettingsAgentSpeakOneItemEndpoint, - AgentV1SettingsAgentSpeakOneItemProvider, - AgentV1SettingsAgentSpeakOneItemProviderAwsPolly, - AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentials, - AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsType, - AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine, - AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice, - AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId, - AgentV1SettingsAgentSpeakOneItemProviderCartesiaVersion, - AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoice, - AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel, - AgentV1SettingsAgentSpeakOneItemProviderDeepgramVersion, - AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId, - AgentV1SettingsAgentSpeakOneItemProviderElevenLabsVersion, - AgentV1SettingsAgentSpeakOneItemProviderOpenAi, - AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel, - AgentV1SettingsAgentSpeakOneItemProviderOpenAiVersion, - AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice, - AgentV1SettingsAgentSpeakOneItemProvider_AwsPolly, - AgentV1SettingsAgentSpeakOneItemProvider_Cartesia, - AgentV1SettingsAgentSpeakOneItemProvider_Deepgram, - AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabs, - AgentV1SettingsAgentSpeakOneItemProvider_OpenAi, AgentV1SettingsAgentThink, - AgentV1SettingsAgentThinkOneItem, - AgentV1SettingsAgentThinkOneItemContextLength, - AgentV1SettingsAgentThinkOneItemEndpoint, - AgentV1SettingsAgentThinkOneItemFunctionsItem, - AgentV1SettingsAgentThinkOneItemFunctionsItemEndpoint, - AgentV1SettingsAgentThinkOneItemProvider, AgentV1SettingsApplied, - AgentV1SettingsAppliedType, AgentV1SettingsAudio, AgentV1SettingsAudioInput, AgentV1SettingsAudioInputEncoding, 
AgentV1SettingsAudioOutput, AgentV1SettingsAudioOutputEncoding, AgentV1SettingsFlags, - AgentV1SettingsType, AgentV1SpeakUpdated, - AgentV1SpeakUpdatedType, + AgentV1ThinkUpdated, AgentV1UpdatePrompt, - AgentV1UpdatePromptType, AgentV1UpdateSpeak, AgentV1UpdateSpeakSpeak, - AgentV1UpdateSpeakSpeakEndpoint, - AgentV1UpdateSpeakSpeakEndpointEndpoint, - AgentV1UpdateSpeakSpeakEndpointProvider, - AgentV1UpdateSpeakSpeakEndpointProviderAwsPolly, - AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentials, - AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentialsType, - AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyEngine, - AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyVoice, - AgentV1UpdateSpeakSpeakEndpointProviderCartesia, - AgentV1UpdateSpeakSpeakEndpointProviderCartesiaModelId, - AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVersion, - AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVoice, - AgentV1UpdateSpeakSpeakEndpointProviderDeepgram, - AgentV1UpdateSpeakSpeakEndpointProviderDeepgramModel, - AgentV1UpdateSpeakSpeakEndpointProviderDeepgramVersion, - AgentV1UpdateSpeakSpeakEndpointProviderElevenLabs, - AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsModelId, - AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsVersion, - AgentV1UpdateSpeakSpeakEndpointProviderOpenAi, - AgentV1UpdateSpeakSpeakEndpointProviderOpenAiModel, - AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVersion, - AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVoice, - AgentV1UpdateSpeakSpeakEndpointProvider_AwsPolly, - AgentV1UpdateSpeakSpeakEndpointProvider_Cartesia, - AgentV1UpdateSpeakSpeakEndpointProvider_Deepgram, - AgentV1UpdateSpeakSpeakEndpointProvider_ElevenLabs, - AgentV1UpdateSpeakSpeakEndpointProvider_OpenAi, - AgentV1UpdateSpeakSpeakOneItem, - AgentV1UpdateSpeakSpeakOneItemEndpoint, - AgentV1UpdateSpeakSpeakOneItemProvider, - AgentV1UpdateSpeakSpeakOneItemProviderAwsPolly, - AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentials, - 
AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentialsType, - AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyEngine, - AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyVoice, - AgentV1UpdateSpeakSpeakOneItemProviderCartesia, - AgentV1UpdateSpeakSpeakOneItemProviderCartesiaModelId, - AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVersion, - AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVoice, - AgentV1UpdateSpeakSpeakOneItemProviderDeepgram, - AgentV1UpdateSpeakSpeakOneItemProviderDeepgramModel, - AgentV1UpdateSpeakSpeakOneItemProviderDeepgramVersion, - AgentV1UpdateSpeakSpeakOneItemProviderElevenLabs, - AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsModelId, - AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsVersion, - AgentV1UpdateSpeakSpeakOneItemProviderOpenAi, - AgentV1UpdateSpeakSpeakOneItemProviderOpenAiModel, - AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVersion, - AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVoice, - AgentV1UpdateSpeakSpeakOneItemProvider_AwsPolly, - AgentV1UpdateSpeakSpeakOneItemProvider_Cartesia, - AgentV1UpdateSpeakSpeakOneItemProvider_Deepgram, - AgentV1UpdateSpeakSpeakOneItemProvider_ElevenLabs, - AgentV1UpdateSpeakSpeakOneItemProvider_OpenAi, - AgentV1UpdateSpeakType, + AgentV1UpdateThink, + AgentV1UpdateThinkThink, AgentV1UserStartedSpeaking, - AgentV1UserStartedSpeakingType, AgentV1Warning, - AgentV1WarningType, AgentV1Welcome, - AgentV1WelcomeType, - Cartesia, - Deepgram, - ElevenLabs, - Max, ) from . 
import settings from .requests import ( @@ -218,37 +84,7 @@ AgentV1SettingsAgentListenProvider_V1Params, AgentV1SettingsAgentListenProvider_V2Params, AgentV1SettingsAgentParams, - AgentV1SettingsAgentSpeakEndpointEndpointParams, - AgentV1SettingsAgentSpeakEndpointParams, - AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams, - AgentV1SettingsAgentSpeakEndpointProviderAwsPollyParams, - AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams, - AgentV1SettingsAgentSpeakEndpointProviderOpenAiParams, - AgentV1SettingsAgentSpeakEndpointProviderParams, - AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams, - AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams, - AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams, - AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams, - AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams, - AgentV1SettingsAgentSpeakOneItemEndpointParams, - AgentV1SettingsAgentSpeakOneItemParams, - AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsParams, - AgentV1SettingsAgentSpeakOneItemProviderAwsPollyParams, - AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoiceParams, - AgentV1SettingsAgentSpeakOneItemProviderOpenAiParams, - AgentV1SettingsAgentSpeakOneItemProviderParams, - AgentV1SettingsAgentSpeakOneItemProvider_AwsPollyParams, - AgentV1SettingsAgentSpeakOneItemProvider_CartesiaParams, - AgentV1SettingsAgentSpeakOneItemProvider_DeepgramParams, - AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabsParams, - AgentV1SettingsAgentSpeakOneItemProvider_OpenAiParams, AgentV1SettingsAgentSpeakParams, - AgentV1SettingsAgentThinkOneItemContextLengthParams, - AgentV1SettingsAgentThinkOneItemEndpointParams, - AgentV1SettingsAgentThinkOneItemFunctionsItemEndpointParams, - AgentV1SettingsAgentThinkOneItemFunctionsItemParams, - AgentV1SettingsAgentThinkOneItemParams, - AgentV1SettingsAgentThinkOneItemProviderParams, AgentV1SettingsAgentThinkParams, AgentV1SettingsAppliedParams, AgentV1SettingsAudioInputParams, 
@@ -257,89 +93,46 @@ AgentV1SettingsFlagsParams, AgentV1SettingsParams, AgentV1SpeakUpdatedParams, + AgentV1ThinkUpdatedParams, AgentV1UpdatePromptParams, AgentV1UpdateSpeakParams, - AgentV1UpdateSpeakSpeakEndpointEndpointParams, - AgentV1UpdateSpeakSpeakEndpointParams, - AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentialsParams, - AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyParams, - AgentV1UpdateSpeakSpeakEndpointProviderCartesiaParams, - AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVoiceParams, - AgentV1UpdateSpeakSpeakEndpointProviderDeepgramParams, - AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsParams, - AgentV1UpdateSpeakSpeakEndpointProviderOpenAiParams, - AgentV1UpdateSpeakSpeakEndpointProviderParams, - AgentV1UpdateSpeakSpeakEndpointProvider_AwsPollyParams, - AgentV1UpdateSpeakSpeakEndpointProvider_CartesiaParams, - AgentV1UpdateSpeakSpeakEndpointProvider_DeepgramParams, - AgentV1UpdateSpeakSpeakEndpointProvider_ElevenLabsParams, - AgentV1UpdateSpeakSpeakEndpointProvider_OpenAiParams, - AgentV1UpdateSpeakSpeakOneItemEndpointParams, - AgentV1UpdateSpeakSpeakOneItemParams, - AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentialsParams, - AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyParams, - AgentV1UpdateSpeakSpeakOneItemProviderCartesiaParams, - AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVoiceParams, - AgentV1UpdateSpeakSpeakOneItemProviderDeepgramParams, - AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsParams, - AgentV1UpdateSpeakSpeakOneItemProviderOpenAiParams, - AgentV1UpdateSpeakSpeakOneItemProviderParams, - AgentV1UpdateSpeakSpeakOneItemProvider_AwsPollyParams, - AgentV1UpdateSpeakSpeakOneItemProvider_CartesiaParams, - AgentV1UpdateSpeakSpeakOneItemProvider_DeepgramParams, - AgentV1UpdateSpeakSpeakOneItemProvider_ElevenLabsParams, - AgentV1UpdateSpeakSpeakOneItemProvider_OpenAiParams, AgentV1UpdateSpeakSpeakParams, + AgentV1UpdateThinkParams, + AgentV1UpdateThinkThinkParams, AgentV1UserStartedSpeakingParams, 
AgentV1WarningParams, AgentV1WelcomeParams, - CartesiaParams, - DeepgramParams, - ElevenLabsParams, ) _dynamic_imports: typing.Dict[str, str] = { "AgentV1AgentAudioDone": ".types", "AgentV1AgentAudioDoneParams": ".requests", - "AgentV1AgentAudioDoneType": ".types", "AgentV1AgentStartedSpeaking": ".types", "AgentV1AgentStartedSpeakingParams": ".requests", - "AgentV1AgentStartedSpeakingType": ".types", "AgentV1AgentThinking": ".types", "AgentV1AgentThinkingParams": ".requests", - "AgentV1AgentThinkingType": ".types", "AgentV1ConversationText": ".types", "AgentV1ConversationTextParams": ".requests", "AgentV1ConversationTextRole": ".types", - "AgentV1ConversationTextType": ".types", "AgentV1Error": ".types", "AgentV1ErrorParams": ".requests", - "AgentV1ErrorType": ".types", "AgentV1FunctionCallRequest": ".types", "AgentV1FunctionCallRequestFunctionsItem": ".types", "AgentV1FunctionCallRequestFunctionsItemParams": ".requests", "AgentV1FunctionCallRequestParams": ".requests", - "AgentV1FunctionCallRequestType": ".types", "AgentV1InjectAgentMessage": ".types", "AgentV1InjectAgentMessageParams": ".requests", - "AgentV1InjectAgentMessageType": ".types", "AgentV1InjectUserMessage": ".types", "AgentV1InjectUserMessageParams": ".requests", - "AgentV1InjectUserMessageType": ".types", "AgentV1InjectionRefused": ".types", "AgentV1InjectionRefusedParams": ".requests", - "AgentV1InjectionRefusedType": ".types", "AgentV1KeepAlive": ".types", "AgentV1KeepAliveParams": ".requests", - "AgentV1KeepAliveType": ".types", "AgentV1PromptUpdated": ".types", "AgentV1PromptUpdatedParams": ".requests", - "AgentV1PromptUpdatedType": ".types", "AgentV1ReceiveFunctionCallResponse": ".types", "AgentV1ReceiveFunctionCallResponseParams": ".requests", - "AgentV1ReceiveFunctionCallResponseType": ".types", "AgentV1SendFunctionCallResponse": ".types", "AgentV1SendFunctionCallResponseParams": ".requests", - "AgentV1SendFunctionCallResponseType": ".types", "AgentV1Settings": ".types", 
"AgentV1SettingsAgent": ".types", "AgentV1SettingsAgentContext": ".types", @@ -347,12 +140,10 @@ "AgentV1SettingsAgentContextMessagesItemContent": ".types", "AgentV1SettingsAgentContextMessagesItemContentParams": ".requests", "AgentV1SettingsAgentContextMessagesItemContentRole": ".types", - "AgentV1SettingsAgentContextMessagesItemContentType": ".types", "AgentV1SettingsAgentContextMessagesItemFunctionCalls": ".types", "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem": ".types", "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItemParams": ".requests", "AgentV1SettingsAgentContextMessagesItemFunctionCallsParams": ".requests", - "AgentV1SettingsAgentContextMessagesItemFunctionCallsType": ".types", "AgentV1SettingsAgentContextMessagesItemParams": ".requests", "AgentV1SettingsAgentContextParams": ".requests", "AgentV1SettingsAgentListen": ".types", @@ -361,106 +152,19 @@ "AgentV1SettingsAgentListenProviderParams": ".requests", "AgentV1SettingsAgentListenProviderV1": ".types", "AgentV1SettingsAgentListenProviderV1Params": ".requests", - "AgentV1SettingsAgentListenProviderV1Type": ".types", "AgentV1SettingsAgentListenProviderV2": ".types", "AgentV1SettingsAgentListenProviderV2Params": ".requests", - "AgentV1SettingsAgentListenProviderV2Type": ".types", "AgentV1SettingsAgentListenProvider_V1": ".types", "AgentV1SettingsAgentListenProvider_V1Params": ".requests", "AgentV1SettingsAgentListenProvider_V2": ".types", "AgentV1SettingsAgentListenProvider_V2Params": ".requests", "AgentV1SettingsAgentParams": ".requests", "AgentV1SettingsAgentSpeak": ".types", - "AgentV1SettingsAgentSpeakEndpoint": ".types", - "AgentV1SettingsAgentSpeakEndpointEndpoint": ".types", - "AgentV1SettingsAgentSpeakEndpointEndpointParams": ".requests", - "AgentV1SettingsAgentSpeakEndpointParams": ".requests", - "AgentV1SettingsAgentSpeakEndpointProvider": ".types", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPolly": ".types", - 
"AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials": ".types", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams": ".requests", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType": ".types", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine": ".types", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyParams": ".requests", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice": ".types", - "AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId": ".types", - "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVersion": ".types", - "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice": ".types", - "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams": ".requests", - "AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel": ".types", - "AgentV1SettingsAgentSpeakEndpointProviderDeepgramVersion": ".types", - "AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId": ".types", - "AgentV1SettingsAgentSpeakEndpointProviderElevenLabsVersion": ".types", - "AgentV1SettingsAgentSpeakEndpointProviderOpenAi": ".types", - "AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel": ".types", - "AgentV1SettingsAgentSpeakEndpointProviderOpenAiParams": ".requests", - "AgentV1SettingsAgentSpeakEndpointProviderOpenAiVersion": ".types", - "AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice": ".types", - "AgentV1SettingsAgentSpeakEndpointProviderParams": ".requests", - "AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly": ".types", - "AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams": ".requests", - "AgentV1SettingsAgentSpeakEndpointProvider_Cartesia": ".types", - "AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams": ".requests", - "AgentV1SettingsAgentSpeakEndpointProvider_Deepgram": ".types", - "AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams": ".requests", - "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs": ".types", - "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams": 
".requests", - "AgentV1SettingsAgentSpeakEndpointProvider_OpenAi": ".types", - "AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams": ".requests", - "AgentV1SettingsAgentSpeakOneItem": ".types", - "AgentV1SettingsAgentSpeakOneItemEndpoint": ".types", - "AgentV1SettingsAgentSpeakOneItemEndpointParams": ".requests", - "AgentV1SettingsAgentSpeakOneItemParams": ".requests", - "AgentV1SettingsAgentSpeakOneItemProvider": ".types", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPolly": ".types", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentials": ".types", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsParams": ".requests", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsType": ".types", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine": ".types", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyParams": ".requests", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice": ".types", - "AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId": ".types", - "AgentV1SettingsAgentSpeakOneItemProviderCartesiaVersion": ".types", - "AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoice": ".types", - "AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoiceParams": ".requests", - "AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel": ".types", - "AgentV1SettingsAgentSpeakOneItemProviderDeepgramVersion": ".types", - "AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId": ".types", - "AgentV1SettingsAgentSpeakOneItemProviderElevenLabsVersion": ".types", - "AgentV1SettingsAgentSpeakOneItemProviderOpenAi": ".types", - "AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel": ".types", - "AgentV1SettingsAgentSpeakOneItemProviderOpenAiParams": ".requests", - "AgentV1SettingsAgentSpeakOneItemProviderOpenAiVersion": ".types", - "AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice": ".types", - "AgentV1SettingsAgentSpeakOneItemProviderParams": ".requests", - "AgentV1SettingsAgentSpeakOneItemProvider_AwsPolly": ".types", - 
"AgentV1SettingsAgentSpeakOneItemProvider_AwsPollyParams": ".requests", - "AgentV1SettingsAgentSpeakOneItemProvider_Cartesia": ".types", - "AgentV1SettingsAgentSpeakOneItemProvider_CartesiaParams": ".requests", - "AgentV1SettingsAgentSpeakOneItemProvider_Deepgram": ".types", - "AgentV1SettingsAgentSpeakOneItemProvider_DeepgramParams": ".requests", - "AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabs": ".types", - "AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabsParams": ".requests", - "AgentV1SettingsAgentSpeakOneItemProvider_OpenAi": ".types", - "AgentV1SettingsAgentSpeakOneItemProvider_OpenAiParams": ".requests", "AgentV1SettingsAgentSpeakParams": ".requests", "AgentV1SettingsAgentThink": ".types", - "AgentV1SettingsAgentThinkOneItem": ".types", - "AgentV1SettingsAgentThinkOneItemContextLength": ".types", - "AgentV1SettingsAgentThinkOneItemContextLengthParams": ".requests", - "AgentV1SettingsAgentThinkOneItemEndpoint": ".types", - "AgentV1SettingsAgentThinkOneItemEndpointParams": ".requests", - "AgentV1SettingsAgentThinkOneItemFunctionsItem": ".types", - "AgentV1SettingsAgentThinkOneItemFunctionsItemEndpoint": ".types", - "AgentV1SettingsAgentThinkOneItemFunctionsItemEndpointParams": ".requests", - "AgentV1SettingsAgentThinkOneItemFunctionsItemParams": ".requests", - "AgentV1SettingsAgentThinkOneItemParams": ".requests", - "AgentV1SettingsAgentThinkOneItemProvider": ".types", - "AgentV1SettingsAgentThinkOneItemProviderParams": ".requests", "AgentV1SettingsAgentThinkParams": ".requests", "AgentV1SettingsApplied": ".types", "AgentV1SettingsAppliedParams": ".requests", - "AgentV1SettingsAppliedType": ".types", "AgentV1SettingsAudio": ".types", "AgentV1SettingsAudioInput": ".types", "AgentV1SettingsAudioInputEncoding": ".types", @@ -472,118 +176,26 @@ "AgentV1SettingsFlags": ".types", "AgentV1SettingsFlagsParams": ".requests", "AgentV1SettingsParams": ".requests", - "AgentV1SettingsType": ".types", "AgentV1SpeakUpdated": ".types", "AgentV1SpeakUpdatedParams": 
".requests", - "AgentV1SpeakUpdatedType": ".types", + "AgentV1ThinkUpdated": ".types", + "AgentV1ThinkUpdatedParams": ".requests", "AgentV1UpdatePrompt": ".types", "AgentV1UpdatePromptParams": ".requests", - "AgentV1UpdatePromptType": ".types", "AgentV1UpdateSpeak": ".types", "AgentV1UpdateSpeakParams": ".requests", "AgentV1UpdateSpeakSpeak": ".types", - "AgentV1UpdateSpeakSpeakEndpoint": ".types", - "AgentV1UpdateSpeakSpeakEndpointEndpoint": ".types", - "AgentV1UpdateSpeakSpeakEndpointEndpointParams": ".requests", - "AgentV1UpdateSpeakSpeakEndpointParams": ".requests", - "AgentV1UpdateSpeakSpeakEndpointProvider": ".types", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPolly": ".types", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentials": ".types", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentialsParams": ".requests", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentialsType": ".types", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyEngine": ".types", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyParams": ".requests", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyVoice": ".types", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesia": ".types", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesiaModelId": ".types", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesiaParams": ".requests", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVersion": ".types", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVoice": ".types", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVoiceParams": ".requests", - "AgentV1UpdateSpeakSpeakEndpointProviderDeepgram": ".types", - "AgentV1UpdateSpeakSpeakEndpointProviderDeepgramModel": ".types", - "AgentV1UpdateSpeakSpeakEndpointProviderDeepgramParams": ".requests", - "AgentV1UpdateSpeakSpeakEndpointProviderDeepgramVersion": ".types", - "AgentV1UpdateSpeakSpeakEndpointProviderElevenLabs": ".types", - "AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsModelId": ".types", - 
"AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsParams": ".requests", - "AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsVersion": ".types", - "AgentV1UpdateSpeakSpeakEndpointProviderOpenAi": ".types", - "AgentV1UpdateSpeakSpeakEndpointProviderOpenAiModel": ".types", - "AgentV1UpdateSpeakSpeakEndpointProviderOpenAiParams": ".requests", - "AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVersion": ".types", - "AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVoice": ".types", - "AgentV1UpdateSpeakSpeakEndpointProviderParams": ".requests", - "AgentV1UpdateSpeakSpeakEndpointProvider_AwsPolly": ".types", - "AgentV1UpdateSpeakSpeakEndpointProvider_AwsPollyParams": ".requests", - "AgentV1UpdateSpeakSpeakEndpointProvider_Cartesia": ".types", - "AgentV1UpdateSpeakSpeakEndpointProvider_CartesiaParams": ".requests", - "AgentV1UpdateSpeakSpeakEndpointProvider_Deepgram": ".types", - "AgentV1UpdateSpeakSpeakEndpointProvider_DeepgramParams": ".requests", - "AgentV1UpdateSpeakSpeakEndpointProvider_ElevenLabs": ".types", - "AgentV1UpdateSpeakSpeakEndpointProvider_ElevenLabsParams": ".requests", - "AgentV1UpdateSpeakSpeakEndpointProvider_OpenAi": ".types", - "AgentV1UpdateSpeakSpeakEndpointProvider_OpenAiParams": ".requests", - "AgentV1UpdateSpeakSpeakOneItem": ".types", - "AgentV1UpdateSpeakSpeakOneItemEndpoint": ".types", - "AgentV1UpdateSpeakSpeakOneItemEndpointParams": ".requests", - "AgentV1UpdateSpeakSpeakOneItemParams": ".requests", - "AgentV1UpdateSpeakSpeakOneItemProvider": ".types", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPolly": ".types", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentials": ".types", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentialsParams": ".requests", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentialsType": ".types", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyEngine": ".types", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyParams": ".requests", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyVoice": ".types", - 
"AgentV1UpdateSpeakSpeakOneItemProviderCartesia": ".types", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesiaModelId": ".types", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesiaParams": ".requests", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVersion": ".types", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVoice": ".types", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVoiceParams": ".requests", - "AgentV1UpdateSpeakSpeakOneItemProviderDeepgram": ".types", - "AgentV1UpdateSpeakSpeakOneItemProviderDeepgramModel": ".types", - "AgentV1UpdateSpeakSpeakOneItemProviderDeepgramParams": ".requests", - "AgentV1UpdateSpeakSpeakOneItemProviderDeepgramVersion": ".types", - "AgentV1UpdateSpeakSpeakOneItemProviderElevenLabs": ".types", - "AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsModelId": ".types", - "AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsParams": ".requests", - "AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsVersion": ".types", - "AgentV1UpdateSpeakSpeakOneItemProviderOpenAi": ".types", - "AgentV1UpdateSpeakSpeakOneItemProviderOpenAiModel": ".types", - "AgentV1UpdateSpeakSpeakOneItemProviderOpenAiParams": ".requests", - "AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVersion": ".types", - "AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVoice": ".types", - "AgentV1UpdateSpeakSpeakOneItemProviderParams": ".requests", - "AgentV1UpdateSpeakSpeakOneItemProvider_AwsPolly": ".types", - "AgentV1UpdateSpeakSpeakOneItemProvider_AwsPollyParams": ".requests", - "AgentV1UpdateSpeakSpeakOneItemProvider_Cartesia": ".types", - "AgentV1UpdateSpeakSpeakOneItemProvider_CartesiaParams": ".requests", - "AgentV1UpdateSpeakSpeakOneItemProvider_Deepgram": ".types", - "AgentV1UpdateSpeakSpeakOneItemProvider_DeepgramParams": ".requests", - "AgentV1UpdateSpeakSpeakOneItemProvider_ElevenLabs": ".types", - "AgentV1UpdateSpeakSpeakOneItemProvider_ElevenLabsParams": ".requests", - "AgentV1UpdateSpeakSpeakOneItemProvider_OpenAi": ".types", - 
"AgentV1UpdateSpeakSpeakOneItemProvider_OpenAiParams": ".requests", "AgentV1UpdateSpeakSpeakParams": ".requests", - "AgentV1UpdateSpeakType": ".types", + "AgentV1UpdateThink": ".types", + "AgentV1UpdateThinkParams": ".requests", + "AgentV1UpdateThinkThink": ".types", + "AgentV1UpdateThinkThinkParams": ".requests", "AgentV1UserStartedSpeaking": ".types", "AgentV1UserStartedSpeakingParams": ".requests", - "AgentV1UserStartedSpeakingType": ".types", "AgentV1Warning": ".types", "AgentV1WarningParams": ".requests", - "AgentV1WarningType": ".types", "AgentV1Welcome": ".types", "AgentV1WelcomeParams": ".requests", - "AgentV1WelcomeType": ".types", - "Cartesia": ".types", - "CartesiaParams": ".requests", - "Deepgram": ".types", - "DeepgramParams": ".requests", - "ElevenLabs": ".types", - "ElevenLabsParams": ".requests", - "Max": ".types", "settings": ".settings", } @@ -612,46 +224,33 @@ def __dir__(): __all__ = [ "AgentV1AgentAudioDone", "AgentV1AgentAudioDoneParams", - "AgentV1AgentAudioDoneType", "AgentV1AgentStartedSpeaking", "AgentV1AgentStartedSpeakingParams", - "AgentV1AgentStartedSpeakingType", "AgentV1AgentThinking", "AgentV1AgentThinkingParams", - "AgentV1AgentThinkingType", "AgentV1ConversationText", "AgentV1ConversationTextParams", "AgentV1ConversationTextRole", - "AgentV1ConversationTextType", "AgentV1Error", "AgentV1ErrorParams", - "AgentV1ErrorType", "AgentV1FunctionCallRequest", "AgentV1FunctionCallRequestFunctionsItem", "AgentV1FunctionCallRequestFunctionsItemParams", "AgentV1FunctionCallRequestParams", - "AgentV1FunctionCallRequestType", "AgentV1InjectAgentMessage", "AgentV1InjectAgentMessageParams", - "AgentV1InjectAgentMessageType", "AgentV1InjectUserMessage", "AgentV1InjectUserMessageParams", - "AgentV1InjectUserMessageType", "AgentV1InjectionRefused", "AgentV1InjectionRefusedParams", - "AgentV1InjectionRefusedType", "AgentV1KeepAlive", "AgentV1KeepAliveParams", - "AgentV1KeepAliveType", "AgentV1PromptUpdated", "AgentV1PromptUpdatedParams", - 
"AgentV1PromptUpdatedType", "AgentV1ReceiveFunctionCallResponse", "AgentV1ReceiveFunctionCallResponseParams", - "AgentV1ReceiveFunctionCallResponseType", "AgentV1SendFunctionCallResponse", "AgentV1SendFunctionCallResponseParams", - "AgentV1SendFunctionCallResponseType", "AgentV1Settings", "AgentV1SettingsAgent", "AgentV1SettingsAgentContext", @@ -659,12 +258,10 @@ def __dir__(): "AgentV1SettingsAgentContextMessagesItemContent", "AgentV1SettingsAgentContextMessagesItemContentParams", "AgentV1SettingsAgentContextMessagesItemContentRole", - "AgentV1SettingsAgentContextMessagesItemContentType", "AgentV1SettingsAgentContextMessagesItemFunctionCalls", "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem", "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItemParams", "AgentV1SettingsAgentContextMessagesItemFunctionCallsParams", - "AgentV1SettingsAgentContextMessagesItemFunctionCallsType", "AgentV1SettingsAgentContextMessagesItemParams", "AgentV1SettingsAgentContextParams", "AgentV1SettingsAgentListen", @@ -673,106 +270,19 @@ def __dir__(): "AgentV1SettingsAgentListenProviderParams", "AgentV1SettingsAgentListenProviderV1", "AgentV1SettingsAgentListenProviderV1Params", - "AgentV1SettingsAgentListenProviderV1Type", "AgentV1SettingsAgentListenProviderV2", "AgentV1SettingsAgentListenProviderV2Params", - "AgentV1SettingsAgentListenProviderV2Type", "AgentV1SettingsAgentListenProvider_V1", "AgentV1SettingsAgentListenProvider_V1Params", "AgentV1SettingsAgentListenProvider_V2", "AgentV1SettingsAgentListenProvider_V2Params", "AgentV1SettingsAgentParams", "AgentV1SettingsAgentSpeak", - "AgentV1SettingsAgentSpeakEndpoint", - "AgentV1SettingsAgentSpeakEndpointEndpoint", - "AgentV1SettingsAgentSpeakEndpointEndpointParams", - "AgentV1SettingsAgentSpeakEndpointParams", - "AgentV1SettingsAgentSpeakEndpointProvider", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPolly", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials", - 
"AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyParams", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice", - "AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId", - "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVersion", - "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice", - "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams", - "AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel", - "AgentV1SettingsAgentSpeakEndpointProviderDeepgramVersion", - "AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId", - "AgentV1SettingsAgentSpeakEndpointProviderElevenLabsVersion", - "AgentV1SettingsAgentSpeakEndpointProviderOpenAi", - "AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel", - "AgentV1SettingsAgentSpeakEndpointProviderOpenAiParams", - "AgentV1SettingsAgentSpeakEndpointProviderOpenAiVersion", - "AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice", - "AgentV1SettingsAgentSpeakEndpointProviderParams", - "AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly", - "AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams", - "AgentV1SettingsAgentSpeakEndpointProvider_Cartesia", - "AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams", - "AgentV1SettingsAgentSpeakEndpointProvider_Deepgram", - "AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams", - "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs", - "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams", - "AgentV1SettingsAgentSpeakEndpointProvider_OpenAi", - "AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams", - "AgentV1SettingsAgentSpeakOneItem", - "AgentV1SettingsAgentSpeakOneItemEndpoint", - "AgentV1SettingsAgentSpeakOneItemEndpointParams", - "AgentV1SettingsAgentSpeakOneItemParams", - "AgentV1SettingsAgentSpeakOneItemProvider", - 
"AgentV1SettingsAgentSpeakOneItemProviderAwsPolly", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentials", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsParams", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsType", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyParams", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice", - "AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId", - "AgentV1SettingsAgentSpeakOneItemProviderCartesiaVersion", - "AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoice", - "AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoiceParams", - "AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel", - "AgentV1SettingsAgentSpeakOneItemProviderDeepgramVersion", - "AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId", - "AgentV1SettingsAgentSpeakOneItemProviderElevenLabsVersion", - "AgentV1SettingsAgentSpeakOneItemProviderOpenAi", - "AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel", - "AgentV1SettingsAgentSpeakOneItemProviderOpenAiParams", - "AgentV1SettingsAgentSpeakOneItemProviderOpenAiVersion", - "AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice", - "AgentV1SettingsAgentSpeakOneItemProviderParams", - "AgentV1SettingsAgentSpeakOneItemProvider_AwsPolly", - "AgentV1SettingsAgentSpeakOneItemProvider_AwsPollyParams", - "AgentV1SettingsAgentSpeakOneItemProvider_Cartesia", - "AgentV1SettingsAgentSpeakOneItemProvider_CartesiaParams", - "AgentV1SettingsAgentSpeakOneItemProvider_Deepgram", - "AgentV1SettingsAgentSpeakOneItemProvider_DeepgramParams", - "AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabs", - "AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabsParams", - "AgentV1SettingsAgentSpeakOneItemProvider_OpenAi", - "AgentV1SettingsAgentSpeakOneItemProvider_OpenAiParams", "AgentV1SettingsAgentSpeakParams", "AgentV1SettingsAgentThink", - "AgentV1SettingsAgentThinkOneItem", - "AgentV1SettingsAgentThinkOneItemContextLength", - 
"AgentV1SettingsAgentThinkOneItemContextLengthParams", - "AgentV1SettingsAgentThinkOneItemEndpoint", - "AgentV1SettingsAgentThinkOneItemEndpointParams", - "AgentV1SettingsAgentThinkOneItemFunctionsItem", - "AgentV1SettingsAgentThinkOneItemFunctionsItemEndpoint", - "AgentV1SettingsAgentThinkOneItemFunctionsItemEndpointParams", - "AgentV1SettingsAgentThinkOneItemFunctionsItemParams", - "AgentV1SettingsAgentThinkOneItemParams", - "AgentV1SettingsAgentThinkOneItemProvider", - "AgentV1SettingsAgentThinkOneItemProviderParams", "AgentV1SettingsAgentThinkParams", "AgentV1SettingsApplied", "AgentV1SettingsAppliedParams", - "AgentV1SettingsAppliedType", "AgentV1SettingsAudio", "AgentV1SettingsAudioInput", "AgentV1SettingsAudioInputEncoding", @@ -784,117 +294,25 @@ def __dir__(): "AgentV1SettingsFlags", "AgentV1SettingsFlagsParams", "AgentV1SettingsParams", - "AgentV1SettingsType", "AgentV1SpeakUpdated", "AgentV1SpeakUpdatedParams", - "AgentV1SpeakUpdatedType", + "AgentV1ThinkUpdated", + "AgentV1ThinkUpdatedParams", "AgentV1UpdatePrompt", "AgentV1UpdatePromptParams", - "AgentV1UpdatePromptType", "AgentV1UpdateSpeak", "AgentV1UpdateSpeakParams", "AgentV1UpdateSpeakSpeak", - "AgentV1UpdateSpeakSpeakEndpoint", - "AgentV1UpdateSpeakSpeakEndpointEndpoint", - "AgentV1UpdateSpeakSpeakEndpointEndpointParams", - "AgentV1UpdateSpeakSpeakEndpointParams", - "AgentV1UpdateSpeakSpeakEndpointProvider", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPolly", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentials", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentialsParams", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentialsType", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyEngine", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyParams", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyVoice", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesia", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesiaModelId", - 
"AgentV1UpdateSpeakSpeakEndpointProviderCartesiaParams", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVersion", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVoice", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVoiceParams", - "AgentV1UpdateSpeakSpeakEndpointProviderDeepgram", - "AgentV1UpdateSpeakSpeakEndpointProviderDeepgramModel", - "AgentV1UpdateSpeakSpeakEndpointProviderDeepgramParams", - "AgentV1UpdateSpeakSpeakEndpointProviderDeepgramVersion", - "AgentV1UpdateSpeakSpeakEndpointProviderElevenLabs", - "AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsModelId", - "AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsParams", - "AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsVersion", - "AgentV1UpdateSpeakSpeakEndpointProviderOpenAi", - "AgentV1UpdateSpeakSpeakEndpointProviderOpenAiModel", - "AgentV1UpdateSpeakSpeakEndpointProviderOpenAiParams", - "AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVersion", - "AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVoice", - "AgentV1UpdateSpeakSpeakEndpointProviderParams", - "AgentV1UpdateSpeakSpeakEndpointProvider_AwsPolly", - "AgentV1UpdateSpeakSpeakEndpointProvider_AwsPollyParams", - "AgentV1UpdateSpeakSpeakEndpointProvider_Cartesia", - "AgentV1UpdateSpeakSpeakEndpointProvider_CartesiaParams", - "AgentV1UpdateSpeakSpeakEndpointProvider_Deepgram", - "AgentV1UpdateSpeakSpeakEndpointProvider_DeepgramParams", - "AgentV1UpdateSpeakSpeakEndpointProvider_ElevenLabs", - "AgentV1UpdateSpeakSpeakEndpointProvider_ElevenLabsParams", - "AgentV1UpdateSpeakSpeakEndpointProvider_OpenAi", - "AgentV1UpdateSpeakSpeakEndpointProvider_OpenAiParams", - "AgentV1UpdateSpeakSpeakOneItem", - "AgentV1UpdateSpeakSpeakOneItemEndpoint", - "AgentV1UpdateSpeakSpeakOneItemEndpointParams", - "AgentV1UpdateSpeakSpeakOneItemParams", - "AgentV1UpdateSpeakSpeakOneItemProvider", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPolly", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentials", - 
"AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentialsParams", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentialsType", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyEngine", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyParams", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyVoice", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesia", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesiaModelId", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesiaParams", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVersion", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVoice", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVoiceParams", - "AgentV1UpdateSpeakSpeakOneItemProviderDeepgram", - "AgentV1UpdateSpeakSpeakOneItemProviderDeepgramModel", - "AgentV1UpdateSpeakSpeakOneItemProviderDeepgramParams", - "AgentV1UpdateSpeakSpeakOneItemProviderDeepgramVersion", - "AgentV1UpdateSpeakSpeakOneItemProviderElevenLabs", - "AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsModelId", - "AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsParams", - "AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsVersion", - "AgentV1UpdateSpeakSpeakOneItemProviderOpenAi", - "AgentV1UpdateSpeakSpeakOneItemProviderOpenAiModel", - "AgentV1UpdateSpeakSpeakOneItemProviderOpenAiParams", - "AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVersion", - "AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVoice", - "AgentV1UpdateSpeakSpeakOneItemProviderParams", - "AgentV1UpdateSpeakSpeakOneItemProvider_AwsPolly", - "AgentV1UpdateSpeakSpeakOneItemProvider_AwsPollyParams", - "AgentV1UpdateSpeakSpeakOneItemProvider_Cartesia", - "AgentV1UpdateSpeakSpeakOneItemProvider_CartesiaParams", - "AgentV1UpdateSpeakSpeakOneItemProvider_Deepgram", - "AgentV1UpdateSpeakSpeakOneItemProvider_DeepgramParams", - "AgentV1UpdateSpeakSpeakOneItemProvider_ElevenLabs", - "AgentV1UpdateSpeakSpeakOneItemProvider_ElevenLabsParams", - "AgentV1UpdateSpeakSpeakOneItemProvider_OpenAi", - "AgentV1UpdateSpeakSpeakOneItemProvider_OpenAiParams", 
"AgentV1UpdateSpeakSpeakParams", - "AgentV1UpdateSpeakType", + "AgentV1UpdateThink", + "AgentV1UpdateThinkParams", + "AgentV1UpdateThinkThink", + "AgentV1UpdateThinkThinkParams", "AgentV1UserStartedSpeaking", "AgentV1UserStartedSpeakingParams", - "AgentV1UserStartedSpeakingType", "AgentV1Warning", "AgentV1WarningParams", - "AgentV1WarningType", "AgentV1Welcome", "AgentV1WelcomeParams", - "AgentV1WelcomeType", - "Cartesia", - "CartesiaParams", - "Deepgram", - "DeepgramParams", - "ElevenLabs", - "ElevenLabsParams", - "Max", "settings", ] diff --git a/src/deepgram/agent/v1/requests/__init__.py b/src/deepgram/agent/v1/requests/__init__.py index c3ace7bb..381a3538 100644 --- a/src/deepgram/agent/v1/requests/__init__.py +++ b/src/deepgram/agent/v1/requests/__init__.py @@ -42,138 +42,22 @@ from .agent_v1settings_agent_listen_provider_v1 import AgentV1SettingsAgentListenProviderV1Params from .agent_v1settings_agent_listen_provider_v2 import AgentV1SettingsAgentListenProviderV2Params from .agent_v1settings_agent_speak import AgentV1SettingsAgentSpeakParams - from .agent_v1settings_agent_speak_endpoint import AgentV1SettingsAgentSpeakEndpointParams - from .agent_v1settings_agent_speak_endpoint_endpoint import AgentV1SettingsAgentSpeakEndpointEndpointParams - from .agent_v1settings_agent_speak_endpoint_provider import ( - AgentV1SettingsAgentSpeakEndpointProviderParams, - AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams, - AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams, - AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams, - AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams, - AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams, - ) - from .agent_v1settings_agent_speak_endpoint_provider_aws_polly import ( - AgentV1SettingsAgentSpeakEndpointProviderAwsPollyParams, - ) - from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials import ( - AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams, - ) - from 
.agent_v1settings_agent_speak_endpoint_provider_cartesia_voice import ( - AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams, - ) - from .agent_v1settings_agent_speak_endpoint_provider_open_ai import ( - AgentV1SettingsAgentSpeakEndpointProviderOpenAiParams, - ) - from .agent_v1settings_agent_speak_one_item import AgentV1SettingsAgentSpeakOneItemParams - from .agent_v1settings_agent_speak_one_item_endpoint import AgentV1SettingsAgentSpeakOneItemEndpointParams - from .agent_v1settings_agent_speak_one_item_provider import ( - AgentV1SettingsAgentSpeakOneItemProviderParams, - AgentV1SettingsAgentSpeakOneItemProvider_AwsPollyParams, - AgentV1SettingsAgentSpeakOneItemProvider_CartesiaParams, - AgentV1SettingsAgentSpeakOneItemProvider_DeepgramParams, - AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabsParams, - AgentV1SettingsAgentSpeakOneItemProvider_OpenAiParams, - ) - from .agent_v1settings_agent_speak_one_item_provider_aws_polly import ( - AgentV1SettingsAgentSpeakOneItemProviderAwsPollyParams, - ) - from .agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials import ( - AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsParams, - ) - from .agent_v1settings_agent_speak_one_item_provider_cartesia_voice import ( - AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoiceParams, - ) - from .agent_v1settings_agent_speak_one_item_provider_open_ai import ( - AgentV1SettingsAgentSpeakOneItemProviderOpenAiParams, - ) from .agent_v1settings_agent_think import AgentV1SettingsAgentThinkParams - from .agent_v1settings_agent_think_one_item import AgentV1SettingsAgentThinkOneItemParams - from .agent_v1settings_agent_think_one_item_context_length import ( - AgentV1SettingsAgentThinkOneItemContextLengthParams, - ) - from .agent_v1settings_agent_think_one_item_endpoint import AgentV1SettingsAgentThinkOneItemEndpointParams - from .agent_v1settings_agent_think_one_item_functions_item import ( - AgentV1SettingsAgentThinkOneItemFunctionsItemParams, - ) - from 
.agent_v1settings_agent_think_one_item_functions_item_endpoint import ( - AgentV1SettingsAgentThinkOneItemFunctionsItemEndpointParams, - ) - from .agent_v1settings_agent_think_one_item_provider import AgentV1SettingsAgentThinkOneItemProviderParams from .agent_v1settings_applied import AgentV1SettingsAppliedParams from .agent_v1settings_audio import AgentV1SettingsAudioParams from .agent_v1settings_audio_input import AgentV1SettingsAudioInputParams from .agent_v1settings_audio_output import AgentV1SettingsAudioOutputParams from .agent_v1settings_flags import AgentV1SettingsFlagsParams from .agent_v1speak_updated import AgentV1SpeakUpdatedParams + from .agent_v1think_updated import AgentV1ThinkUpdatedParams from .agent_v1update_prompt import AgentV1UpdatePromptParams from .agent_v1update_speak import AgentV1UpdateSpeakParams from .agent_v1update_speak_speak import AgentV1UpdateSpeakSpeakParams - from .agent_v1update_speak_speak_endpoint import AgentV1UpdateSpeakSpeakEndpointParams - from .agent_v1update_speak_speak_endpoint_endpoint import AgentV1UpdateSpeakSpeakEndpointEndpointParams - from .agent_v1update_speak_speak_endpoint_provider import ( - AgentV1UpdateSpeakSpeakEndpointProviderParams, - AgentV1UpdateSpeakSpeakEndpointProvider_AwsPollyParams, - AgentV1UpdateSpeakSpeakEndpointProvider_CartesiaParams, - AgentV1UpdateSpeakSpeakEndpointProvider_DeepgramParams, - AgentV1UpdateSpeakSpeakEndpointProvider_ElevenLabsParams, - AgentV1UpdateSpeakSpeakEndpointProvider_OpenAiParams, - ) - from .agent_v1update_speak_speak_endpoint_provider_aws_polly import ( - AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyParams, - ) - from .agent_v1update_speak_speak_endpoint_provider_aws_polly_credentials import ( - AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentialsParams, - ) - from .agent_v1update_speak_speak_endpoint_provider_cartesia import ( - AgentV1UpdateSpeakSpeakEndpointProviderCartesiaParams, - ) - from .agent_v1update_speak_speak_endpoint_provider_cartesia_voice 
import ( - AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVoiceParams, - ) - from .agent_v1update_speak_speak_endpoint_provider_deepgram import ( - AgentV1UpdateSpeakSpeakEndpointProviderDeepgramParams, - ) - from .agent_v1update_speak_speak_endpoint_provider_eleven_labs import ( - AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsParams, - ) - from .agent_v1update_speak_speak_endpoint_provider_open_ai import ( - AgentV1UpdateSpeakSpeakEndpointProviderOpenAiParams, - ) - from .agent_v1update_speak_speak_one_item import AgentV1UpdateSpeakSpeakOneItemParams - from .agent_v1update_speak_speak_one_item_endpoint import AgentV1UpdateSpeakSpeakOneItemEndpointParams - from .agent_v1update_speak_speak_one_item_provider import ( - AgentV1UpdateSpeakSpeakOneItemProviderParams, - AgentV1UpdateSpeakSpeakOneItemProvider_AwsPollyParams, - AgentV1UpdateSpeakSpeakOneItemProvider_CartesiaParams, - AgentV1UpdateSpeakSpeakOneItemProvider_DeepgramParams, - AgentV1UpdateSpeakSpeakOneItemProvider_ElevenLabsParams, - AgentV1UpdateSpeakSpeakOneItemProvider_OpenAiParams, - ) - from .agent_v1update_speak_speak_one_item_provider_aws_polly import ( - AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyParams, - ) - from .agent_v1update_speak_speak_one_item_provider_aws_polly_credentials import ( - AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentialsParams, - ) - from .agent_v1update_speak_speak_one_item_provider_cartesia import ( - AgentV1UpdateSpeakSpeakOneItemProviderCartesiaParams, - ) - from .agent_v1update_speak_speak_one_item_provider_cartesia_voice import ( - AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVoiceParams, - ) - from .agent_v1update_speak_speak_one_item_provider_deepgram import ( - AgentV1UpdateSpeakSpeakOneItemProviderDeepgramParams, - ) - from .agent_v1update_speak_speak_one_item_provider_eleven_labs import ( - AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsParams, - ) - from .agent_v1update_speak_speak_one_item_provider_open_ai import 
AgentV1UpdateSpeakSpeakOneItemProviderOpenAiParams + from .agent_v1update_think import AgentV1UpdateThinkParams + from .agent_v1update_think_think import AgentV1UpdateThinkThinkParams from .agent_v1user_started_speaking import AgentV1UserStartedSpeakingParams from .agent_v1warning import AgentV1WarningParams from .agent_v1welcome import AgentV1WelcomeParams - from .cartesia import CartesiaParams - from .deepgram import DeepgramParams - from .eleven_labs import ElevenLabsParams _dynamic_imports: typing.Dict[str, str] = { "AgentV1AgentAudioDoneParams": ".agent_v1agent_audio_done", "AgentV1AgentStartedSpeakingParams": ".agent_v1agent_started_speaking", @@ -201,37 +85,7 @@ "AgentV1SettingsAgentListenProvider_V1Params": ".agent_v1settings_agent_listen_provider", "AgentV1SettingsAgentListenProvider_V2Params": ".agent_v1settings_agent_listen_provider", "AgentV1SettingsAgentParams": ".agent_v1settings_agent", - "AgentV1SettingsAgentSpeakEndpointEndpointParams": ".agent_v1settings_agent_speak_endpoint_endpoint", - "AgentV1SettingsAgentSpeakEndpointParams": ".agent_v1settings_agent_speak_endpoint", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams": ".agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyParams": ".agent_v1settings_agent_speak_endpoint_provider_aws_polly", - "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams": ".agent_v1settings_agent_speak_endpoint_provider_cartesia_voice", - "AgentV1SettingsAgentSpeakEndpointProviderOpenAiParams": ".agent_v1settings_agent_speak_endpoint_provider_open_ai", - "AgentV1SettingsAgentSpeakEndpointProviderParams": ".agent_v1settings_agent_speak_endpoint_provider", - "AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams": ".agent_v1settings_agent_speak_endpoint_provider", - "AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams": ".agent_v1settings_agent_speak_endpoint_provider", - 
"AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams": ".agent_v1settings_agent_speak_endpoint_provider", - "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams": ".agent_v1settings_agent_speak_endpoint_provider", - "AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams": ".agent_v1settings_agent_speak_endpoint_provider", - "AgentV1SettingsAgentSpeakOneItemEndpointParams": ".agent_v1settings_agent_speak_one_item_endpoint", - "AgentV1SettingsAgentSpeakOneItemParams": ".agent_v1settings_agent_speak_one_item", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsParams": ".agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyParams": ".agent_v1settings_agent_speak_one_item_provider_aws_polly", - "AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoiceParams": ".agent_v1settings_agent_speak_one_item_provider_cartesia_voice", - "AgentV1SettingsAgentSpeakOneItemProviderOpenAiParams": ".agent_v1settings_agent_speak_one_item_provider_open_ai", - "AgentV1SettingsAgentSpeakOneItemProviderParams": ".agent_v1settings_agent_speak_one_item_provider", - "AgentV1SettingsAgentSpeakOneItemProvider_AwsPollyParams": ".agent_v1settings_agent_speak_one_item_provider", - "AgentV1SettingsAgentSpeakOneItemProvider_CartesiaParams": ".agent_v1settings_agent_speak_one_item_provider", - "AgentV1SettingsAgentSpeakOneItemProvider_DeepgramParams": ".agent_v1settings_agent_speak_one_item_provider", - "AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabsParams": ".agent_v1settings_agent_speak_one_item_provider", - "AgentV1SettingsAgentSpeakOneItemProvider_OpenAiParams": ".agent_v1settings_agent_speak_one_item_provider", "AgentV1SettingsAgentSpeakParams": ".agent_v1settings_agent_speak", - "AgentV1SettingsAgentThinkOneItemContextLengthParams": ".agent_v1settings_agent_think_one_item_context_length", - "AgentV1SettingsAgentThinkOneItemEndpointParams": ".agent_v1settings_agent_think_one_item_endpoint", - 
"AgentV1SettingsAgentThinkOneItemFunctionsItemEndpointParams": ".agent_v1settings_agent_think_one_item_functions_item_endpoint", - "AgentV1SettingsAgentThinkOneItemFunctionsItemParams": ".agent_v1settings_agent_think_one_item_functions_item", - "AgentV1SettingsAgentThinkOneItemParams": ".agent_v1settings_agent_think_one_item", - "AgentV1SettingsAgentThinkOneItemProviderParams": ".agent_v1settings_agent_think_one_item_provider", "AgentV1SettingsAgentThinkParams": ".agent_v1settings_agent_think", "AgentV1SettingsAppliedParams": ".agent_v1settings_applied", "AgentV1SettingsAudioInputParams": ".agent_v1settings_audio_input", @@ -240,45 +94,15 @@ "AgentV1SettingsFlagsParams": ".agent_v1settings_flags", "AgentV1SettingsParams": ".agent_v1settings", "AgentV1SpeakUpdatedParams": ".agent_v1speak_updated", + "AgentV1ThinkUpdatedParams": ".agent_v1think_updated", "AgentV1UpdatePromptParams": ".agent_v1update_prompt", "AgentV1UpdateSpeakParams": ".agent_v1update_speak", - "AgentV1UpdateSpeakSpeakEndpointEndpointParams": ".agent_v1update_speak_speak_endpoint_endpoint", - "AgentV1UpdateSpeakSpeakEndpointParams": ".agent_v1update_speak_speak_endpoint", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentialsParams": ".agent_v1update_speak_speak_endpoint_provider_aws_polly_credentials", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyParams": ".agent_v1update_speak_speak_endpoint_provider_aws_polly", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesiaParams": ".agent_v1update_speak_speak_endpoint_provider_cartesia", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVoiceParams": ".agent_v1update_speak_speak_endpoint_provider_cartesia_voice", - "AgentV1UpdateSpeakSpeakEndpointProviderDeepgramParams": ".agent_v1update_speak_speak_endpoint_provider_deepgram", - "AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsParams": ".agent_v1update_speak_speak_endpoint_provider_eleven_labs", - "AgentV1UpdateSpeakSpeakEndpointProviderOpenAiParams": 
".agent_v1update_speak_speak_endpoint_provider_open_ai", - "AgentV1UpdateSpeakSpeakEndpointProviderParams": ".agent_v1update_speak_speak_endpoint_provider", - "AgentV1UpdateSpeakSpeakEndpointProvider_AwsPollyParams": ".agent_v1update_speak_speak_endpoint_provider", - "AgentV1UpdateSpeakSpeakEndpointProvider_CartesiaParams": ".agent_v1update_speak_speak_endpoint_provider", - "AgentV1UpdateSpeakSpeakEndpointProvider_DeepgramParams": ".agent_v1update_speak_speak_endpoint_provider", - "AgentV1UpdateSpeakSpeakEndpointProvider_ElevenLabsParams": ".agent_v1update_speak_speak_endpoint_provider", - "AgentV1UpdateSpeakSpeakEndpointProvider_OpenAiParams": ".agent_v1update_speak_speak_endpoint_provider", - "AgentV1UpdateSpeakSpeakOneItemEndpointParams": ".agent_v1update_speak_speak_one_item_endpoint", - "AgentV1UpdateSpeakSpeakOneItemParams": ".agent_v1update_speak_speak_one_item", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentialsParams": ".agent_v1update_speak_speak_one_item_provider_aws_polly_credentials", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyParams": ".agent_v1update_speak_speak_one_item_provider_aws_polly", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesiaParams": ".agent_v1update_speak_speak_one_item_provider_cartesia", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVoiceParams": ".agent_v1update_speak_speak_one_item_provider_cartesia_voice", - "AgentV1UpdateSpeakSpeakOneItemProviderDeepgramParams": ".agent_v1update_speak_speak_one_item_provider_deepgram", - "AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsParams": ".agent_v1update_speak_speak_one_item_provider_eleven_labs", - "AgentV1UpdateSpeakSpeakOneItemProviderOpenAiParams": ".agent_v1update_speak_speak_one_item_provider_open_ai", - "AgentV1UpdateSpeakSpeakOneItemProviderParams": ".agent_v1update_speak_speak_one_item_provider", - "AgentV1UpdateSpeakSpeakOneItemProvider_AwsPollyParams": ".agent_v1update_speak_speak_one_item_provider", - "AgentV1UpdateSpeakSpeakOneItemProvider_CartesiaParams": 
".agent_v1update_speak_speak_one_item_provider", - "AgentV1UpdateSpeakSpeakOneItemProvider_DeepgramParams": ".agent_v1update_speak_speak_one_item_provider", - "AgentV1UpdateSpeakSpeakOneItemProvider_ElevenLabsParams": ".agent_v1update_speak_speak_one_item_provider", - "AgentV1UpdateSpeakSpeakOneItemProvider_OpenAiParams": ".agent_v1update_speak_speak_one_item_provider", "AgentV1UpdateSpeakSpeakParams": ".agent_v1update_speak_speak", + "AgentV1UpdateThinkParams": ".agent_v1update_think", + "AgentV1UpdateThinkThinkParams": ".agent_v1update_think_think", "AgentV1UserStartedSpeakingParams": ".agent_v1user_started_speaking", "AgentV1WarningParams": ".agent_v1warning", "AgentV1WelcomeParams": ".agent_v1welcome", - "CartesiaParams": ".cartesia", - "DeepgramParams": ".deepgram", - "ElevenLabsParams": ".eleven_labs", } @@ -330,37 +154,7 @@ def __dir__(): "AgentV1SettingsAgentListenProvider_V1Params", "AgentV1SettingsAgentListenProvider_V2Params", "AgentV1SettingsAgentParams", - "AgentV1SettingsAgentSpeakEndpointEndpointParams", - "AgentV1SettingsAgentSpeakEndpointParams", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyParams", - "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams", - "AgentV1SettingsAgentSpeakEndpointProviderOpenAiParams", - "AgentV1SettingsAgentSpeakEndpointProviderParams", - "AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams", - "AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams", - "AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams", - "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams", - "AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams", - "AgentV1SettingsAgentSpeakOneItemEndpointParams", - "AgentV1SettingsAgentSpeakOneItemParams", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsParams", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyParams", - "AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoiceParams", - 
"AgentV1SettingsAgentSpeakOneItemProviderOpenAiParams", - "AgentV1SettingsAgentSpeakOneItemProviderParams", - "AgentV1SettingsAgentSpeakOneItemProvider_AwsPollyParams", - "AgentV1SettingsAgentSpeakOneItemProvider_CartesiaParams", - "AgentV1SettingsAgentSpeakOneItemProvider_DeepgramParams", - "AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabsParams", - "AgentV1SettingsAgentSpeakOneItemProvider_OpenAiParams", "AgentV1SettingsAgentSpeakParams", - "AgentV1SettingsAgentThinkOneItemContextLengthParams", - "AgentV1SettingsAgentThinkOneItemEndpointParams", - "AgentV1SettingsAgentThinkOneItemFunctionsItemEndpointParams", - "AgentV1SettingsAgentThinkOneItemFunctionsItemParams", - "AgentV1SettingsAgentThinkOneItemParams", - "AgentV1SettingsAgentThinkOneItemProviderParams", "AgentV1SettingsAgentThinkParams", "AgentV1SettingsAppliedParams", "AgentV1SettingsAudioInputParams", @@ -369,43 +163,13 @@ def __dir__(): "AgentV1SettingsFlagsParams", "AgentV1SettingsParams", "AgentV1SpeakUpdatedParams", + "AgentV1ThinkUpdatedParams", "AgentV1UpdatePromptParams", "AgentV1UpdateSpeakParams", - "AgentV1UpdateSpeakSpeakEndpointEndpointParams", - "AgentV1UpdateSpeakSpeakEndpointParams", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentialsParams", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyParams", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesiaParams", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVoiceParams", - "AgentV1UpdateSpeakSpeakEndpointProviderDeepgramParams", - "AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsParams", - "AgentV1UpdateSpeakSpeakEndpointProviderOpenAiParams", - "AgentV1UpdateSpeakSpeakEndpointProviderParams", - "AgentV1UpdateSpeakSpeakEndpointProvider_AwsPollyParams", - "AgentV1UpdateSpeakSpeakEndpointProvider_CartesiaParams", - "AgentV1UpdateSpeakSpeakEndpointProvider_DeepgramParams", - "AgentV1UpdateSpeakSpeakEndpointProvider_ElevenLabsParams", - "AgentV1UpdateSpeakSpeakEndpointProvider_OpenAiParams", - 
"AgentV1UpdateSpeakSpeakOneItemEndpointParams", - "AgentV1UpdateSpeakSpeakOneItemParams", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentialsParams", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyParams", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesiaParams", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVoiceParams", - "AgentV1UpdateSpeakSpeakOneItemProviderDeepgramParams", - "AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsParams", - "AgentV1UpdateSpeakSpeakOneItemProviderOpenAiParams", - "AgentV1UpdateSpeakSpeakOneItemProviderParams", - "AgentV1UpdateSpeakSpeakOneItemProvider_AwsPollyParams", - "AgentV1UpdateSpeakSpeakOneItemProvider_CartesiaParams", - "AgentV1UpdateSpeakSpeakOneItemProvider_DeepgramParams", - "AgentV1UpdateSpeakSpeakOneItemProvider_ElevenLabsParams", - "AgentV1UpdateSpeakSpeakOneItemProvider_OpenAiParams", "AgentV1UpdateSpeakSpeakParams", + "AgentV1UpdateThinkParams", + "AgentV1UpdateThinkThinkParams", "AgentV1UserStartedSpeakingParams", "AgentV1WarningParams", "AgentV1WelcomeParams", - "CartesiaParams", - "DeepgramParams", - "ElevenLabsParams", ] diff --git a/src/deepgram/agent/v1/requests/agent_v1agent_audio_done.py b/src/deepgram/agent/v1/requests/agent_v1agent_audio_done.py index 017b2c89..43b4f013 100644 --- a/src/deepgram/agent/v1/requests/agent_v1agent_audio_done.py +++ b/src/deepgram/agent/v1/requests/agent_v1agent_audio_done.py @@ -1,11 +1,12 @@ # This file was auto-generated by Fern from our API Definition. 
+import typing + import typing_extensions -from ..types.agent_v1agent_audio_done_type import AgentV1AgentAudioDoneType class AgentV1AgentAudioDoneParams(typing_extensions.TypedDict): - type: AgentV1AgentAudioDoneType + type: typing.Literal["AgentAudioDone"] """ Message type identifier indicating the agent has finished sending audio """ diff --git a/src/deepgram/agent/v1/requests/agent_v1agent_started_speaking.py b/src/deepgram/agent/v1/requests/agent_v1agent_started_speaking.py index 85d820c3..39861c94 100644 --- a/src/deepgram/agent/v1/requests/agent_v1agent_started_speaking.py +++ b/src/deepgram/agent/v1/requests/agent_v1agent_started_speaking.py @@ -1,11 +1,12 @@ # This file was auto-generated by Fern from our API Definition. +import typing + import typing_extensions -from ..types.agent_v1agent_started_speaking_type import AgentV1AgentStartedSpeakingType class AgentV1AgentStartedSpeakingParams(typing_extensions.TypedDict): - type: AgentV1AgentStartedSpeakingType + type: typing.Literal["AgentStartedSpeaking"] """ Message type identifier for agent started speaking """ diff --git a/src/deepgram/agent/v1/requests/agent_v1agent_thinking.py b/src/deepgram/agent/v1/requests/agent_v1agent_thinking.py index 70bb9041..13434cbc 100644 --- a/src/deepgram/agent/v1/requests/agent_v1agent_thinking.py +++ b/src/deepgram/agent/v1/requests/agent_v1agent_thinking.py @@ -1,11 +1,12 @@ # This file was auto-generated by Fern from our API Definition. 
+import typing + import typing_extensions -from ..types.agent_v1agent_thinking_type import AgentV1AgentThinkingType class AgentV1AgentThinkingParams(typing_extensions.TypedDict): - type: AgentV1AgentThinkingType + type: typing.Literal["AgentThinking"] """ Message type identifier for agent thinking """ diff --git a/src/deepgram/agent/v1/requests/agent_v1conversation_text.py b/src/deepgram/agent/v1/requests/agent_v1conversation_text.py index 40534694..ea0601e3 100644 --- a/src/deepgram/agent/v1/requests/agent_v1conversation_text.py +++ b/src/deepgram/agent/v1/requests/agent_v1conversation_text.py @@ -1,12 +1,13 @@ # This file was auto-generated by Fern from our API Definition. +import typing + import typing_extensions from ..types.agent_v1conversation_text_role import AgentV1ConversationTextRole -from ..types.agent_v1conversation_text_type import AgentV1ConversationTextType class AgentV1ConversationTextParams(typing_extensions.TypedDict): - type: AgentV1ConversationTextType + type: typing.Literal["ConversationText"] """ Message type identifier for conversation text """ diff --git a/src/deepgram/agent/v1/requests/agent_v1error.py b/src/deepgram/agent/v1/requests/agent_v1error.py index f85ab797..23547cb7 100644 --- a/src/deepgram/agent/v1/requests/agent_v1error.py +++ b/src/deepgram/agent/v1/requests/agent_v1error.py @@ -1,11 +1,12 @@ # This file was auto-generated by Fern from our API Definition. 
+import typing + import typing_extensions -from ..types.agent_v1error_type import AgentV1ErrorType class AgentV1ErrorParams(typing_extensions.TypedDict): - type: AgentV1ErrorType + type: typing.Literal["Error"] """ Message type identifier for error responses """ diff --git a/src/deepgram/agent/v1/requests/agent_v1function_call_request.py b/src/deepgram/agent/v1/requests/agent_v1function_call_request.py index f65aac14..b00cc6d4 100644 --- a/src/deepgram/agent/v1/requests/agent_v1function_call_request.py +++ b/src/deepgram/agent/v1/requests/agent_v1function_call_request.py @@ -3,12 +3,11 @@ import typing import typing_extensions -from ..types.agent_v1function_call_request_type import AgentV1FunctionCallRequestType from .agent_v1function_call_request_functions_item import AgentV1FunctionCallRequestFunctionsItemParams class AgentV1FunctionCallRequestParams(typing_extensions.TypedDict): - type: AgentV1FunctionCallRequestType + type: typing.Literal["FunctionCallRequest"] """ Message type identifier for function call requests """ diff --git a/src/deepgram/agent/v1/requests/agent_v1function_call_request_functions_item.py b/src/deepgram/agent/v1/requests/agent_v1function_call_request_functions_item.py index bdc26719..e69cf5b9 100644 --- a/src/deepgram/agent/v1/requests/agent_v1function_call_request_functions_item.py +++ b/src/deepgram/agent/v1/requests/agent_v1function_call_request_functions_item.py @@ -23,3 +23,8 @@ class AgentV1FunctionCallRequestFunctionsItemParams(typing_extensions.TypedDict) """ Whether the function should be executed client-side """ + + thought_signature: typing_extensions.NotRequired[str] + """ + Some Gemini models require this as an additional function call identifier + """ diff --git a/src/deepgram/agent/v1/requests/agent_v1inject_agent_message.py b/src/deepgram/agent/v1/requests/agent_v1inject_agent_message.py index 8ef4156d..8fb718bd 100644 --- a/src/deepgram/agent/v1/requests/agent_v1inject_agent_message.py +++ 
b/src/deepgram/agent/v1/requests/agent_v1inject_agent_message.py @@ -1,11 +1,12 @@ # This file was auto-generated by Fern from our API Definition. +import typing + import typing_extensions -from ..types.agent_v1inject_agent_message_type import AgentV1InjectAgentMessageType class AgentV1InjectAgentMessageParams(typing_extensions.TypedDict): - type: AgentV1InjectAgentMessageType + type: typing.Literal["InjectAgentMessage"] """ Message type identifier for injecting an agent message """ diff --git a/src/deepgram/agent/v1/requests/agent_v1inject_user_message.py b/src/deepgram/agent/v1/requests/agent_v1inject_user_message.py index ac62ca64..86583a81 100644 --- a/src/deepgram/agent/v1/requests/agent_v1inject_user_message.py +++ b/src/deepgram/agent/v1/requests/agent_v1inject_user_message.py @@ -1,11 +1,12 @@ # This file was auto-generated by Fern from our API Definition. +import typing + import typing_extensions -from ..types.agent_v1inject_user_message_type import AgentV1InjectUserMessageType class AgentV1InjectUserMessageParams(typing_extensions.TypedDict): - type: AgentV1InjectUserMessageType + type: typing.Literal["InjectUserMessage"] """ Message type identifier for injecting a user message """ diff --git a/src/deepgram/agent/v1/requests/agent_v1injection_refused.py b/src/deepgram/agent/v1/requests/agent_v1injection_refused.py index 639766fe..e19f3241 100644 --- a/src/deepgram/agent/v1/requests/agent_v1injection_refused.py +++ b/src/deepgram/agent/v1/requests/agent_v1injection_refused.py @@ -1,11 +1,12 @@ # This file was auto-generated by Fern from our API Definition. 
+import typing + import typing_extensions -from ..types.agent_v1injection_refused_type import AgentV1InjectionRefusedType class AgentV1InjectionRefusedParams(typing_extensions.TypedDict): - type: AgentV1InjectionRefusedType + type: typing.Literal["InjectionRefused"] """ Message type identifier for injection refused """ diff --git a/src/deepgram/agent/v1/requests/agent_v1keep_alive.py b/src/deepgram/agent/v1/requests/agent_v1keep_alive.py index 6326cc3f..125eb8ae 100644 --- a/src/deepgram/agent/v1/requests/agent_v1keep_alive.py +++ b/src/deepgram/agent/v1/requests/agent_v1keep_alive.py @@ -1,7 +1,8 @@ # This file was auto-generated by Fern from our API Definition. +import typing + import typing_extensions -from ..types.agent_v1keep_alive_type import AgentV1KeepAliveType class AgentV1KeepAliveParams(typing_extensions.TypedDict): @@ -9,7 +10,7 @@ class AgentV1KeepAliveParams(typing_extensions.TypedDict): Send a control message to the agent """ - type: AgentV1KeepAliveType + type: typing.Literal["KeepAlive"] """ Message type identifier """ diff --git a/src/deepgram/agent/v1/requests/agent_v1prompt_updated.py b/src/deepgram/agent/v1/requests/agent_v1prompt_updated.py index 7d471c2a..40d5a426 100644 --- a/src/deepgram/agent/v1/requests/agent_v1prompt_updated.py +++ b/src/deepgram/agent/v1/requests/agent_v1prompt_updated.py @@ -1,11 +1,12 @@ # This file was auto-generated by Fern from our API Definition. 
+import typing + import typing_extensions -from ..types.agent_v1prompt_updated_type import AgentV1PromptUpdatedType class AgentV1PromptUpdatedParams(typing_extensions.TypedDict): - type: AgentV1PromptUpdatedType + type: typing.Literal["PromptUpdated"] """ Message type identifier for prompt update confirmation """ diff --git a/src/deepgram/agent/v1/requests/agent_v1receive_function_call_response.py b/src/deepgram/agent/v1/requests/agent_v1receive_function_call_response.py index 988bafbf..f26fda70 100644 --- a/src/deepgram/agent/v1/requests/agent_v1receive_function_call_response.py +++ b/src/deepgram/agent/v1/requests/agent_v1receive_function_call_response.py @@ -1,7 +1,8 @@ # This file was auto-generated by Fern from our API Definition. +import typing + import typing_extensions -from ..types.agent_v1receive_function_call_response_type import AgentV1ReceiveFunctionCallResponseType class AgentV1ReceiveFunctionCallResponseParams(typing_extensions.TypedDict): @@ -17,7 +18,7 @@ class AgentV1ReceiveFunctionCallResponseParams(typing_extensions.TypedDict): interface for function call responses regardless of execution location. """ - type: AgentV1ReceiveFunctionCallResponseType + type: typing.Literal["FunctionCallResponse"] """ Message type identifier for function call responses """ diff --git a/src/deepgram/agent/v1/requests/agent_v1send_function_call_response.py b/src/deepgram/agent/v1/requests/agent_v1send_function_call_response.py index 87d13ae3..8d75edeb 100644 --- a/src/deepgram/agent/v1/requests/agent_v1send_function_call_response.py +++ b/src/deepgram/agent/v1/requests/agent_v1send_function_call_response.py @@ -1,7 +1,8 @@ # This file was auto-generated by Fern from our API Definition. 
+import typing + import typing_extensions -from ..types.agent_v1send_function_call_response_type import AgentV1SendFunctionCallResponseType class AgentV1SendFunctionCallResponseParams(typing_extensions.TypedDict): @@ -17,7 +18,7 @@ class AgentV1SendFunctionCallResponseParams(typing_extensions.TypedDict): interface for function call responses regardless of execution location. """ - type: AgentV1SendFunctionCallResponseType + type: typing.Literal["FunctionCallResponse"] """ Message type identifier for function call responses """ diff --git a/src/deepgram/agent/v1/requests/agent_v1settings.py b/src/deepgram/agent/v1/requests/agent_v1settings.py index a1da08ec..2f748dcd 100644 --- a/src/deepgram/agent/v1/requests/agent_v1settings.py +++ b/src/deepgram/agent/v1/requests/agent_v1settings.py @@ -3,14 +3,13 @@ import typing import typing_extensions -from ..types.agent_v1settings_type import AgentV1SettingsType from .agent_v1settings_agent import AgentV1SettingsAgentParams from .agent_v1settings_audio import AgentV1SettingsAudioParams from .agent_v1settings_flags import AgentV1SettingsFlagsParams class AgentV1SettingsParams(typing_extensions.TypedDict): - type: AgentV1SettingsType + type: typing.Literal["Settings"] tags: typing_extensions.NotRequired[typing.Sequence[str]] """ Tags to associate with the request diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_content.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_content.py index cbb225de..1a541ffc 100644 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_content.py +++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_content.py @@ -1,12 +1,11 @@ # This file was auto-generated by Fern from our API Definition. 
+import typing + import typing_extensions from ..types.agent_v1settings_agent_context_messages_item_content_role import ( AgentV1SettingsAgentContextMessagesItemContentRole, ) -from ..types.agent_v1settings_agent_context_messages_item_content_type import ( - AgentV1SettingsAgentContextMessagesItemContentType, -) class AgentV1SettingsAgentContextMessagesItemContentParams(typing_extensions.TypedDict): @@ -14,7 +13,7 @@ class AgentV1SettingsAgentContextMessagesItemContentParams(typing_extensions.Typ Conversation text as part of the conversation history """ - type: AgentV1SettingsAgentContextMessagesItemContentType + type: typing.Literal["History"] """ Message type identifier for conversation text """ diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_function_calls.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_function_calls.py index 2a5ebd8f..cdc5733c 100644 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_function_calls.py +++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_function_calls.py @@ -3,9 +3,6 @@ import typing import typing_extensions -from ..types.agent_v1settings_agent_context_messages_item_function_calls_type import ( - AgentV1SettingsAgentContextMessagesItemFunctionCallsType, -) from .agent_v1settings_agent_context_messages_item_function_calls_function_calls_item import ( AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItemParams, ) @@ -16,7 +13,7 @@ class AgentV1SettingsAgentContextMessagesItemFunctionCallsParams(typing_extensio Client-side or server-side function call request and response as part of the conversation history """ - type: AgentV1SettingsAgentContextMessagesItemFunctionCallsType + type: typing.Literal["History"] function_calls: typing.Sequence[AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItemParams] """ List of function call objects diff --git 
a/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_function_calls_function_calls_item.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_function_calls_function_calls_item.py index 9efeb23e..bceedcff 100644 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_function_calls_function_calls_item.py +++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_context_messages_item_function_calls_function_calls_item.py @@ -28,3 +28,8 @@ class AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItemParam """ Response from the function call """ + + thought_signature: typing_extensions.NotRequired[str] + """ + Some Gemini models require this as an additional function call identifier + """ diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider.py index b5fdec2e..1b5b47e8 100644 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider.py +++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider.py @@ -5,13 +5,11 @@ import typing import typing_extensions -from ..types.agent_v1settings_agent_listen_provider_v1type import AgentV1SettingsAgentListenProviderV1Type -from ..types.agent_v1settings_agent_listen_provider_v2type import AgentV1SettingsAgentListenProviderV2Type class AgentV1SettingsAgentListenProvider_V1Params(typing_extensions.TypedDict): version: typing.Literal["v1"] - type: AgentV1SettingsAgentListenProviderV1Type + type: typing.Literal["deepgram"] model: typing_extensions.NotRequired[str] language: typing_extensions.NotRequired[str] keyterms: typing_extensions.NotRequired[typing.Sequence[str]] @@ -20,7 +18,7 @@ class AgentV1SettingsAgentListenProvider_V1Params(typing_extensions.TypedDict): class AgentV1SettingsAgentListenProvider_V2Params(typing_extensions.TypedDict): version: typing.Literal["v2"] - type: 
AgentV1SettingsAgentListenProviderV2Type + type: typing.Literal["deepgram"] model: str keyterms: typing_extensions.NotRequired[typing.Sequence[str]] diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider_v1.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider_v1.py index 93cb859d..0fd4e61d 100644 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider_v1.py +++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider_v1.py @@ -3,11 +3,10 @@ import typing import typing_extensions -from ..types.agent_v1settings_agent_listen_provider_v1type import AgentV1SettingsAgentListenProviderV1Type class AgentV1SettingsAgentListenProviderV1Params(typing_extensions.TypedDict): - type: AgentV1SettingsAgentListenProviderV1Type + type: typing.Literal["deepgram"] """ Provider type for speech-to-text """ diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider_v2.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider_v2.py index 46d2ac99..0e0e5c5a 100644 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider_v2.py +++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_listen_provider_v2.py @@ -3,11 +3,10 @@ import typing import typing_extensions -from ..types.agent_v1settings_agent_listen_provider_v2type import AgentV1SettingsAgentListenProviderV2Type class AgentV1SettingsAgentListenProviderV2Params(typing_extensions.TypedDict): - type: AgentV1SettingsAgentListenProviderV2Type + type: typing.Literal["deepgram"] """ Provider type for speech-to-text """ diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak.py index 3ae1f7c6..f6fa232a 100644 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak.py +++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak.py @@ -2,9 +2,6 @@ import typing -from .agent_v1settings_agent_speak_endpoint 
import AgentV1SettingsAgentSpeakEndpointParams -from .agent_v1settings_agent_speak_one_item import AgentV1SettingsAgentSpeakOneItemParams +from ....requests.speak_settings_v1 import SpeakSettingsV1Params -AgentV1SettingsAgentSpeakParams = typing.Union[ - AgentV1SettingsAgentSpeakEndpointParams, typing.Sequence[AgentV1SettingsAgentSpeakOneItemParams] -] +AgentV1SettingsAgentSpeakParams = typing.Union[SpeakSettingsV1Params, typing.Sequence[SpeakSettingsV1Params]] diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint.py deleted file mode 100644 index d90614be..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint.py +++ /dev/null @@ -1,14 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions -from .agent_v1settings_agent_speak_endpoint_endpoint import AgentV1SettingsAgentSpeakEndpointEndpointParams -from .agent_v1settings_agent_speak_endpoint_provider import AgentV1SettingsAgentSpeakEndpointProviderParams - - -class AgentV1SettingsAgentSpeakEndpointParams(typing_extensions.TypedDict): - provider: AgentV1SettingsAgentSpeakEndpointProviderParams - endpoint: typing_extensions.NotRequired[AgentV1SettingsAgentSpeakEndpointEndpointParams] - """ - Optional if provider is Deepgram. Required for non-Deepgram TTS providers. - When present, must include url field and headers object. Valid schemes are https and wss with wss only supported for Eleven Labs. - """ diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_endpoint.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_endpoint.py deleted file mode 100644 index 604b9ac5..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_endpoint.py +++ /dev/null @@ -1,19 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import typing_extensions - - -class AgentV1SettingsAgentSpeakEndpointEndpointParams(typing_extensions.TypedDict): - """ - Optional if provider is Deepgram. Required for non-Deepgram TTS providers. - When present, must include url field and headers object. Valid schemes are https and wss with wss only supported for Eleven Labs. - """ - - url: typing_extensions.NotRequired[str] - """ - Custom TTS endpoint URL. Cannot contain `output_format` or `model_id` query parameters when the provider is Eleven Labs. - """ - - headers: typing_extensions.NotRequired[typing.Dict[str, str]] diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider.py deleted file mode 100644 index b786735a..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider.py +++ /dev/null @@ -1,93 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -from __future__ import annotations - -import typing - -import typing_extensions -from ..types.agent_v1settings_agent_speak_endpoint_provider_aws_polly_engine import ( - AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine, -) -from ..types.agent_v1settings_agent_speak_endpoint_provider_aws_polly_voice import ( - AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice, -) -from ..types.agent_v1settings_agent_speak_endpoint_provider_open_ai_model import ( - AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel, -) -from ..types.agent_v1settings_agent_speak_endpoint_provider_open_ai_version import ( - AgentV1SettingsAgentSpeakEndpointProviderOpenAiVersion, -) -from ..types.agent_v1settings_agent_speak_endpoint_provider_open_ai_voice import ( - AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice, -) -from ..types.agent_v1settings_agent_speak_one_item_provider_cartesia_model_id import ( - AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId, -) -from ..types.agent_v1settings_agent_speak_one_item_provider_cartesia_version import ( - AgentV1SettingsAgentSpeakOneItemProviderCartesiaVersion, -) -from ..types.agent_v1settings_agent_speak_one_item_provider_deepgram_model import ( - AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel, -) -from ..types.agent_v1settings_agent_speak_one_item_provider_deepgram_version import ( - AgentV1SettingsAgentSpeakOneItemProviderDeepgramVersion, -) -from ..types.agent_v1settings_agent_speak_one_item_provider_eleven_labs_model_id import ( - AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId, -) -from ..types.agent_v1settings_agent_speak_one_item_provider_eleven_labs_version import ( - AgentV1SettingsAgentSpeakOneItemProviderElevenLabsVersion, -) -from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials import ( - AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams, -) -from .agent_v1settings_agent_speak_one_item_provider_cartesia_voice import ( - 
AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoiceParams, -) - - -class AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams(typing_extensions.TypedDict): - type: typing.Literal["deepgram"] - version: typing_extensions.NotRequired[AgentV1SettingsAgentSpeakOneItemProviderDeepgramVersion] - model: AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel - - -class AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams(typing_extensions.TypedDict): - type: typing.Literal["eleven_labs"] - version: typing_extensions.NotRequired[AgentV1SettingsAgentSpeakOneItemProviderElevenLabsVersion] - model_id: AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId - language: typing_extensions.NotRequired[str] - language_code: typing_extensions.NotRequired[str] - - -class AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams(typing_extensions.TypedDict): - type: typing.Literal["cartesia"] - version: typing_extensions.NotRequired[AgentV1SettingsAgentSpeakOneItemProviderCartesiaVersion] - model_id: AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId - voice: AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoiceParams - language: typing_extensions.NotRequired[str] - - -class AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams(typing_extensions.TypedDict): - type: typing.Literal["open_ai"] - version: typing_extensions.NotRequired[AgentV1SettingsAgentSpeakEndpointProviderOpenAiVersion] - model: AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel - voice: AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice - - -class AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams(typing_extensions.TypedDict): - type: typing.Literal["aws_polly"] - voice: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice - language: str - language_code: typing_extensions.NotRequired[str] - engine: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine - credentials: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams - - -AgentV1SettingsAgentSpeakEndpointProviderParams = 
typing.Union[ - AgentV1SettingsAgentSpeakEndpointProvider_DeepgramParams, - AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabsParams, - AgentV1SettingsAgentSpeakEndpointProvider_CartesiaParams, - AgentV1SettingsAgentSpeakEndpointProvider_OpenAiParams, - AgentV1SettingsAgentSpeakEndpointProvider_AwsPollyParams, -] diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_aws_polly.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_aws_polly.py deleted file mode 100644 index 7335b00b..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_aws_polly.py +++ /dev/null @@ -1,32 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions -from ..types.agent_v1settings_agent_speak_endpoint_provider_aws_polly_engine import ( - AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine, -) -from ..types.agent_v1settings_agent_speak_endpoint_provider_aws_polly_voice import ( - AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice, -) -from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials import ( - AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams, -) - - -class AgentV1SettingsAgentSpeakEndpointProviderAwsPollyParams(typing_extensions.TypedDict): - voice: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice - """ - AWS Polly voice name - """ - - language: str - """ - Language code to use, e.g. 'en-US'. Corresponds to the `language_code` parameter in the AWS Polly API - """ - - language_code: typing_extensions.NotRequired[str] - """ - Use the `language` field instead. 
- """ - - engine: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine - credentials: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials.py deleted file mode 100644 index 97ad74b1..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials.py +++ /dev/null @@ -1,17 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions -from ..types.agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials_type import ( - AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType, -) - - -class AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsParams(typing_extensions.TypedDict): - type: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType - region: str - access_key_id: str - secret_access_key: str - session_token: typing_extensions.NotRequired[str] - """ - Required for STS only - """ diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_cartesia_voice.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_cartesia_voice.py deleted file mode 100644 index 51bd279a..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_cartesia_voice.py +++ /dev/null @@ -1,15 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing_extensions - - -class AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoiceParams(typing_extensions.TypedDict): - mode: str - """ - Cartesia voice mode - """ - - id: str - """ - Cartesia voice ID - """ diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_open_ai.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_open_ai.py deleted file mode 100644 index 43538f4e..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_endpoint_provider_open_ai.py +++ /dev/null @@ -1,29 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions -from ..types.agent_v1settings_agent_speak_endpoint_provider_open_ai_model import ( - AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel, -) -from ..types.agent_v1settings_agent_speak_endpoint_provider_open_ai_version import ( - AgentV1SettingsAgentSpeakEndpointProviderOpenAiVersion, -) -from ..types.agent_v1settings_agent_speak_endpoint_provider_open_ai_voice import ( - AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice, -) - - -class AgentV1SettingsAgentSpeakEndpointProviderOpenAiParams(typing_extensions.TypedDict): - version: typing_extensions.NotRequired[AgentV1SettingsAgentSpeakEndpointProviderOpenAiVersion] - """ - The REST API version for the OpenAI text-to-speech API - """ - - model: AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel - """ - OpenAI TTS model - """ - - voice: AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice - """ - OpenAI voice - """ diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item.py deleted file mode 100644 index c892106c..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item.py +++ /dev/null @@ -1,14 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing_extensions -from .agent_v1settings_agent_speak_one_item_endpoint import AgentV1SettingsAgentSpeakOneItemEndpointParams -from .agent_v1settings_agent_speak_one_item_provider import AgentV1SettingsAgentSpeakOneItemProviderParams - - -class AgentV1SettingsAgentSpeakOneItemParams(typing_extensions.TypedDict): - provider: AgentV1SettingsAgentSpeakOneItemProviderParams - endpoint: typing_extensions.NotRequired[AgentV1SettingsAgentSpeakOneItemEndpointParams] - """ - Optional if provider is Deepgram. Required for non-Deepgram TTS providers. - When present, must include url field and headers object. Valid schemes are https and wss with wss only supported for Eleven Labs. - """ diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_endpoint.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_endpoint.py deleted file mode 100644 index 8c6af538..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_endpoint.py +++ /dev/null @@ -1,19 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions - - -class AgentV1SettingsAgentSpeakOneItemEndpointParams(typing_extensions.TypedDict): - """ - Optional if provider is Deepgram. Required for non-Deepgram TTS providers. - When present, must include url field and headers object. Valid schemes are https and wss with wss only supported for Eleven Labs. - """ - - url: typing_extensions.NotRequired[str] - """ - Custom TTS endpoint URL. Cannot contain `output_format` or `model_id` query parameters when the provider is Eleven Labs. 
- """ - - headers: typing_extensions.NotRequired[typing.Dict[str, str]] diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_provider.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_provider.py deleted file mode 100644 index f9254fa7..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_provider.py +++ /dev/null @@ -1,93 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import typing - -import typing_extensions -from ..types.agent_v1settings_agent_speak_one_item_provider_aws_polly_engine import ( - AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine, -) -from ..types.agent_v1settings_agent_speak_one_item_provider_aws_polly_voice import ( - AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice, -) -from ..types.agent_v1settings_agent_speak_one_item_provider_cartesia_model_id import ( - AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId, -) -from ..types.agent_v1settings_agent_speak_one_item_provider_cartesia_version import ( - AgentV1SettingsAgentSpeakOneItemProviderCartesiaVersion, -) -from ..types.agent_v1settings_agent_speak_one_item_provider_deepgram_model import ( - AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel, -) -from ..types.agent_v1settings_agent_speak_one_item_provider_deepgram_version import ( - AgentV1SettingsAgentSpeakOneItemProviderDeepgramVersion, -) -from ..types.agent_v1settings_agent_speak_one_item_provider_eleven_labs_model_id import ( - AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId, -) -from ..types.agent_v1settings_agent_speak_one_item_provider_eleven_labs_version import ( - AgentV1SettingsAgentSpeakOneItemProviderElevenLabsVersion, -) -from ..types.agent_v1settings_agent_speak_one_item_provider_open_ai_model import ( - AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel, -) -from ..types.agent_v1settings_agent_speak_one_item_provider_open_ai_version import ( - 
AgentV1SettingsAgentSpeakOneItemProviderOpenAiVersion, -) -from ..types.agent_v1settings_agent_speak_one_item_provider_open_ai_voice import ( - AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice, -) -from .agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials import ( - AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsParams, -) -from .agent_v1settings_agent_speak_one_item_provider_cartesia_voice import ( - AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoiceParams, -) - - -class AgentV1SettingsAgentSpeakOneItemProvider_DeepgramParams(typing_extensions.TypedDict): - type: typing.Literal["deepgram"] - version: typing_extensions.NotRequired[AgentV1SettingsAgentSpeakOneItemProviderDeepgramVersion] - model: AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel - - -class AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabsParams(typing_extensions.TypedDict): - type: typing.Literal["eleven_labs"] - version: typing_extensions.NotRequired[AgentV1SettingsAgentSpeakOneItemProviderElevenLabsVersion] - model_id: AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId - language: typing_extensions.NotRequired[str] - language_code: typing_extensions.NotRequired[str] - - -class AgentV1SettingsAgentSpeakOneItemProvider_CartesiaParams(typing_extensions.TypedDict): - type: typing.Literal["cartesia"] - version: typing_extensions.NotRequired[AgentV1SettingsAgentSpeakOneItemProviderCartesiaVersion] - model_id: AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId - voice: AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoiceParams - language: typing_extensions.NotRequired[str] - - -class AgentV1SettingsAgentSpeakOneItemProvider_OpenAiParams(typing_extensions.TypedDict): - type: typing.Literal["open_ai"] - version: typing_extensions.NotRequired[AgentV1SettingsAgentSpeakOneItemProviderOpenAiVersion] - model: AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel - voice: AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice - - -class 
AgentV1SettingsAgentSpeakOneItemProvider_AwsPollyParams(typing_extensions.TypedDict): - type: typing.Literal["aws_polly"] - voice: AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice - language: str - language_code: typing_extensions.NotRequired[str] - engine: AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine - credentials: AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsParams - - -AgentV1SettingsAgentSpeakOneItemProviderParams = typing.Union[ - AgentV1SettingsAgentSpeakOneItemProvider_DeepgramParams, - AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabsParams, - AgentV1SettingsAgentSpeakOneItemProvider_CartesiaParams, - AgentV1SettingsAgentSpeakOneItemProvider_OpenAiParams, - AgentV1SettingsAgentSpeakOneItemProvider_AwsPollyParams, -] diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_provider_aws_polly.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_provider_aws_polly.py deleted file mode 100644 index 4e175186..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_provider_aws_polly.py +++ /dev/null @@ -1,32 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions -from ..types.agent_v1settings_agent_speak_one_item_provider_aws_polly_engine import ( - AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine, -) -from ..types.agent_v1settings_agent_speak_one_item_provider_aws_polly_voice import ( - AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice, -) -from .agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials import ( - AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsParams, -) - - -class AgentV1SettingsAgentSpeakOneItemProviderAwsPollyParams(typing_extensions.TypedDict): - voice: AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice - """ - AWS Polly voice name - """ - - language: str - """ - Language code to use, e.g. 'en-US'. 
Corresponds to the `language_code` parameter in the AWS Polly API - """ - - language_code: typing_extensions.NotRequired[str] - """ - Use the `language` field instead. - """ - - engine: AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine - credentials: AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsParams diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials.py deleted file mode 100644 index 7d94447a..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials.py +++ /dev/null @@ -1,17 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions -from ..types.agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials_type import ( - AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsType, -) - - -class AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsParams(typing_extensions.TypedDict): - type: AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsType - region: str - access_key_id: str - secret_access_key: str - session_token: typing_extensions.NotRequired[str] - """ - Required for STS only - """ diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_provider_cartesia_voice.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_provider_cartesia_voice.py deleted file mode 100644 index b2765075..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_provider_cartesia_voice.py +++ /dev/null @@ -1,15 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing_extensions - - -class AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoiceParams(typing_extensions.TypedDict): - mode: str - """ - Cartesia voice mode - """ - - id: str - """ - Cartesia voice ID - """ diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_provider_open_ai.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_provider_open_ai.py deleted file mode 100644 index 364a3e80..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_speak_one_item_provider_open_ai.py +++ /dev/null @@ -1,29 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions -from ..types.agent_v1settings_agent_speak_one_item_provider_open_ai_model import ( - AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel, -) -from ..types.agent_v1settings_agent_speak_one_item_provider_open_ai_version import ( - AgentV1SettingsAgentSpeakOneItemProviderOpenAiVersion, -) -from ..types.agent_v1settings_agent_speak_one_item_provider_open_ai_voice import ( - AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice, -) - - -class AgentV1SettingsAgentSpeakOneItemProviderOpenAiParams(typing_extensions.TypedDict): - version: typing_extensions.NotRequired[AgentV1SettingsAgentSpeakOneItemProviderOpenAiVersion] - """ - The REST API version for the OpenAI text-to-speech API - """ - - model: AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel - """ - OpenAI TTS model - """ - - voice: AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice - """ - OpenAI voice - """ diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think.py index f7e29e1a..4e1e9533 100644 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think.py +++ b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think.py @@ -3,8 +3,5 @@ import typing from ....requests.think_settings_v1 import ThinkSettingsV1Params -from 
.agent_v1settings_agent_think_one_item import AgentV1SettingsAgentThinkOneItemParams -AgentV1SettingsAgentThinkParams = typing.Union[ - ThinkSettingsV1Params, typing.Sequence[AgentV1SettingsAgentThinkOneItemParams] -] +AgentV1SettingsAgentThinkParams = typing.Union[ThinkSettingsV1Params, typing.Sequence[ThinkSettingsV1Params]] diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_one_item.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_one_item.py deleted file mode 100644 index e5102568..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_one_item.py +++ /dev/null @@ -1,24 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions -from .agent_v1settings_agent_think_one_item_context_length import AgentV1SettingsAgentThinkOneItemContextLengthParams -from .agent_v1settings_agent_think_one_item_endpoint import AgentV1SettingsAgentThinkOneItemEndpointParams -from .agent_v1settings_agent_think_one_item_functions_item import AgentV1SettingsAgentThinkOneItemFunctionsItemParams -from .agent_v1settings_agent_think_one_item_provider import AgentV1SettingsAgentThinkOneItemProviderParams - - -class AgentV1SettingsAgentThinkOneItemParams(typing_extensions.TypedDict): - provider: AgentV1SettingsAgentThinkOneItemProviderParams - endpoint: typing_extensions.NotRequired[AgentV1SettingsAgentThinkOneItemEndpointParams] - """ - Optional for non-Deepgram LLM providers. When present, must include url field and headers object - """ - - functions: typing_extensions.NotRequired[typing.Sequence[AgentV1SettingsAgentThinkOneItemFunctionsItemParams]] - prompt: typing_extensions.NotRequired[str] - context_length: typing_extensions.NotRequired[AgentV1SettingsAgentThinkOneItemContextLengthParams] - """ - Specifies the number of characters retained in context between user messages, agent responses, and function calls. 
This setting is only configurable when a custom think endpoint is used - """ diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_one_item_context_length.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_one_item_context_length.py deleted file mode 100644 index 2709524b..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_one_item_context_length.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ..types.max import Max - -AgentV1SettingsAgentThinkOneItemContextLengthParams = typing.Union[Max, float] diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_one_item_provider.py b/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_one_item_provider.py deleted file mode 100644 index 9725da31..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_one_item_provider.py +++ /dev/null @@ -1,13 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from ....types.anthropic import Anthropic -from ....types.aws_bedrock_think_provider import AwsBedrockThinkProvider -from ....types.google import Google -from ....types.groq import Groq -from ....types.open_ai_think_provider import OpenAiThinkProvider - -AgentV1SettingsAgentThinkOneItemProviderParams = typing.Union[ - OpenAiThinkProvider, AwsBedrockThinkProvider, Anthropic, Google, Groq -] diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_applied.py b/src/deepgram/agent/v1/requests/agent_v1settings_applied.py index e4502c84..32bca304 100644 --- a/src/deepgram/agent/v1/requests/agent_v1settings_applied.py +++ b/src/deepgram/agent/v1/requests/agent_v1settings_applied.py @@ -1,11 +1,12 @@ # This file was auto-generated by Fern from our API Definition. 
+import typing + import typing_extensions -from ..types.agent_v1settings_applied_type import AgentV1SettingsAppliedType class AgentV1SettingsAppliedParams(typing_extensions.TypedDict): - type: AgentV1SettingsAppliedType + type: typing.Literal["SettingsApplied"] """ Message type identifier for settings applied confirmation """ diff --git a/src/deepgram/agent/v1/requests/agent_v1speak_updated.py b/src/deepgram/agent/v1/requests/agent_v1speak_updated.py index c7cc28da..908d6639 100644 --- a/src/deepgram/agent/v1/requests/agent_v1speak_updated.py +++ b/src/deepgram/agent/v1/requests/agent_v1speak_updated.py @@ -1,11 +1,12 @@ # This file was auto-generated by Fern from our API Definition. +import typing + import typing_extensions -from ..types.agent_v1speak_updated_type import AgentV1SpeakUpdatedType class AgentV1SpeakUpdatedParams(typing_extensions.TypedDict): - type: AgentV1SpeakUpdatedType + type: typing.Literal["SpeakUpdated"] """ Message type identifier for speak update confirmation """ diff --git a/src/deepgram/agent/v1/requests/agent_v1think_updated.py b/src/deepgram/agent/v1/requests/agent_v1think_updated.py new file mode 100644 index 00000000..6fd752a5 --- /dev/null +++ b/src/deepgram/agent/v1/requests/agent_v1think_updated.py @@ -0,0 +1,12 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions + + +class AgentV1ThinkUpdatedParams(typing_extensions.TypedDict): + type: typing.Literal["ThinkUpdated"] + """ + Message type identifier for think update confirmation + """ diff --git a/src/deepgram/agent/v1/requests/agent_v1update_prompt.py b/src/deepgram/agent/v1/requests/agent_v1update_prompt.py index bbe1eef0..8f363a56 100644 --- a/src/deepgram/agent/v1/requests/agent_v1update_prompt.py +++ b/src/deepgram/agent/v1/requests/agent_v1update_prompt.py @@ -1,11 +1,12 @@ # This file was auto-generated by Fern from our API Definition. 
+import typing + import typing_extensions -from ..types.agent_v1update_prompt_type import AgentV1UpdatePromptType class AgentV1UpdatePromptParams(typing_extensions.TypedDict): - type: AgentV1UpdatePromptType + type: typing.Literal["UpdatePrompt"] """ Message type identifier for prompt update request """ diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak.py b/src/deepgram/agent/v1/requests/agent_v1update_speak.py index 23ffd54d..c740bd07 100644 --- a/src/deepgram/agent/v1/requests/agent_v1update_speak.py +++ b/src/deepgram/agent/v1/requests/agent_v1update_speak.py @@ -1,12 +1,13 @@ # This file was auto-generated by Fern from our API Definition. +import typing + import typing_extensions -from ..types.agent_v1update_speak_type import AgentV1UpdateSpeakType from .agent_v1update_speak_speak import AgentV1UpdateSpeakSpeakParams class AgentV1UpdateSpeakParams(typing_extensions.TypedDict): - type: AgentV1UpdateSpeakType + type: typing.Literal["UpdateSpeak"] """ Message type identifier for updating the speak model """ diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak.py index c71e5376..b37eea79 100644 --- a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak.py +++ b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak.py @@ -2,9 +2,6 @@ import typing -from .agent_v1update_speak_speak_endpoint import AgentV1UpdateSpeakSpeakEndpointParams -from .agent_v1update_speak_speak_one_item import AgentV1UpdateSpeakSpeakOneItemParams +from ....requests.speak_settings_v1 import SpeakSettingsV1Params -AgentV1UpdateSpeakSpeakParams = typing.Union[ - AgentV1UpdateSpeakSpeakEndpointParams, typing.Sequence[AgentV1UpdateSpeakSpeakOneItemParams] -] +AgentV1UpdateSpeakSpeakParams = typing.Union[SpeakSettingsV1Params, typing.Sequence[SpeakSettingsV1Params]] diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint.py 
b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint.py deleted file mode 100644 index c8fba1a8..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint.py +++ /dev/null @@ -1,14 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions -from .agent_v1update_speak_speak_endpoint_endpoint import AgentV1UpdateSpeakSpeakEndpointEndpointParams -from .agent_v1update_speak_speak_endpoint_provider import AgentV1UpdateSpeakSpeakEndpointProviderParams - - -class AgentV1UpdateSpeakSpeakEndpointParams(typing_extensions.TypedDict): - provider: AgentV1UpdateSpeakSpeakEndpointProviderParams - endpoint: typing_extensions.NotRequired[AgentV1UpdateSpeakSpeakEndpointEndpointParams] - """ - Optional if provider is Deepgram. Required for non-Deepgram TTS providers. - When present, must include url field and headers object. Valid schemes are https and wss with wss only supported for Eleven Labs. - """ diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_endpoint.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_endpoint.py deleted file mode 100644 index bfa152d1..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_endpoint.py +++ /dev/null @@ -1,19 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import typing_extensions - - -class AgentV1UpdateSpeakSpeakEndpointEndpointParams(typing_extensions.TypedDict): - """ - Optional if provider is Deepgram. Required for non-Deepgram TTS providers. - When present, must include url field and headers object. Valid schemes are https and wss with wss only supported for Eleven Labs. - """ - - url: typing_extensions.NotRequired[str] - """ - Custom TTS endpoint URL. Cannot contain `output_format` or `model_id` query parameters when the provider is Eleven Labs. 
- """ - - headers: typing_extensions.NotRequired[typing.Dict[str, str]] diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_provider.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_provider.py deleted file mode 100644 index a9f5ec70..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_provider.py +++ /dev/null @@ -1,93 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import typing - -import typing_extensions -from ..types.agent_v1update_speak_speak_endpoint_provider_aws_polly_engine import ( - AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyEngine, -) -from ..types.agent_v1update_speak_speak_endpoint_provider_aws_polly_voice import ( - AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyVoice, -) -from ..types.agent_v1update_speak_speak_endpoint_provider_cartesia_model_id import ( - AgentV1UpdateSpeakSpeakEndpointProviderCartesiaModelId, -) -from ..types.agent_v1update_speak_speak_endpoint_provider_cartesia_version import ( - AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVersion, -) -from ..types.agent_v1update_speak_speak_endpoint_provider_deepgram_model import ( - AgentV1UpdateSpeakSpeakEndpointProviderDeepgramModel, -) -from ..types.agent_v1update_speak_speak_endpoint_provider_deepgram_version import ( - AgentV1UpdateSpeakSpeakEndpointProviderDeepgramVersion, -) -from ..types.agent_v1update_speak_speak_endpoint_provider_eleven_labs_model_id import ( - AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsModelId, -) -from ..types.agent_v1update_speak_speak_endpoint_provider_eleven_labs_version import ( - AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsVersion, -) -from ..types.agent_v1update_speak_speak_endpoint_provider_open_ai_model import ( - AgentV1UpdateSpeakSpeakEndpointProviderOpenAiModel, -) -from ..types.agent_v1update_speak_speak_endpoint_provider_open_ai_version import ( - 
AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVersion, -) -from ..types.agent_v1update_speak_speak_endpoint_provider_open_ai_voice import ( - AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVoice, -) -from .agent_v1update_speak_speak_endpoint_provider_aws_polly_credentials import ( - AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentialsParams, -) -from .agent_v1update_speak_speak_endpoint_provider_cartesia_voice import ( - AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVoiceParams, -) - - -class AgentV1UpdateSpeakSpeakEndpointProvider_DeepgramParams(typing_extensions.TypedDict): - type: typing.Literal["deepgram"] - version: typing_extensions.NotRequired[AgentV1UpdateSpeakSpeakEndpointProviderDeepgramVersion] - model: AgentV1UpdateSpeakSpeakEndpointProviderDeepgramModel - - -class AgentV1UpdateSpeakSpeakEndpointProvider_ElevenLabsParams(typing_extensions.TypedDict): - type: typing.Literal["eleven_labs"] - version: typing_extensions.NotRequired[AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsVersion] - model_id: AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsModelId - language: typing_extensions.NotRequired[str] - language_code: typing_extensions.NotRequired[str] - - -class AgentV1UpdateSpeakSpeakEndpointProvider_CartesiaParams(typing_extensions.TypedDict): - type: typing.Literal["cartesia"] - version: typing_extensions.NotRequired[AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVersion] - model_id: AgentV1UpdateSpeakSpeakEndpointProviderCartesiaModelId - voice: AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVoiceParams - language: typing_extensions.NotRequired[str] - - -class AgentV1UpdateSpeakSpeakEndpointProvider_OpenAiParams(typing_extensions.TypedDict): - type: typing.Literal["open_ai"] - version: typing_extensions.NotRequired[AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVersion] - model: AgentV1UpdateSpeakSpeakEndpointProviderOpenAiModel - voice: AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVoice - - -class 
AgentV1UpdateSpeakSpeakEndpointProvider_AwsPollyParams(typing_extensions.TypedDict): - type: typing.Literal["aws_polly"] - voice: AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyVoice - language: str - language_code: typing_extensions.NotRequired[str] - engine: AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyEngine - credentials: AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentialsParams - - -AgentV1UpdateSpeakSpeakEndpointProviderParams = typing.Union[ - AgentV1UpdateSpeakSpeakEndpointProvider_DeepgramParams, - AgentV1UpdateSpeakSpeakEndpointProvider_ElevenLabsParams, - AgentV1UpdateSpeakSpeakEndpointProvider_CartesiaParams, - AgentV1UpdateSpeakSpeakEndpointProvider_OpenAiParams, - AgentV1UpdateSpeakSpeakEndpointProvider_AwsPollyParams, -] diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_provider_aws_polly.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_provider_aws_polly.py deleted file mode 100644 index ab5964ed..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_provider_aws_polly.py +++ /dev/null @@ -1,32 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions -from ..types.agent_v1update_speak_speak_endpoint_provider_aws_polly_engine import ( - AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyEngine, -) -from ..types.agent_v1update_speak_speak_endpoint_provider_aws_polly_voice import ( - AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyVoice, -) -from .agent_v1update_speak_speak_endpoint_provider_aws_polly_credentials import ( - AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentialsParams, -) - - -class AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyParams(typing_extensions.TypedDict): - voice: AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyVoice - """ - AWS Polly voice name - """ - - language: str - """ - Language code to use, e.g. 'en-US'. 
Corresponds to the `language_code` parameter in the AWS Polly API - """ - - language_code: typing_extensions.NotRequired[str] - """ - Use the `language` field instead. - """ - - engine: AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyEngine - credentials: AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentialsParams diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_provider_aws_polly_credentials.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_provider_aws_polly_credentials.py deleted file mode 100644 index 3553faa1..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_provider_aws_polly_credentials.py +++ /dev/null @@ -1,17 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions -from ..types.agent_v1update_speak_speak_endpoint_provider_aws_polly_credentials_type import ( - AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentialsType, -) - - -class AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentialsParams(typing_extensions.TypedDict): - type: AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentialsType - region: str - access_key_id: str - secret_access_key: str - session_token: typing_extensions.NotRequired[str] - """ - Required for STS only - """ diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_provider_cartesia.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_provider_cartesia.py deleted file mode 100644 index fa84c7e7..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_provider_cartesia.py +++ /dev/null @@ -1,30 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing_extensions -from ..types.agent_v1update_speak_speak_endpoint_provider_cartesia_model_id import ( - AgentV1UpdateSpeakSpeakEndpointProviderCartesiaModelId, -) -from ..types.agent_v1update_speak_speak_endpoint_provider_cartesia_version import ( - AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVersion, -) -from .agent_v1update_speak_speak_endpoint_provider_cartesia_voice import ( - AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVoiceParams, -) - - -class AgentV1UpdateSpeakSpeakEndpointProviderCartesiaParams(typing_extensions.TypedDict): - version: typing_extensions.NotRequired[AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVersion] - """ - The API version header for the Cartesia text-to-speech API - """ - - model_id: AgentV1UpdateSpeakSpeakEndpointProviderCartesiaModelId - """ - Cartesia model ID - """ - - voice: AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVoiceParams - language: typing_extensions.NotRequired[str] - """ - Cartesia language code - """ diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_provider_cartesia_voice.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_provider_cartesia_voice.py deleted file mode 100644 index 15e27850..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_provider_cartesia_voice.py +++ /dev/null @@ -1,15 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing_extensions - - -class AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVoiceParams(typing_extensions.TypedDict): - mode: str - """ - Cartesia voice mode - """ - - id: str - """ - Cartesia voice ID - """ diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_provider_deepgram.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_provider_deepgram.py deleted file mode 100644 index cb1fdb92..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_provider_deepgram.py +++ /dev/null @@ -1,21 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions -from ..types.agent_v1update_speak_speak_endpoint_provider_deepgram_model import ( - AgentV1UpdateSpeakSpeakEndpointProviderDeepgramModel, -) -from ..types.agent_v1update_speak_speak_endpoint_provider_deepgram_version import ( - AgentV1UpdateSpeakSpeakEndpointProviderDeepgramVersion, -) - - -class AgentV1UpdateSpeakSpeakEndpointProviderDeepgramParams(typing_extensions.TypedDict): - version: typing_extensions.NotRequired[AgentV1UpdateSpeakSpeakEndpointProviderDeepgramVersion] - """ - The REST API version for the Deepgram text-to-speech API - """ - - model: AgentV1UpdateSpeakSpeakEndpointProviderDeepgramModel - """ - Deepgram TTS model - """ diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_provider_eleven_labs.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_provider_eleven_labs.py deleted file mode 100644 index f6eccbdf..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_provider_eleven_labs.py +++ /dev/null @@ -1,31 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing_extensions -from ..types.agent_v1update_speak_speak_endpoint_provider_eleven_labs_model_id import ( - AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsModelId, -) -from ..types.agent_v1update_speak_speak_endpoint_provider_eleven_labs_version import ( - AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsVersion, -) - - -class AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsParams(typing_extensions.TypedDict): - version: typing_extensions.NotRequired[AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsVersion] - """ - The REST API version for the ElevenLabs text-to-speech API - """ - - model_id: AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsModelId - """ - Eleven Labs model ID - """ - - language: typing_extensions.NotRequired[str] - """ - Optional language to use, e.g. 'en-US'. Corresponds to the `language_code` parameter in the ElevenLabs API - """ - - language_code: typing_extensions.NotRequired[str] - """ - Use the `language` field instead. - """ diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_provider_open_ai.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_provider_open_ai.py deleted file mode 100644 index edf3daec..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_endpoint_provider_open_ai.py +++ /dev/null @@ -1,29 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing_extensions -from ..types.agent_v1update_speak_speak_endpoint_provider_open_ai_model import ( - AgentV1UpdateSpeakSpeakEndpointProviderOpenAiModel, -) -from ..types.agent_v1update_speak_speak_endpoint_provider_open_ai_version import ( - AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVersion, -) -from ..types.agent_v1update_speak_speak_endpoint_provider_open_ai_voice import ( - AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVoice, -) - - -class AgentV1UpdateSpeakSpeakEndpointProviderOpenAiParams(typing_extensions.TypedDict): - version: typing_extensions.NotRequired[AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVersion] - """ - The REST API version for the OpenAI text-to-speech API - """ - - model: AgentV1UpdateSpeakSpeakEndpointProviderOpenAiModel - """ - OpenAI TTS model - """ - - voice: AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVoice - """ - OpenAI voice - """ diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item.py deleted file mode 100644 index 416e766a..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item.py +++ /dev/null @@ -1,14 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions -from .agent_v1update_speak_speak_one_item_endpoint import AgentV1UpdateSpeakSpeakOneItemEndpointParams -from .agent_v1update_speak_speak_one_item_provider import AgentV1UpdateSpeakSpeakOneItemProviderParams - - -class AgentV1UpdateSpeakSpeakOneItemParams(typing_extensions.TypedDict): - provider: AgentV1UpdateSpeakSpeakOneItemProviderParams - endpoint: typing_extensions.NotRequired[AgentV1UpdateSpeakSpeakOneItemEndpointParams] - """ - Optional if provider is Deepgram. Required for non-Deepgram TTS providers. - When present, must include url field and headers object. Valid schemes are https and wss with wss only supported for Eleven Labs. 
- """ diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_provider.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_provider.py deleted file mode 100644 index bb8f3bc7..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_provider.py +++ /dev/null @@ -1,93 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import typing - -import typing_extensions -from ..types.agent_v1update_speak_speak_one_item_provider_aws_polly_engine import ( - AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyEngine, -) -from ..types.agent_v1update_speak_speak_one_item_provider_aws_polly_voice import ( - AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyVoice, -) -from ..types.agent_v1update_speak_speak_one_item_provider_cartesia_model_id import ( - AgentV1UpdateSpeakSpeakOneItemProviderCartesiaModelId, -) -from ..types.agent_v1update_speak_speak_one_item_provider_cartesia_version import ( - AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVersion, -) -from ..types.agent_v1update_speak_speak_one_item_provider_deepgram_model import ( - AgentV1UpdateSpeakSpeakOneItemProviderDeepgramModel, -) -from ..types.agent_v1update_speak_speak_one_item_provider_deepgram_version import ( - AgentV1UpdateSpeakSpeakOneItemProviderDeepgramVersion, -) -from ..types.agent_v1update_speak_speak_one_item_provider_eleven_labs_model_id import ( - AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsModelId, -) -from ..types.agent_v1update_speak_speak_one_item_provider_eleven_labs_version import ( - AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsVersion, -) -from ..types.agent_v1update_speak_speak_one_item_provider_open_ai_model import ( - AgentV1UpdateSpeakSpeakOneItemProviderOpenAiModel, -) -from ..types.agent_v1update_speak_speak_one_item_provider_open_ai_version import ( - AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVersion, -) -from 
..types.agent_v1update_speak_speak_one_item_provider_open_ai_voice import ( - AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVoice, -) -from .agent_v1update_speak_speak_one_item_provider_aws_polly_credentials import ( - AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentialsParams, -) -from .agent_v1update_speak_speak_one_item_provider_cartesia_voice import ( - AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVoiceParams, -) - - -class AgentV1UpdateSpeakSpeakOneItemProvider_DeepgramParams(typing_extensions.TypedDict): - type: typing.Literal["deepgram"] - version: typing_extensions.NotRequired[AgentV1UpdateSpeakSpeakOneItemProviderDeepgramVersion] - model: AgentV1UpdateSpeakSpeakOneItemProviderDeepgramModel - - -class AgentV1UpdateSpeakSpeakOneItemProvider_ElevenLabsParams(typing_extensions.TypedDict): - type: typing.Literal["eleven_labs"] - version: typing_extensions.NotRequired[AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsVersion] - model_id: AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsModelId - language: typing_extensions.NotRequired[str] - language_code: typing_extensions.NotRequired[str] - - -class AgentV1UpdateSpeakSpeakOneItemProvider_CartesiaParams(typing_extensions.TypedDict): - type: typing.Literal["cartesia"] - version: typing_extensions.NotRequired[AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVersion] - model_id: AgentV1UpdateSpeakSpeakOneItemProviderCartesiaModelId - voice: AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVoiceParams - language: typing_extensions.NotRequired[str] - - -class AgentV1UpdateSpeakSpeakOneItemProvider_OpenAiParams(typing_extensions.TypedDict): - type: typing.Literal["open_ai"] - version: typing_extensions.NotRequired[AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVersion] - model: AgentV1UpdateSpeakSpeakOneItemProviderOpenAiModel - voice: AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVoice - - -class AgentV1UpdateSpeakSpeakOneItemProvider_AwsPollyParams(typing_extensions.TypedDict): - type: typing.Literal["aws_polly"] - voice: 
AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyVoice - language: str - language_code: typing_extensions.NotRequired[str] - engine: AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyEngine - credentials: AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentialsParams - - -AgentV1UpdateSpeakSpeakOneItemProviderParams = typing.Union[ - AgentV1UpdateSpeakSpeakOneItemProvider_DeepgramParams, - AgentV1UpdateSpeakSpeakOneItemProvider_ElevenLabsParams, - AgentV1UpdateSpeakSpeakOneItemProvider_CartesiaParams, - AgentV1UpdateSpeakSpeakOneItemProvider_OpenAiParams, - AgentV1UpdateSpeakSpeakOneItemProvider_AwsPollyParams, -] diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_provider_aws_polly.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_provider_aws_polly.py deleted file mode 100644 index b79609a0..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_provider_aws_polly.py +++ /dev/null @@ -1,32 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions -from ..types.agent_v1update_speak_speak_one_item_provider_aws_polly_engine import ( - AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyEngine, -) -from ..types.agent_v1update_speak_speak_one_item_provider_aws_polly_voice import ( - AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyVoice, -) -from .agent_v1update_speak_speak_one_item_provider_aws_polly_credentials import ( - AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentialsParams, -) - - -class AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyParams(typing_extensions.TypedDict): - voice: AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyVoice - """ - AWS Polly voice name - """ - - language: str - """ - Language code to use, e.g. 'en-US'. Corresponds to the `language_code` parameter in the AWS Polly API - """ - - language_code: typing_extensions.NotRequired[str] - """ - Use the `language` field instead. 
- """ - - engine: AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyEngine - credentials: AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentialsParams diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_provider_aws_polly_credentials.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_provider_aws_polly_credentials.py deleted file mode 100644 index f47f7a00..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_provider_aws_polly_credentials.py +++ /dev/null @@ -1,17 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions -from ..types.agent_v1update_speak_speak_one_item_provider_aws_polly_credentials_type import ( - AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentialsType, -) - - -class AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentialsParams(typing_extensions.TypedDict): - type: AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentialsType - region: str - access_key_id: str - secret_access_key: str - session_token: typing_extensions.NotRequired[str] - """ - Required for STS only - """ diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_provider_cartesia.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_provider_cartesia.py deleted file mode 100644 index d07ba108..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_provider_cartesia.py +++ /dev/null @@ -1,30 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing_extensions -from ..types.agent_v1update_speak_speak_one_item_provider_cartesia_model_id import ( - AgentV1UpdateSpeakSpeakOneItemProviderCartesiaModelId, -) -from ..types.agent_v1update_speak_speak_one_item_provider_cartesia_version import ( - AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVersion, -) -from .agent_v1update_speak_speak_one_item_provider_cartesia_voice import ( - AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVoiceParams, -) - - -class AgentV1UpdateSpeakSpeakOneItemProviderCartesiaParams(typing_extensions.TypedDict): - version: typing_extensions.NotRequired[AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVersion] - """ - The API version header for the Cartesia text-to-speech API - """ - - model_id: AgentV1UpdateSpeakSpeakOneItemProviderCartesiaModelId - """ - Cartesia model ID - """ - - voice: AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVoiceParams - language: typing_extensions.NotRequired[str] - """ - Cartesia language code - """ diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_provider_deepgram.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_provider_deepgram.py deleted file mode 100644 index 73e08249..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_provider_deepgram.py +++ /dev/null @@ -1,21 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing_extensions -from ..types.agent_v1update_speak_speak_one_item_provider_deepgram_model import ( - AgentV1UpdateSpeakSpeakOneItemProviderDeepgramModel, -) -from ..types.agent_v1update_speak_speak_one_item_provider_deepgram_version import ( - AgentV1UpdateSpeakSpeakOneItemProviderDeepgramVersion, -) - - -class AgentV1UpdateSpeakSpeakOneItemProviderDeepgramParams(typing_extensions.TypedDict): - version: typing_extensions.NotRequired[AgentV1UpdateSpeakSpeakOneItemProviderDeepgramVersion] - """ - The REST API version for the Deepgram text-to-speech API - """ - - model: AgentV1UpdateSpeakSpeakOneItemProviderDeepgramModel - """ - Deepgram TTS model - """ diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_provider_eleven_labs.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_provider_eleven_labs.py deleted file mode 100644 index 1906bbe1..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_provider_eleven_labs.py +++ /dev/null @@ -1,31 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions -from ..types.agent_v1update_speak_speak_one_item_provider_eleven_labs_model_id import ( - AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsModelId, -) -from ..types.agent_v1update_speak_speak_one_item_provider_eleven_labs_version import ( - AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsVersion, -) - - -class AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsParams(typing_extensions.TypedDict): - version: typing_extensions.NotRequired[AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsVersion] - """ - The REST API version for the ElevenLabs text-to-speech API - """ - - model_id: AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsModelId - """ - Eleven Labs model ID - """ - - language: typing_extensions.NotRequired[str] - """ - Optional language to use, e.g. 'en-US'. 
Corresponds to the `language_code` parameter in the ElevenLabs API - """ - - language_code: typing_extensions.NotRequired[str] - """ - Use the `language` field instead. - """ diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_provider_open_ai.py b/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_provider_open_ai.py deleted file mode 100644 index 2dbd8cb9..00000000 --- a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_provider_open_ai.py +++ /dev/null @@ -1,29 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions -from ..types.agent_v1update_speak_speak_one_item_provider_open_ai_model import ( - AgentV1UpdateSpeakSpeakOneItemProviderOpenAiModel, -) -from ..types.agent_v1update_speak_speak_one_item_provider_open_ai_version import ( - AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVersion, -) -from ..types.agent_v1update_speak_speak_one_item_provider_open_ai_voice import ( - AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVoice, -) - - -class AgentV1UpdateSpeakSpeakOneItemProviderOpenAiParams(typing_extensions.TypedDict): - version: typing_extensions.NotRequired[AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVersion] - """ - The REST API version for the OpenAI text-to-speech API - """ - - model: AgentV1UpdateSpeakSpeakOneItemProviderOpenAiModel - """ - OpenAI TTS model - """ - - voice: AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVoice - """ - OpenAI voice - """ diff --git a/src/deepgram/agent/v1/requests/agent_v1update_think.py b/src/deepgram/agent/v1/requests/agent_v1update_think.py new file mode 100644 index 00000000..41e6e13e --- /dev/null +++ b/src/deepgram/agent/v1/requests/agent_v1update_think.py @@ -0,0 +1,15 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +import typing_extensions +from .agent_v1update_think_think import AgentV1UpdateThinkThinkParams + + +class AgentV1UpdateThinkParams(typing_extensions.TypedDict): + type: typing.Literal["UpdateThink"] + """ + Message type identifier for updating the think model + """ + + think: AgentV1UpdateThinkThinkParams diff --git a/src/deepgram/agent/v1/requests/agent_v1update_think_think.py b/src/deepgram/agent/v1/requests/agent_v1update_think_think.py new file mode 100644 index 00000000..ffa3ce71 --- /dev/null +++ b/src/deepgram/agent/v1/requests/agent_v1update_think_think.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ....requests.think_settings_v1 import ThinkSettingsV1Params + +AgentV1UpdateThinkThinkParams = typing.Union[ThinkSettingsV1Params, typing.Sequence[ThinkSettingsV1Params]] diff --git a/src/deepgram/agent/v1/requests/agent_v1user_started_speaking.py b/src/deepgram/agent/v1/requests/agent_v1user_started_speaking.py index 77fb500e..c883119c 100644 --- a/src/deepgram/agent/v1/requests/agent_v1user_started_speaking.py +++ b/src/deepgram/agent/v1/requests/agent_v1user_started_speaking.py @@ -1,11 +1,12 @@ # This file was auto-generated by Fern from our API Definition. +import typing + import typing_extensions -from ..types.agent_v1user_started_speaking_type import AgentV1UserStartedSpeakingType class AgentV1UserStartedSpeakingParams(typing_extensions.TypedDict): - type: AgentV1UserStartedSpeakingType + type: typing.Literal["UserStartedSpeaking"] """ Message type identifier indicating that the user has begun speaking """ diff --git a/src/deepgram/agent/v1/requests/agent_v1warning.py b/src/deepgram/agent/v1/requests/agent_v1warning.py index 0afeaedb..f1e75051 100644 --- a/src/deepgram/agent/v1/requests/agent_v1warning.py +++ b/src/deepgram/agent/v1/requests/agent_v1warning.py @@ -1,7 +1,8 @@ # This file was auto-generated by Fern from our API Definition. 
+import typing + import typing_extensions -from ..types.agent_v1warning_type import AgentV1WarningType class AgentV1WarningParams(typing_extensions.TypedDict): @@ -9,7 +10,7 @@ class AgentV1WarningParams(typing_extensions.TypedDict): Notifies the client of non-fatal errors or warnings """ - type: AgentV1WarningType + type: typing.Literal["Warning"] """ Message type identifier for warnings """ diff --git a/src/deepgram/agent/v1/requests/agent_v1welcome.py b/src/deepgram/agent/v1/requests/agent_v1welcome.py index 9804dc6f..5168a4f0 100644 --- a/src/deepgram/agent/v1/requests/agent_v1welcome.py +++ b/src/deepgram/agent/v1/requests/agent_v1welcome.py @@ -1,11 +1,12 @@ # This file was auto-generated by Fern from our API Definition. +import typing + import typing_extensions -from ..types.agent_v1welcome_type import AgentV1WelcomeType class AgentV1WelcomeParams(typing_extensions.TypedDict): - type: AgentV1WelcomeType + type: typing.Literal["Welcome"] """ Message type identifier for welcome message """ diff --git a/src/deepgram/agent/v1/requests/cartesia.py b/src/deepgram/agent/v1/requests/cartesia.py deleted file mode 100644 index 9e1d3db2..00000000 --- a/src/deepgram/agent/v1/requests/cartesia.py +++ /dev/null @@ -1,30 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing_extensions -from ..types.agent_v1settings_agent_speak_one_item_provider_cartesia_model_id import ( - AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId, -) -from ..types.agent_v1settings_agent_speak_one_item_provider_cartesia_version import ( - AgentV1SettingsAgentSpeakOneItemProviderCartesiaVersion, -) -from .agent_v1settings_agent_speak_one_item_provider_cartesia_voice import ( - AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoiceParams, -) - - -class CartesiaParams(typing_extensions.TypedDict): - version: typing_extensions.NotRequired[AgentV1SettingsAgentSpeakOneItemProviderCartesiaVersion] - """ - The API version header for the Cartesia text-to-speech API - """ - - model_id: AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId - """ - Cartesia model ID - """ - - voice: AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoiceParams - language: typing_extensions.NotRequired[str] - """ - Cartesia language code - """ diff --git a/src/deepgram/agent/v1/requests/deepgram.py b/src/deepgram/agent/v1/requests/deepgram.py deleted file mode 100644 index bafefe78..00000000 --- a/src/deepgram/agent/v1/requests/deepgram.py +++ /dev/null @@ -1,21 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing_extensions -from ..types.agent_v1settings_agent_speak_one_item_provider_deepgram_model import ( - AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel, -) -from ..types.agent_v1settings_agent_speak_one_item_provider_deepgram_version import ( - AgentV1SettingsAgentSpeakOneItemProviderDeepgramVersion, -) - - -class DeepgramParams(typing_extensions.TypedDict): - version: typing_extensions.NotRequired[AgentV1SettingsAgentSpeakOneItemProviderDeepgramVersion] - """ - The REST API version for the Deepgram text-to-speech API - """ - - model: AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel - """ - Deepgram TTS model - """ diff --git a/src/deepgram/agent/v1/requests/eleven_labs.py b/src/deepgram/agent/v1/requests/eleven_labs.py deleted file mode 100644 index 083e2c33..00000000 --- a/src/deepgram/agent/v1/requests/eleven_labs.py +++ /dev/null @@ -1,31 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions -from ..types.agent_v1settings_agent_speak_one_item_provider_eleven_labs_model_id import ( - AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId, -) -from ..types.agent_v1settings_agent_speak_one_item_provider_eleven_labs_version import ( - AgentV1SettingsAgentSpeakOneItemProviderElevenLabsVersion, -) - - -class ElevenLabsParams(typing_extensions.TypedDict): - version: typing_extensions.NotRequired[AgentV1SettingsAgentSpeakOneItemProviderElevenLabsVersion] - """ - The REST API version for the ElevenLabs text-to-speech API - """ - - model_id: AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId - """ - Eleven Labs model ID - """ - - language: typing_extensions.NotRequired[str] - """ - Optional language to use, e.g. 'en-US'. Corresponds to the `language_code` parameter in the ElevenLabs API - """ - - language_code: typing_extensions.NotRequired[str] - """ - Use the `language` field instead. 
- """ diff --git a/src/deepgram/agent/v1/socket_client.py b/src/deepgram/agent/v1/socket_client.py index ce3e4aa1..3d455f7f 100644 --- a/src/deepgram/agent/v1/socket_client.py +++ b/src/deepgram/agent/v1/socket_client.py @@ -25,8 +25,10 @@ from .types.agent_v1settings import AgentV1Settings from .types.agent_v1settings_applied import AgentV1SettingsApplied from .types.agent_v1speak_updated import AgentV1SpeakUpdated +from .types.agent_v1think_updated import AgentV1ThinkUpdated from .types.agent_v1update_prompt import AgentV1UpdatePrompt from .types.agent_v1update_speak import AgentV1UpdateSpeak +from .types.agent_v1update_think import AgentV1UpdateThink from .types.agent_v1user_started_speaking import AgentV1UserStartedSpeaking from .types.agent_v1warning import AgentV1Warning from .types.agent_v1welcome import AgentV1Welcome @@ -37,30 +39,11 @@ from websockets import WebSocketClientProtocol # type: ignore _logger = logging.getLogger(__name__) - - -def _sanitize_numeric_types(obj: typing.Any) -> typing.Any: - """ - Recursively convert float values that are whole numbers to int. - - Workaround for Fern-generated models that type integer API fields - (like sample_rate) as float, causing JSON serialization to produce - values like 44100.0 instead of 44100. The Deepgram API rejects - float representations of integer fields. 
- - See: https://github.com/deepgram/internal-api-specs/issues/205 - """ - if isinstance(obj, dict): - return {k: _sanitize_numeric_types(v) for k, v in obj.items()} - elif isinstance(obj, list): - return [_sanitize_numeric_types(item) for item in obj] - elif isinstance(obj, float) and obj.is_integer(): - return int(obj) - return obj V1SocketClientResponse = typing.Union[ AgentV1ReceiveFunctionCallResponse, AgentV1PromptUpdated, AgentV1SpeakUpdated, + AgentV1ThinkUpdated, AgentV1InjectionRefused, AgentV1Welcome, AgentV1SettingsApplied, @@ -87,7 +70,7 @@ async def __aiter__(self): yield message else: try: - yield construct_type(type_=V1SocketClientResponse, object_=json.loads(message)) # type: ignore + yield construct_type(V1SocketClientResponse, json.loads(message)) # type: ignore except Exception: _logger.warning( "Skipping unknown WebSocket message; update your SDK version to support new message types." @@ -112,14 +95,14 @@ async def start_listening(self): else: json_data = json.loads(raw_message) try: - parsed = construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore + parsed = construct_type(V1SocketClientResponse, json_data) # type: ignore except Exception: _logger.warning( "Skipping unknown WebSocket message; update your SDK version to support new message types." ) continue await self._emit_async(EventType.MESSAGE, parsed) - except Exception as exc: + except (websockets.WebSocketException, JSONDecodeError) as exc: await self._emit_async(EventType.ERROR, exc) finally: await self._emit_async(EventType.CLOSE, None) @@ -159,12 +142,12 @@ async def send_function_call_response(self, message: AgentV1SendFunctionCallResp """ await self._send_model(message) - async def send_keep_alive(self, message: typing.Optional[AgentV1KeepAlive] = None) -> None: + async def send_keep_alive(self, message: AgentV1KeepAlive) -> None: """ Send a message to the websocket connection. The message will be sent as a AgentV1KeepAlive. 
""" - await self._send_model(message or AgentV1KeepAlive(type="KeepAlive")) + await self._send_model(message) async def send_update_prompt(self, message: AgentV1UpdatePrompt) -> None: """ @@ -173,6 +156,13 @@ async def send_update_prompt(self, message: AgentV1UpdatePrompt) -> None: """ await self._send_model(message) + async def send_update_think(self, message: AgentV1UpdateThink) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a AgentV1UpdateThink. + """ + await self._send_model(message) + async def send_media(self, message: bytes) -> None: """ Send a message to the websocket connection. @@ -189,7 +179,7 @@ async def recv(self) -> V1SocketClientResponse: return data # type: ignore json_data = json.loads(data) try: - return construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore + return construct_type(V1SocketClientResponse, json_data) # type: ignore except Exception: _logger.warning("Skipping unknown WebSocket message; update your SDK version to support new message types.") return json_data # type: ignore @@ -206,7 +196,7 @@ async def _send_model(self, data: typing.Any) -> None: """ Send a Pydantic model to the websocket connection. """ - await self._send(_sanitize_numeric_types(data.dict())) + await self._send(data.dict()) class V1SocketClient(EventEmitterMixin): @@ -220,7 +210,7 @@ def __iter__(self): yield message else: try: - yield construct_type(type_=V1SocketClientResponse, object_=json.loads(message)) # type: ignore + yield construct_type(V1SocketClientResponse, json.loads(message)) # type: ignore except Exception: _logger.warning( "Skipping unknown WebSocket message; update your SDK version to support new message types." 
@@ -245,14 +235,14 @@ def start_listening(self): else: json_data = json.loads(raw_message) try: - parsed = construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore + parsed = construct_type(V1SocketClientResponse, json_data) # type: ignore except Exception: _logger.warning( "Skipping unknown WebSocket message; update your SDK version to support new message types." ) continue self._emit(EventType.MESSAGE, parsed) - except Exception as exc: + except (websockets.WebSocketException, JSONDecodeError) as exc: self._emit(EventType.ERROR, exc) finally: self._emit(EventType.CLOSE, None) @@ -292,12 +282,12 @@ def send_function_call_response(self, message: AgentV1SendFunctionCallResponse) """ self._send_model(message) - def send_keep_alive(self, message: typing.Optional[AgentV1KeepAlive] = None) -> None: + def send_keep_alive(self, message: AgentV1KeepAlive) -> None: """ Send a message to the websocket connection. The message will be sent as a AgentV1KeepAlive. """ - self._send_model(message or AgentV1KeepAlive(type="KeepAlive")) + self._send_model(message) def send_update_prompt(self, message: AgentV1UpdatePrompt) -> None: """ @@ -306,6 +296,13 @@ def send_update_prompt(self, message: AgentV1UpdatePrompt) -> None: """ self._send_model(message) + def send_update_think(self, message: AgentV1UpdateThink) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a AgentV1UpdateThink. + """ + self._send_model(message) + def send_media(self, message: bytes) -> None: """ Send a message to the websocket connection. 
@@ -322,7 +319,7 @@ def recv(self) -> V1SocketClientResponse: return data # type: ignore json_data = json.loads(data) try: - return construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore + return construct_type(V1SocketClientResponse, json_data) # type: ignore except Exception: _logger.warning("Skipping unknown WebSocket message; update your SDK version to support new message types.") return json_data # type: ignore @@ -339,4 +336,4 @@ def _send_model(self, data: typing.Any) -> None: """ Send a Pydantic model to the websocket connection. """ - self._send(_sanitize_numeric_types(data.dict())) + self._send(data.dict()) diff --git a/src/deepgram/agent/v1/types/__init__.py b/src/deepgram/agent/v1/types/__init__.py index 58919d1e..89a1ee1d 100644 --- a/src/deepgram/agent/v1/types/__init__.py +++ b/src/deepgram/agent/v1/types/__init__.py @@ -7,33 +7,20 @@ if typing.TYPE_CHECKING: from .agent_v1agent_audio_done import AgentV1AgentAudioDone - from .agent_v1agent_audio_done_type import AgentV1AgentAudioDoneType from .agent_v1agent_started_speaking import AgentV1AgentStartedSpeaking - from .agent_v1agent_started_speaking_type import AgentV1AgentStartedSpeakingType from .agent_v1agent_thinking import AgentV1AgentThinking - from .agent_v1agent_thinking_type import AgentV1AgentThinkingType from .agent_v1conversation_text import AgentV1ConversationText from .agent_v1conversation_text_role import AgentV1ConversationTextRole - from .agent_v1conversation_text_type import AgentV1ConversationTextType from .agent_v1error import AgentV1Error - from .agent_v1error_type import AgentV1ErrorType from .agent_v1function_call_request import AgentV1FunctionCallRequest from .agent_v1function_call_request_functions_item import AgentV1FunctionCallRequestFunctionsItem - from .agent_v1function_call_request_type import AgentV1FunctionCallRequestType from .agent_v1inject_agent_message import AgentV1InjectAgentMessage - from .agent_v1inject_agent_message_type import 
AgentV1InjectAgentMessageType from .agent_v1inject_user_message import AgentV1InjectUserMessage - from .agent_v1inject_user_message_type import AgentV1InjectUserMessageType from .agent_v1injection_refused import AgentV1InjectionRefused - from .agent_v1injection_refused_type import AgentV1InjectionRefusedType from .agent_v1keep_alive import AgentV1KeepAlive - from .agent_v1keep_alive_type import AgentV1KeepAliveType from .agent_v1prompt_updated import AgentV1PromptUpdated - from .agent_v1prompt_updated_type import AgentV1PromptUpdatedType from .agent_v1receive_function_call_response import AgentV1ReceiveFunctionCallResponse - from .agent_v1receive_function_call_response_type import AgentV1ReceiveFunctionCallResponseType from .agent_v1send_function_call_response import AgentV1SendFunctionCallResponse - from .agent_v1send_function_call_response_type import AgentV1SendFunctionCallResponseType from .agent_v1settings import AgentV1Settings from .agent_v1settings_agent import AgentV1SettingsAgent from .agent_v1settings_agent_context import AgentV1SettingsAgentContext @@ -42,18 +29,12 @@ from .agent_v1settings_agent_context_messages_item_content_role import ( AgentV1SettingsAgentContextMessagesItemContentRole, ) - from .agent_v1settings_agent_context_messages_item_content_type import ( - AgentV1SettingsAgentContextMessagesItemContentType, - ) from .agent_v1settings_agent_context_messages_item_function_calls import ( AgentV1SettingsAgentContextMessagesItemFunctionCalls, ) from .agent_v1settings_agent_context_messages_item_function_calls_function_calls_item import ( AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem, ) - from .agent_v1settings_agent_context_messages_item_function_calls_type import ( - AgentV1SettingsAgentContextMessagesItemFunctionCallsType, - ) from .agent_v1settings_agent_listen import AgentV1SettingsAgentListen from .agent_v1settings_agent_listen_provider import ( AgentV1SettingsAgentListenProvider, @@ -61,458 +42,75 @@ 
AgentV1SettingsAgentListenProvider_V2, ) from .agent_v1settings_agent_listen_provider_v1 import AgentV1SettingsAgentListenProviderV1 - from .agent_v1settings_agent_listen_provider_v1type import AgentV1SettingsAgentListenProviderV1Type from .agent_v1settings_agent_listen_provider_v2 import AgentV1SettingsAgentListenProviderV2 - from .agent_v1settings_agent_listen_provider_v2type import AgentV1SettingsAgentListenProviderV2Type from .agent_v1settings_agent_speak import AgentV1SettingsAgentSpeak - from .agent_v1settings_agent_speak_endpoint import AgentV1SettingsAgentSpeakEndpoint - from .agent_v1settings_agent_speak_endpoint_endpoint import AgentV1SettingsAgentSpeakEndpointEndpoint - from .agent_v1settings_agent_speak_endpoint_provider import ( - AgentV1SettingsAgentSpeakEndpointProvider, - AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly, - AgentV1SettingsAgentSpeakEndpointProvider_Cartesia, - AgentV1SettingsAgentSpeakEndpointProvider_Deepgram, - AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs, - AgentV1SettingsAgentSpeakEndpointProvider_OpenAi, - ) - from .agent_v1settings_agent_speak_endpoint_provider_aws_polly import ( - AgentV1SettingsAgentSpeakEndpointProviderAwsPolly, - ) - from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials import ( - AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials, - ) - from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials_type import ( - AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType, - ) - from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_engine import ( - AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine, - ) - from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_voice import ( - AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice, - ) - from .agent_v1settings_agent_speak_endpoint_provider_cartesia_model_id import ( - AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId, - ) - from 
.agent_v1settings_agent_speak_endpoint_provider_cartesia_version import ( - AgentV1SettingsAgentSpeakEndpointProviderCartesiaVersion, - ) - from .agent_v1settings_agent_speak_endpoint_provider_cartesia_voice import ( - AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice, - ) - from .agent_v1settings_agent_speak_endpoint_provider_deepgram_model import ( - AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel, - ) - from .agent_v1settings_agent_speak_endpoint_provider_deepgram_version import ( - AgentV1SettingsAgentSpeakEndpointProviderDeepgramVersion, - ) - from .agent_v1settings_agent_speak_endpoint_provider_eleven_labs_model_id import ( - AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId, - ) - from .agent_v1settings_agent_speak_endpoint_provider_eleven_labs_version import ( - AgentV1SettingsAgentSpeakEndpointProviderElevenLabsVersion, - ) - from .agent_v1settings_agent_speak_endpoint_provider_open_ai import AgentV1SettingsAgentSpeakEndpointProviderOpenAi - from .agent_v1settings_agent_speak_endpoint_provider_open_ai_model import ( - AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel, - ) - from .agent_v1settings_agent_speak_endpoint_provider_open_ai_version import ( - AgentV1SettingsAgentSpeakEndpointProviderOpenAiVersion, - ) - from .agent_v1settings_agent_speak_endpoint_provider_open_ai_voice import ( - AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice, - ) - from .agent_v1settings_agent_speak_one_item import AgentV1SettingsAgentSpeakOneItem - from .agent_v1settings_agent_speak_one_item_endpoint import AgentV1SettingsAgentSpeakOneItemEndpoint - from .agent_v1settings_agent_speak_one_item_provider import ( - AgentV1SettingsAgentSpeakOneItemProvider, - AgentV1SettingsAgentSpeakOneItemProvider_AwsPolly, - AgentV1SettingsAgentSpeakOneItemProvider_Cartesia, - AgentV1SettingsAgentSpeakOneItemProvider_Deepgram, - AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabs, - AgentV1SettingsAgentSpeakOneItemProvider_OpenAi, - ) - from 
.agent_v1settings_agent_speak_one_item_provider_aws_polly import ( - AgentV1SettingsAgentSpeakOneItemProviderAwsPolly, - ) - from .agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials import ( - AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentials, - ) - from .agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials_type import ( - AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsType, - ) - from .agent_v1settings_agent_speak_one_item_provider_aws_polly_engine import ( - AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine, - ) - from .agent_v1settings_agent_speak_one_item_provider_aws_polly_voice import ( - AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice, - ) - from .agent_v1settings_agent_speak_one_item_provider_cartesia_model_id import ( - AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId, - ) - from .agent_v1settings_agent_speak_one_item_provider_cartesia_version import ( - AgentV1SettingsAgentSpeakOneItemProviderCartesiaVersion, - ) - from .agent_v1settings_agent_speak_one_item_provider_cartesia_voice import ( - AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoice, - ) - from .agent_v1settings_agent_speak_one_item_provider_deepgram_model import ( - AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel, - ) - from .agent_v1settings_agent_speak_one_item_provider_deepgram_version import ( - AgentV1SettingsAgentSpeakOneItemProviderDeepgramVersion, - ) - from .agent_v1settings_agent_speak_one_item_provider_eleven_labs_model_id import ( - AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId, - ) - from .agent_v1settings_agent_speak_one_item_provider_eleven_labs_version import ( - AgentV1SettingsAgentSpeakOneItemProviderElevenLabsVersion, - ) - from .agent_v1settings_agent_speak_one_item_provider_open_ai import AgentV1SettingsAgentSpeakOneItemProviderOpenAi - from .agent_v1settings_agent_speak_one_item_provider_open_ai_model import ( - AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel, - ) - from 
.agent_v1settings_agent_speak_one_item_provider_open_ai_version import ( - AgentV1SettingsAgentSpeakOneItemProviderOpenAiVersion, - ) - from .agent_v1settings_agent_speak_one_item_provider_open_ai_voice import ( - AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice, - ) from .agent_v1settings_agent_think import AgentV1SettingsAgentThink - from .agent_v1settings_agent_think_one_item import AgentV1SettingsAgentThinkOneItem - from .agent_v1settings_agent_think_one_item_context_length import AgentV1SettingsAgentThinkOneItemContextLength - from .agent_v1settings_agent_think_one_item_endpoint import AgentV1SettingsAgentThinkOneItemEndpoint - from .agent_v1settings_agent_think_one_item_functions_item import AgentV1SettingsAgentThinkOneItemFunctionsItem - from .agent_v1settings_agent_think_one_item_functions_item_endpoint import ( - AgentV1SettingsAgentThinkOneItemFunctionsItemEndpoint, - ) - from .agent_v1settings_agent_think_one_item_provider import AgentV1SettingsAgentThinkOneItemProvider from .agent_v1settings_applied import AgentV1SettingsApplied - from .agent_v1settings_applied_type import AgentV1SettingsAppliedType from .agent_v1settings_audio import AgentV1SettingsAudio from .agent_v1settings_audio_input import AgentV1SettingsAudioInput from .agent_v1settings_audio_input_encoding import AgentV1SettingsAudioInputEncoding from .agent_v1settings_audio_output import AgentV1SettingsAudioOutput from .agent_v1settings_audio_output_encoding import AgentV1SettingsAudioOutputEncoding from .agent_v1settings_flags import AgentV1SettingsFlags - from .agent_v1settings_type import AgentV1SettingsType from .agent_v1speak_updated import AgentV1SpeakUpdated - from .agent_v1speak_updated_type import AgentV1SpeakUpdatedType + from .agent_v1think_updated import AgentV1ThinkUpdated from .agent_v1update_prompt import AgentV1UpdatePrompt - from .agent_v1update_prompt_type import AgentV1UpdatePromptType from .agent_v1update_speak import AgentV1UpdateSpeak from .agent_v1update_speak_speak 
import AgentV1UpdateSpeakSpeak - from .agent_v1update_speak_speak_endpoint import AgentV1UpdateSpeakSpeakEndpoint - from .agent_v1update_speak_speak_endpoint_endpoint import AgentV1UpdateSpeakSpeakEndpointEndpoint - from .agent_v1update_speak_speak_endpoint_provider import ( - AgentV1UpdateSpeakSpeakEndpointProvider, - AgentV1UpdateSpeakSpeakEndpointProvider_AwsPolly, - AgentV1UpdateSpeakSpeakEndpointProvider_Cartesia, - AgentV1UpdateSpeakSpeakEndpointProvider_Deepgram, - AgentV1UpdateSpeakSpeakEndpointProvider_ElevenLabs, - AgentV1UpdateSpeakSpeakEndpointProvider_OpenAi, - ) - from .agent_v1update_speak_speak_endpoint_provider_aws_polly import AgentV1UpdateSpeakSpeakEndpointProviderAwsPolly - from .agent_v1update_speak_speak_endpoint_provider_aws_polly_credentials import ( - AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentials, - ) - from .agent_v1update_speak_speak_endpoint_provider_aws_polly_credentials_type import ( - AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentialsType, - ) - from .agent_v1update_speak_speak_endpoint_provider_aws_polly_engine import ( - AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyEngine, - ) - from .agent_v1update_speak_speak_endpoint_provider_aws_polly_voice import ( - AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyVoice, - ) - from .agent_v1update_speak_speak_endpoint_provider_cartesia import AgentV1UpdateSpeakSpeakEndpointProviderCartesia - from .agent_v1update_speak_speak_endpoint_provider_cartesia_model_id import ( - AgentV1UpdateSpeakSpeakEndpointProviderCartesiaModelId, - ) - from .agent_v1update_speak_speak_endpoint_provider_cartesia_version import ( - AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVersion, - ) - from .agent_v1update_speak_speak_endpoint_provider_cartesia_voice import ( - AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVoice, - ) - from .agent_v1update_speak_speak_endpoint_provider_deepgram import AgentV1UpdateSpeakSpeakEndpointProviderDeepgram - from 
.agent_v1update_speak_speak_endpoint_provider_deepgram_model import ( - AgentV1UpdateSpeakSpeakEndpointProviderDeepgramModel, - ) - from .agent_v1update_speak_speak_endpoint_provider_deepgram_version import ( - AgentV1UpdateSpeakSpeakEndpointProviderDeepgramVersion, - ) - from .agent_v1update_speak_speak_endpoint_provider_eleven_labs import ( - AgentV1UpdateSpeakSpeakEndpointProviderElevenLabs, - ) - from .agent_v1update_speak_speak_endpoint_provider_eleven_labs_model_id import ( - AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsModelId, - ) - from .agent_v1update_speak_speak_endpoint_provider_eleven_labs_version import ( - AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsVersion, - ) - from .agent_v1update_speak_speak_endpoint_provider_open_ai import AgentV1UpdateSpeakSpeakEndpointProviderOpenAi - from .agent_v1update_speak_speak_endpoint_provider_open_ai_model import ( - AgentV1UpdateSpeakSpeakEndpointProviderOpenAiModel, - ) - from .agent_v1update_speak_speak_endpoint_provider_open_ai_version import ( - AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVersion, - ) - from .agent_v1update_speak_speak_endpoint_provider_open_ai_voice import ( - AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVoice, - ) - from .agent_v1update_speak_speak_one_item import AgentV1UpdateSpeakSpeakOneItem - from .agent_v1update_speak_speak_one_item_endpoint import AgentV1UpdateSpeakSpeakOneItemEndpoint - from .agent_v1update_speak_speak_one_item_provider import ( - AgentV1UpdateSpeakSpeakOneItemProvider, - AgentV1UpdateSpeakSpeakOneItemProvider_AwsPolly, - AgentV1UpdateSpeakSpeakOneItemProvider_Cartesia, - AgentV1UpdateSpeakSpeakOneItemProvider_Deepgram, - AgentV1UpdateSpeakSpeakOneItemProvider_ElevenLabs, - AgentV1UpdateSpeakSpeakOneItemProvider_OpenAi, - ) - from .agent_v1update_speak_speak_one_item_provider_aws_polly import AgentV1UpdateSpeakSpeakOneItemProviderAwsPolly - from .agent_v1update_speak_speak_one_item_provider_aws_polly_credentials import ( - 
AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentials, - ) - from .agent_v1update_speak_speak_one_item_provider_aws_polly_credentials_type import ( - AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentialsType, - ) - from .agent_v1update_speak_speak_one_item_provider_aws_polly_engine import ( - AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyEngine, - ) - from .agent_v1update_speak_speak_one_item_provider_aws_polly_voice import ( - AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyVoice, - ) - from .agent_v1update_speak_speak_one_item_provider_cartesia import AgentV1UpdateSpeakSpeakOneItemProviderCartesia - from .agent_v1update_speak_speak_one_item_provider_cartesia_model_id import ( - AgentV1UpdateSpeakSpeakOneItemProviderCartesiaModelId, - ) - from .agent_v1update_speak_speak_one_item_provider_cartesia_version import ( - AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVersion, - ) - from .agent_v1update_speak_speak_one_item_provider_cartesia_voice import ( - AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVoice, - ) - from .agent_v1update_speak_speak_one_item_provider_deepgram import AgentV1UpdateSpeakSpeakOneItemProviderDeepgram - from .agent_v1update_speak_speak_one_item_provider_deepgram_model import ( - AgentV1UpdateSpeakSpeakOneItemProviderDeepgramModel, - ) - from .agent_v1update_speak_speak_one_item_provider_deepgram_version import ( - AgentV1UpdateSpeakSpeakOneItemProviderDeepgramVersion, - ) - from .agent_v1update_speak_speak_one_item_provider_eleven_labs import ( - AgentV1UpdateSpeakSpeakOneItemProviderElevenLabs, - ) - from .agent_v1update_speak_speak_one_item_provider_eleven_labs_model_id import ( - AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsModelId, - ) - from .agent_v1update_speak_speak_one_item_provider_eleven_labs_version import ( - AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsVersion, - ) - from .agent_v1update_speak_speak_one_item_provider_open_ai import AgentV1UpdateSpeakSpeakOneItemProviderOpenAi - from 
.agent_v1update_speak_speak_one_item_provider_open_ai_model import ( - AgentV1UpdateSpeakSpeakOneItemProviderOpenAiModel, - ) - from .agent_v1update_speak_speak_one_item_provider_open_ai_version import ( - AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVersion, - ) - from .agent_v1update_speak_speak_one_item_provider_open_ai_voice import ( - AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVoice, - ) - from .agent_v1update_speak_type import AgentV1UpdateSpeakType + from .agent_v1update_think import AgentV1UpdateThink + from .agent_v1update_think_think import AgentV1UpdateThinkThink from .agent_v1user_started_speaking import AgentV1UserStartedSpeaking - from .agent_v1user_started_speaking_type import AgentV1UserStartedSpeakingType from .agent_v1warning import AgentV1Warning - from .agent_v1warning_type import AgentV1WarningType from .agent_v1welcome import AgentV1Welcome - from .agent_v1welcome_type import AgentV1WelcomeType - from .cartesia import Cartesia - from .deepgram import Deepgram - from .eleven_labs import ElevenLabs - from .max import Max _dynamic_imports: typing.Dict[str, str] = { "AgentV1AgentAudioDone": ".agent_v1agent_audio_done", - "AgentV1AgentAudioDoneType": ".agent_v1agent_audio_done_type", "AgentV1AgentStartedSpeaking": ".agent_v1agent_started_speaking", - "AgentV1AgentStartedSpeakingType": ".agent_v1agent_started_speaking_type", "AgentV1AgentThinking": ".agent_v1agent_thinking", - "AgentV1AgentThinkingType": ".agent_v1agent_thinking_type", "AgentV1ConversationText": ".agent_v1conversation_text", "AgentV1ConversationTextRole": ".agent_v1conversation_text_role", - "AgentV1ConversationTextType": ".agent_v1conversation_text_type", "AgentV1Error": ".agent_v1error", - "AgentV1ErrorType": ".agent_v1error_type", "AgentV1FunctionCallRequest": ".agent_v1function_call_request", "AgentV1FunctionCallRequestFunctionsItem": ".agent_v1function_call_request_functions_item", - "AgentV1FunctionCallRequestType": ".agent_v1function_call_request_type", 
"AgentV1InjectAgentMessage": ".agent_v1inject_agent_message", - "AgentV1InjectAgentMessageType": ".agent_v1inject_agent_message_type", "AgentV1InjectUserMessage": ".agent_v1inject_user_message", - "AgentV1InjectUserMessageType": ".agent_v1inject_user_message_type", "AgentV1InjectionRefused": ".agent_v1injection_refused", - "AgentV1InjectionRefusedType": ".agent_v1injection_refused_type", "AgentV1KeepAlive": ".agent_v1keep_alive", - "AgentV1KeepAliveType": ".agent_v1keep_alive_type", "AgentV1PromptUpdated": ".agent_v1prompt_updated", - "AgentV1PromptUpdatedType": ".agent_v1prompt_updated_type", "AgentV1ReceiveFunctionCallResponse": ".agent_v1receive_function_call_response", - "AgentV1ReceiveFunctionCallResponseType": ".agent_v1receive_function_call_response_type", "AgentV1SendFunctionCallResponse": ".agent_v1send_function_call_response", - "AgentV1SendFunctionCallResponseType": ".agent_v1send_function_call_response_type", "AgentV1Settings": ".agent_v1settings", "AgentV1SettingsAgent": ".agent_v1settings_agent", "AgentV1SettingsAgentContext": ".agent_v1settings_agent_context", "AgentV1SettingsAgentContextMessagesItem": ".agent_v1settings_agent_context_messages_item", "AgentV1SettingsAgentContextMessagesItemContent": ".agent_v1settings_agent_context_messages_item_content", "AgentV1SettingsAgentContextMessagesItemContentRole": ".agent_v1settings_agent_context_messages_item_content_role", - "AgentV1SettingsAgentContextMessagesItemContentType": ".agent_v1settings_agent_context_messages_item_content_type", "AgentV1SettingsAgentContextMessagesItemFunctionCalls": ".agent_v1settings_agent_context_messages_item_function_calls", "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem": ".agent_v1settings_agent_context_messages_item_function_calls_function_calls_item", - "AgentV1SettingsAgentContextMessagesItemFunctionCallsType": ".agent_v1settings_agent_context_messages_item_function_calls_type", "AgentV1SettingsAgentListen": ".agent_v1settings_agent_listen", 
"AgentV1SettingsAgentListenProvider": ".agent_v1settings_agent_listen_provider", "AgentV1SettingsAgentListenProviderV1": ".agent_v1settings_agent_listen_provider_v1", - "AgentV1SettingsAgentListenProviderV1Type": ".agent_v1settings_agent_listen_provider_v1type", "AgentV1SettingsAgentListenProviderV2": ".agent_v1settings_agent_listen_provider_v2", - "AgentV1SettingsAgentListenProviderV2Type": ".agent_v1settings_agent_listen_provider_v2type", "AgentV1SettingsAgentListenProvider_V1": ".agent_v1settings_agent_listen_provider", "AgentV1SettingsAgentListenProvider_V2": ".agent_v1settings_agent_listen_provider", "AgentV1SettingsAgentSpeak": ".agent_v1settings_agent_speak", - "AgentV1SettingsAgentSpeakEndpoint": ".agent_v1settings_agent_speak_endpoint", - "AgentV1SettingsAgentSpeakEndpointEndpoint": ".agent_v1settings_agent_speak_endpoint_endpoint", - "AgentV1SettingsAgentSpeakEndpointProvider": ".agent_v1settings_agent_speak_endpoint_provider", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPolly": ".agent_v1settings_agent_speak_endpoint_provider_aws_polly", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials": ".agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType": ".agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials_type", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine": ".agent_v1settings_agent_speak_endpoint_provider_aws_polly_engine", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice": ".agent_v1settings_agent_speak_endpoint_provider_aws_polly_voice", - "AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId": ".agent_v1settings_agent_speak_endpoint_provider_cartesia_model_id", - "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVersion": ".agent_v1settings_agent_speak_endpoint_provider_cartesia_version", - "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice": ".agent_v1settings_agent_speak_endpoint_provider_cartesia_voice", - 
"AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel": ".agent_v1settings_agent_speak_endpoint_provider_deepgram_model", - "AgentV1SettingsAgentSpeakEndpointProviderDeepgramVersion": ".agent_v1settings_agent_speak_endpoint_provider_deepgram_version", - "AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId": ".agent_v1settings_agent_speak_endpoint_provider_eleven_labs_model_id", - "AgentV1SettingsAgentSpeakEndpointProviderElevenLabsVersion": ".agent_v1settings_agent_speak_endpoint_provider_eleven_labs_version", - "AgentV1SettingsAgentSpeakEndpointProviderOpenAi": ".agent_v1settings_agent_speak_endpoint_provider_open_ai", - "AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel": ".agent_v1settings_agent_speak_endpoint_provider_open_ai_model", - "AgentV1SettingsAgentSpeakEndpointProviderOpenAiVersion": ".agent_v1settings_agent_speak_endpoint_provider_open_ai_version", - "AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice": ".agent_v1settings_agent_speak_endpoint_provider_open_ai_voice", - "AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly": ".agent_v1settings_agent_speak_endpoint_provider", - "AgentV1SettingsAgentSpeakEndpointProvider_Cartesia": ".agent_v1settings_agent_speak_endpoint_provider", - "AgentV1SettingsAgentSpeakEndpointProvider_Deepgram": ".agent_v1settings_agent_speak_endpoint_provider", - "AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs": ".agent_v1settings_agent_speak_endpoint_provider", - "AgentV1SettingsAgentSpeakEndpointProvider_OpenAi": ".agent_v1settings_agent_speak_endpoint_provider", - "AgentV1SettingsAgentSpeakOneItem": ".agent_v1settings_agent_speak_one_item", - "AgentV1SettingsAgentSpeakOneItemEndpoint": ".agent_v1settings_agent_speak_one_item_endpoint", - "AgentV1SettingsAgentSpeakOneItemProvider": ".agent_v1settings_agent_speak_one_item_provider", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPolly": ".agent_v1settings_agent_speak_one_item_provider_aws_polly", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentials": 
".agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsType": ".agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials_type", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine": ".agent_v1settings_agent_speak_one_item_provider_aws_polly_engine", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice": ".agent_v1settings_agent_speak_one_item_provider_aws_polly_voice", - "AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId": ".agent_v1settings_agent_speak_one_item_provider_cartesia_model_id", - "AgentV1SettingsAgentSpeakOneItemProviderCartesiaVersion": ".agent_v1settings_agent_speak_one_item_provider_cartesia_version", - "AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoice": ".agent_v1settings_agent_speak_one_item_provider_cartesia_voice", - "AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel": ".agent_v1settings_agent_speak_one_item_provider_deepgram_model", - "AgentV1SettingsAgentSpeakOneItemProviderDeepgramVersion": ".agent_v1settings_agent_speak_one_item_provider_deepgram_version", - "AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId": ".agent_v1settings_agent_speak_one_item_provider_eleven_labs_model_id", - "AgentV1SettingsAgentSpeakOneItemProviderElevenLabsVersion": ".agent_v1settings_agent_speak_one_item_provider_eleven_labs_version", - "AgentV1SettingsAgentSpeakOneItemProviderOpenAi": ".agent_v1settings_agent_speak_one_item_provider_open_ai", - "AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel": ".agent_v1settings_agent_speak_one_item_provider_open_ai_model", - "AgentV1SettingsAgentSpeakOneItemProviderOpenAiVersion": ".agent_v1settings_agent_speak_one_item_provider_open_ai_version", - "AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice": ".agent_v1settings_agent_speak_one_item_provider_open_ai_voice", - "AgentV1SettingsAgentSpeakOneItemProvider_AwsPolly": ".agent_v1settings_agent_speak_one_item_provider", - 
"AgentV1SettingsAgentSpeakOneItemProvider_Cartesia": ".agent_v1settings_agent_speak_one_item_provider", - "AgentV1SettingsAgentSpeakOneItemProvider_Deepgram": ".agent_v1settings_agent_speak_one_item_provider", - "AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabs": ".agent_v1settings_agent_speak_one_item_provider", - "AgentV1SettingsAgentSpeakOneItemProvider_OpenAi": ".agent_v1settings_agent_speak_one_item_provider", "AgentV1SettingsAgentThink": ".agent_v1settings_agent_think", - "AgentV1SettingsAgentThinkOneItem": ".agent_v1settings_agent_think_one_item", - "AgentV1SettingsAgentThinkOneItemContextLength": ".agent_v1settings_agent_think_one_item_context_length", - "AgentV1SettingsAgentThinkOneItemEndpoint": ".agent_v1settings_agent_think_one_item_endpoint", - "AgentV1SettingsAgentThinkOneItemFunctionsItem": ".agent_v1settings_agent_think_one_item_functions_item", - "AgentV1SettingsAgentThinkOneItemFunctionsItemEndpoint": ".agent_v1settings_agent_think_one_item_functions_item_endpoint", - "AgentV1SettingsAgentThinkOneItemProvider": ".agent_v1settings_agent_think_one_item_provider", "AgentV1SettingsApplied": ".agent_v1settings_applied", - "AgentV1SettingsAppliedType": ".agent_v1settings_applied_type", "AgentV1SettingsAudio": ".agent_v1settings_audio", "AgentV1SettingsAudioInput": ".agent_v1settings_audio_input", "AgentV1SettingsAudioInputEncoding": ".agent_v1settings_audio_input_encoding", "AgentV1SettingsAudioOutput": ".agent_v1settings_audio_output", "AgentV1SettingsAudioOutputEncoding": ".agent_v1settings_audio_output_encoding", "AgentV1SettingsFlags": ".agent_v1settings_flags", - "AgentV1SettingsType": ".agent_v1settings_type", "AgentV1SpeakUpdated": ".agent_v1speak_updated", - "AgentV1SpeakUpdatedType": ".agent_v1speak_updated_type", + "AgentV1ThinkUpdated": ".agent_v1think_updated", "AgentV1UpdatePrompt": ".agent_v1update_prompt", - "AgentV1UpdatePromptType": ".agent_v1update_prompt_type", "AgentV1UpdateSpeak": ".agent_v1update_speak", 
"AgentV1UpdateSpeakSpeak": ".agent_v1update_speak_speak", - "AgentV1UpdateSpeakSpeakEndpoint": ".agent_v1update_speak_speak_endpoint", - "AgentV1UpdateSpeakSpeakEndpointEndpoint": ".agent_v1update_speak_speak_endpoint_endpoint", - "AgentV1UpdateSpeakSpeakEndpointProvider": ".agent_v1update_speak_speak_endpoint_provider", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPolly": ".agent_v1update_speak_speak_endpoint_provider_aws_polly", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentials": ".agent_v1update_speak_speak_endpoint_provider_aws_polly_credentials", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentialsType": ".agent_v1update_speak_speak_endpoint_provider_aws_polly_credentials_type", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyEngine": ".agent_v1update_speak_speak_endpoint_provider_aws_polly_engine", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyVoice": ".agent_v1update_speak_speak_endpoint_provider_aws_polly_voice", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesia": ".agent_v1update_speak_speak_endpoint_provider_cartesia", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesiaModelId": ".agent_v1update_speak_speak_endpoint_provider_cartesia_model_id", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVersion": ".agent_v1update_speak_speak_endpoint_provider_cartesia_version", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVoice": ".agent_v1update_speak_speak_endpoint_provider_cartesia_voice", - "AgentV1UpdateSpeakSpeakEndpointProviderDeepgram": ".agent_v1update_speak_speak_endpoint_provider_deepgram", - "AgentV1UpdateSpeakSpeakEndpointProviderDeepgramModel": ".agent_v1update_speak_speak_endpoint_provider_deepgram_model", - "AgentV1UpdateSpeakSpeakEndpointProviderDeepgramVersion": ".agent_v1update_speak_speak_endpoint_provider_deepgram_version", - "AgentV1UpdateSpeakSpeakEndpointProviderElevenLabs": ".agent_v1update_speak_speak_endpoint_provider_eleven_labs", - "AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsModelId": 
".agent_v1update_speak_speak_endpoint_provider_eleven_labs_model_id", - "AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsVersion": ".agent_v1update_speak_speak_endpoint_provider_eleven_labs_version", - "AgentV1UpdateSpeakSpeakEndpointProviderOpenAi": ".agent_v1update_speak_speak_endpoint_provider_open_ai", - "AgentV1UpdateSpeakSpeakEndpointProviderOpenAiModel": ".agent_v1update_speak_speak_endpoint_provider_open_ai_model", - "AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVersion": ".agent_v1update_speak_speak_endpoint_provider_open_ai_version", - "AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVoice": ".agent_v1update_speak_speak_endpoint_provider_open_ai_voice", - "AgentV1UpdateSpeakSpeakEndpointProvider_AwsPolly": ".agent_v1update_speak_speak_endpoint_provider", - "AgentV1UpdateSpeakSpeakEndpointProvider_Cartesia": ".agent_v1update_speak_speak_endpoint_provider", - "AgentV1UpdateSpeakSpeakEndpointProvider_Deepgram": ".agent_v1update_speak_speak_endpoint_provider", - "AgentV1UpdateSpeakSpeakEndpointProvider_ElevenLabs": ".agent_v1update_speak_speak_endpoint_provider", - "AgentV1UpdateSpeakSpeakEndpointProvider_OpenAi": ".agent_v1update_speak_speak_endpoint_provider", - "AgentV1UpdateSpeakSpeakOneItem": ".agent_v1update_speak_speak_one_item", - "AgentV1UpdateSpeakSpeakOneItemEndpoint": ".agent_v1update_speak_speak_one_item_endpoint", - "AgentV1UpdateSpeakSpeakOneItemProvider": ".agent_v1update_speak_speak_one_item_provider", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPolly": ".agent_v1update_speak_speak_one_item_provider_aws_polly", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentials": ".agent_v1update_speak_speak_one_item_provider_aws_polly_credentials", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentialsType": ".agent_v1update_speak_speak_one_item_provider_aws_polly_credentials_type", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyEngine": ".agent_v1update_speak_speak_one_item_provider_aws_polly_engine", - 
"AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyVoice": ".agent_v1update_speak_speak_one_item_provider_aws_polly_voice", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesia": ".agent_v1update_speak_speak_one_item_provider_cartesia", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesiaModelId": ".agent_v1update_speak_speak_one_item_provider_cartesia_model_id", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVersion": ".agent_v1update_speak_speak_one_item_provider_cartesia_version", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVoice": ".agent_v1update_speak_speak_one_item_provider_cartesia_voice", - "AgentV1UpdateSpeakSpeakOneItemProviderDeepgram": ".agent_v1update_speak_speak_one_item_provider_deepgram", - "AgentV1UpdateSpeakSpeakOneItemProviderDeepgramModel": ".agent_v1update_speak_speak_one_item_provider_deepgram_model", - "AgentV1UpdateSpeakSpeakOneItemProviderDeepgramVersion": ".agent_v1update_speak_speak_one_item_provider_deepgram_version", - "AgentV1UpdateSpeakSpeakOneItemProviderElevenLabs": ".agent_v1update_speak_speak_one_item_provider_eleven_labs", - "AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsModelId": ".agent_v1update_speak_speak_one_item_provider_eleven_labs_model_id", - "AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsVersion": ".agent_v1update_speak_speak_one_item_provider_eleven_labs_version", - "AgentV1UpdateSpeakSpeakOneItemProviderOpenAi": ".agent_v1update_speak_speak_one_item_provider_open_ai", - "AgentV1UpdateSpeakSpeakOneItemProviderOpenAiModel": ".agent_v1update_speak_speak_one_item_provider_open_ai_model", - "AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVersion": ".agent_v1update_speak_speak_one_item_provider_open_ai_version", - "AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVoice": ".agent_v1update_speak_speak_one_item_provider_open_ai_voice", - "AgentV1UpdateSpeakSpeakOneItemProvider_AwsPolly": ".agent_v1update_speak_speak_one_item_provider", - "AgentV1UpdateSpeakSpeakOneItemProvider_Cartesia": 
".agent_v1update_speak_speak_one_item_provider", - "AgentV1UpdateSpeakSpeakOneItemProvider_Deepgram": ".agent_v1update_speak_speak_one_item_provider", - "AgentV1UpdateSpeakSpeakOneItemProvider_ElevenLabs": ".agent_v1update_speak_speak_one_item_provider", - "AgentV1UpdateSpeakSpeakOneItemProvider_OpenAi": ".agent_v1update_speak_speak_one_item_provider", - "AgentV1UpdateSpeakType": ".agent_v1update_speak_type", + "AgentV1UpdateThink": ".agent_v1update_think", + "AgentV1UpdateThinkThink": ".agent_v1update_think_think", "AgentV1UserStartedSpeaking": ".agent_v1user_started_speaking", - "AgentV1UserStartedSpeakingType": ".agent_v1user_started_speaking_type", "AgentV1Warning": ".agent_v1warning", - "AgentV1WarningType": ".agent_v1warning_type", "AgentV1Welcome": ".agent_v1welcome", - "AgentV1WelcomeType": ".agent_v1welcome_type", - "Cartesia": ".cartesia", - "Deepgram": ".deepgram", - "ElevenLabs": ".eleven_labs", - "Max": ".max", } @@ -539,185 +137,51 @@ def __dir__(): __all__ = [ "AgentV1AgentAudioDone", - "AgentV1AgentAudioDoneType", "AgentV1AgentStartedSpeaking", - "AgentV1AgentStartedSpeakingType", "AgentV1AgentThinking", - "AgentV1AgentThinkingType", "AgentV1ConversationText", "AgentV1ConversationTextRole", - "AgentV1ConversationTextType", "AgentV1Error", - "AgentV1ErrorType", "AgentV1FunctionCallRequest", "AgentV1FunctionCallRequestFunctionsItem", - "AgentV1FunctionCallRequestType", "AgentV1InjectAgentMessage", - "AgentV1InjectAgentMessageType", "AgentV1InjectUserMessage", - "AgentV1InjectUserMessageType", "AgentV1InjectionRefused", - "AgentV1InjectionRefusedType", "AgentV1KeepAlive", - "AgentV1KeepAliveType", "AgentV1PromptUpdated", - "AgentV1PromptUpdatedType", "AgentV1ReceiveFunctionCallResponse", - "AgentV1ReceiveFunctionCallResponseType", "AgentV1SendFunctionCallResponse", - "AgentV1SendFunctionCallResponseType", "AgentV1Settings", "AgentV1SettingsAgent", "AgentV1SettingsAgentContext", "AgentV1SettingsAgentContextMessagesItem", 
"AgentV1SettingsAgentContextMessagesItemContent", "AgentV1SettingsAgentContextMessagesItemContentRole", - "AgentV1SettingsAgentContextMessagesItemContentType", "AgentV1SettingsAgentContextMessagesItemFunctionCalls", "AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem", - "AgentV1SettingsAgentContextMessagesItemFunctionCallsType", "AgentV1SettingsAgentListen", "AgentV1SettingsAgentListenProvider", "AgentV1SettingsAgentListenProviderV1", - "AgentV1SettingsAgentListenProviderV1Type", "AgentV1SettingsAgentListenProviderV2", - "AgentV1SettingsAgentListenProviderV2Type", "AgentV1SettingsAgentListenProvider_V1", "AgentV1SettingsAgentListenProvider_V2", "AgentV1SettingsAgentSpeak", - "AgentV1SettingsAgentSpeakEndpoint", - "AgentV1SettingsAgentSpeakEndpointEndpoint", - "AgentV1SettingsAgentSpeakEndpointProvider", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPolly", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine", - "AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice", - "AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId", - "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVersion", - "AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice", - "AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel", - "AgentV1SettingsAgentSpeakEndpointProviderDeepgramVersion", - "AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId", - "AgentV1SettingsAgentSpeakEndpointProviderElevenLabsVersion", - "AgentV1SettingsAgentSpeakEndpointProviderOpenAi", - "AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel", - "AgentV1SettingsAgentSpeakEndpointProviderOpenAiVersion", - "AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice", - "AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly", - "AgentV1SettingsAgentSpeakEndpointProvider_Cartesia", - "AgentV1SettingsAgentSpeakEndpointProvider_Deepgram", - 
"AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs", - "AgentV1SettingsAgentSpeakEndpointProvider_OpenAi", - "AgentV1SettingsAgentSpeakOneItem", - "AgentV1SettingsAgentSpeakOneItemEndpoint", - "AgentV1SettingsAgentSpeakOneItemProvider", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPolly", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentials", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsType", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine", - "AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice", - "AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId", - "AgentV1SettingsAgentSpeakOneItemProviderCartesiaVersion", - "AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoice", - "AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel", - "AgentV1SettingsAgentSpeakOneItemProviderDeepgramVersion", - "AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId", - "AgentV1SettingsAgentSpeakOneItemProviderElevenLabsVersion", - "AgentV1SettingsAgentSpeakOneItemProviderOpenAi", - "AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel", - "AgentV1SettingsAgentSpeakOneItemProviderOpenAiVersion", - "AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice", - "AgentV1SettingsAgentSpeakOneItemProvider_AwsPolly", - "AgentV1SettingsAgentSpeakOneItemProvider_Cartesia", - "AgentV1SettingsAgentSpeakOneItemProvider_Deepgram", - "AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabs", - "AgentV1SettingsAgentSpeakOneItemProvider_OpenAi", "AgentV1SettingsAgentThink", - "AgentV1SettingsAgentThinkOneItem", - "AgentV1SettingsAgentThinkOneItemContextLength", - "AgentV1SettingsAgentThinkOneItemEndpoint", - "AgentV1SettingsAgentThinkOneItemFunctionsItem", - "AgentV1SettingsAgentThinkOneItemFunctionsItemEndpoint", - "AgentV1SettingsAgentThinkOneItemProvider", "AgentV1SettingsApplied", - "AgentV1SettingsAppliedType", "AgentV1SettingsAudio", "AgentV1SettingsAudioInput", "AgentV1SettingsAudioInputEncoding", "AgentV1SettingsAudioOutput", 
"AgentV1SettingsAudioOutputEncoding", "AgentV1SettingsFlags", - "AgentV1SettingsType", "AgentV1SpeakUpdated", - "AgentV1SpeakUpdatedType", + "AgentV1ThinkUpdated", "AgentV1UpdatePrompt", - "AgentV1UpdatePromptType", "AgentV1UpdateSpeak", "AgentV1UpdateSpeakSpeak", - "AgentV1UpdateSpeakSpeakEndpoint", - "AgentV1UpdateSpeakSpeakEndpointEndpoint", - "AgentV1UpdateSpeakSpeakEndpointProvider", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPolly", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentials", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentialsType", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyEngine", - "AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyVoice", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesia", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesiaModelId", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVersion", - "AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVoice", - "AgentV1UpdateSpeakSpeakEndpointProviderDeepgram", - "AgentV1UpdateSpeakSpeakEndpointProviderDeepgramModel", - "AgentV1UpdateSpeakSpeakEndpointProviderDeepgramVersion", - "AgentV1UpdateSpeakSpeakEndpointProviderElevenLabs", - "AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsModelId", - "AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsVersion", - "AgentV1UpdateSpeakSpeakEndpointProviderOpenAi", - "AgentV1UpdateSpeakSpeakEndpointProviderOpenAiModel", - "AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVersion", - "AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVoice", - "AgentV1UpdateSpeakSpeakEndpointProvider_AwsPolly", - "AgentV1UpdateSpeakSpeakEndpointProvider_Cartesia", - "AgentV1UpdateSpeakSpeakEndpointProvider_Deepgram", - "AgentV1UpdateSpeakSpeakEndpointProvider_ElevenLabs", - "AgentV1UpdateSpeakSpeakEndpointProvider_OpenAi", - "AgentV1UpdateSpeakSpeakOneItem", - "AgentV1UpdateSpeakSpeakOneItemEndpoint", - "AgentV1UpdateSpeakSpeakOneItemProvider", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPolly", - 
"AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentials", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentialsType", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyEngine", - "AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyVoice", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesia", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesiaModelId", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVersion", - "AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVoice", - "AgentV1UpdateSpeakSpeakOneItemProviderDeepgram", - "AgentV1UpdateSpeakSpeakOneItemProviderDeepgramModel", - "AgentV1UpdateSpeakSpeakOneItemProviderDeepgramVersion", - "AgentV1UpdateSpeakSpeakOneItemProviderElevenLabs", - "AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsModelId", - "AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsVersion", - "AgentV1UpdateSpeakSpeakOneItemProviderOpenAi", - "AgentV1UpdateSpeakSpeakOneItemProviderOpenAiModel", - "AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVersion", - "AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVoice", - "AgentV1UpdateSpeakSpeakOneItemProvider_AwsPolly", - "AgentV1UpdateSpeakSpeakOneItemProvider_Cartesia", - "AgentV1UpdateSpeakSpeakOneItemProvider_Deepgram", - "AgentV1UpdateSpeakSpeakOneItemProvider_ElevenLabs", - "AgentV1UpdateSpeakSpeakOneItemProvider_OpenAi", - "AgentV1UpdateSpeakType", + "AgentV1UpdateThink", + "AgentV1UpdateThinkThink", "AgentV1UserStartedSpeaking", - "AgentV1UserStartedSpeakingType", "AgentV1Warning", - "AgentV1WarningType", "AgentV1Welcome", - "AgentV1WelcomeType", - "Cartesia", - "Deepgram", - "ElevenLabs", - "Max", ] diff --git a/src/deepgram/agent/v1/types/agent_v1agent_audio_done.py b/src/deepgram/agent/v1/types/agent_v1agent_audio_done.py index 36b8a608..a8d9fddd 100644 --- a/src/deepgram/agent/v1/types/agent_v1agent_audio_done.py +++ b/src/deepgram/agent/v1/types/agent_v1agent_audio_done.py @@ -5,11 +5,10 @@ import pydantic from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import 
UncheckedBaseModel -from .agent_v1agent_audio_done_type import AgentV1AgentAudioDoneType class AgentV1AgentAudioDone(UncheckedBaseModel): - type: AgentV1AgentAudioDoneType = pydantic.Field() + type: typing.Literal["AgentAudioDone"] = pydantic.Field(default="AgentAudioDone") """ Message type identifier indicating the agent has finished sending audio """ diff --git a/src/deepgram/agent/v1/types/agent_v1agent_audio_done_type.py b/src/deepgram/agent/v1/types/agent_v1agent_audio_done_type.py deleted file mode 100644 index 3fafb98d..00000000 --- a/src/deepgram/agent/v1/types/agent_v1agent_audio_done_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1AgentAudioDoneType = typing.Union[typing.Literal["AgentAudioDone"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1agent_started_speaking.py b/src/deepgram/agent/v1/types/agent_v1agent_started_speaking.py index a85ba7d2..ba913bf5 100644 --- a/src/deepgram/agent/v1/types/agent_v1agent_started_speaking.py +++ b/src/deepgram/agent/v1/types/agent_v1agent_started_speaking.py @@ -5,11 +5,10 @@ import pydantic from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1agent_started_speaking_type import AgentV1AgentStartedSpeakingType class AgentV1AgentStartedSpeaking(UncheckedBaseModel): - type: AgentV1AgentStartedSpeakingType = pydantic.Field() + type: typing.Literal["AgentStartedSpeaking"] = pydantic.Field(default="AgentStartedSpeaking") """ Message type identifier for agent started speaking """ diff --git a/src/deepgram/agent/v1/types/agent_v1agent_started_speaking_type.py b/src/deepgram/agent/v1/types/agent_v1agent_started_speaking_type.py deleted file mode 100644 index e677505a..00000000 --- a/src/deepgram/agent/v1/types/agent_v1agent_started_speaking_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1AgentStartedSpeakingType = typing.Union[typing.Literal["AgentStartedSpeaking"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1agent_thinking.py b/src/deepgram/agent/v1/types/agent_v1agent_thinking.py index 77dad48b..7c3e6531 100644 --- a/src/deepgram/agent/v1/types/agent_v1agent_thinking.py +++ b/src/deepgram/agent/v1/types/agent_v1agent_thinking.py @@ -5,11 +5,10 @@ import pydantic from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1agent_thinking_type import AgentV1AgentThinkingType class AgentV1AgentThinking(UncheckedBaseModel): - type: AgentV1AgentThinkingType = pydantic.Field() + type: typing.Literal["AgentThinking"] = pydantic.Field(default="AgentThinking") """ Message type identifier for agent thinking """ diff --git a/src/deepgram/agent/v1/types/agent_v1agent_thinking_type.py b/src/deepgram/agent/v1/types/agent_v1agent_thinking_type.py deleted file mode 100644 index 34516f80..00000000 --- a/src/deepgram/agent/v1/types/agent_v1agent_thinking_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1AgentThinkingType = typing.Union[typing.Literal["AgentThinking"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1conversation_text.py b/src/deepgram/agent/v1/types/agent_v1conversation_text.py index 30722582..10148aec 100644 --- a/src/deepgram/agent/v1/types/agent_v1conversation_text.py +++ b/src/deepgram/agent/v1/types/agent_v1conversation_text.py @@ -6,11 +6,10 @@ from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel from .agent_v1conversation_text_role import AgentV1ConversationTextRole -from .agent_v1conversation_text_type import AgentV1ConversationTextType class AgentV1ConversationText(UncheckedBaseModel): - type: AgentV1ConversationTextType = pydantic.Field() + type: typing.Literal["ConversationText"] = pydantic.Field(default="ConversationText") """ Message type identifier for conversation text """ diff --git a/src/deepgram/agent/v1/types/agent_v1conversation_text_type.py b/src/deepgram/agent/v1/types/agent_v1conversation_text_type.py deleted file mode 100644 index 6bb3a7cb..00000000 --- a/src/deepgram/agent/v1/types/agent_v1conversation_text_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1ConversationTextType = typing.Union[typing.Literal["ConversationText"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1error.py b/src/deepgram/agent/v1/types/agent_v1error.py index ae292e40..aea1228b 100644 --- a/src/deepgram/agent/v1/types/agent_v1error.py +++ b/src/deepgram/agent/v1/types/agent_v1error.py @@ -5,11 +5,10 @@ import pydantic from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1error_type import AgentV1ErrorType class AgentV1Error(UncheckedBaseModel): - type: AgentV1ErrorType = pydantic.Field() + type: typing.Literal["Error"] = pydantic.Field(default="Error") """ Message type identifier for error responses """ diff --git a/src/deepgram/agent/v1/types/agent_v1error_type.py b/src/deepgram/agent/v1/types/agent_v1error_type.py deleted file mode 100644 index 3838a7b1..00000000 --- a/src/deepgram/agent/v1/types/agent_v1error_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1ErrorType = typing.Union[typing.Literal["Error"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1function_call_request.py b/src/deepgram/agent/v1/types/agent_v1function_call_request.py index 8470e291..c587938e 100644 --- a/src/deepgram/agent/v1/types/agent_v1function_call_request.py +++ b/src/deepgram/agent/v1/types/agent_v1function_call_request.py @@ -6,11 +6,10 @@ from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel from .agent_v1function_call_request_functions_item import AgentV1FunctionCallRequestFunctionsItem -from .agent_v1function_call_request_type import AgentV1FunctionCallRequestType class AgentV1FunctionCallRequest(UncheckedBaseModel): - type: AgentV1FunctionCallRequestType = pydantic.Field() + type: typing.Literal["FunctionCallRequest"] = pydantic.Field(default="FunctionCallRequest") """ Message type identifier for function call requests """ diff --git a/src/deepgram/agent/v1/types/agent_v1function_call_request_functions_item.py b/src/deepgram/agent/v1/types/agent_v1function_call_request_functions_item.py index 00e70f92..65d1928e 100644 --- a/src/deepgram/agent/v1/types/agent_v1function_call_request_functions_item.py +++ b/src/deepgram/agent/v1/types/agent_v1function_call_request_functions_item.py @@ -28,6 +28,11 @@ class AgentV1FunctionCallRequestFunctionsItem(UncheckedBaseModel): Whether the function should be executed client-side """ + thought_signature: typing.Optional[str] = pydantic.Field(default=None) + """ + Some Gemini models require this as an additional function call identifier + """ + if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: diff --git a/src/deepgram/agent/v1/types/agent_v1function_call_request_type.py b/src/deepgram/agent/v1/types/agent_v1function_call_request_type.py deleted file mode 100644 index 8fbcd24d..00000000 --- 
a/src/deepgram/agent/v1/types/agent_v1function_call_request_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1FunctionCallRequestType = typing.Union[typing.Literal["FunctionCallRequest"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1inject_agent_message.py b/src/deepgram/agent/v1/types/agent_v1inject_agent_message.py index b7435ced..a2ca5886 100644 --- a/src/deepgram/agent/v1/types/agent_v1inject_agent_message.py +++ b/src/deepgram/agent/v1/types/agent_v1inject_agent_message.py @@ -5,11 +5,10 @@ import pydantic from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1inject_agent_message_type import AgentV1InjectAgentMessageType class AgentV1InjectAgentMessage(UncheckedBaseModel): - type: AgentV1InjectAgentMessageType = pydantic.Field() + type: typing.Literal["InjectAgentMessage"] = pydantic.Field(default="InjectAgentMessage") """ Message type identifier for injecting an agent message """ diff --git a/src/deepgram/agent/v1/types/agent_v1inject_agent_message_type.py b/src/deepgram/agent/v1/types/agent_v1inject_agent_message_type.py deleted file mode 100644 index 0bda4261..00000000 --- a/src/deepgram/agent/v1/types/agent_v1inject_agent_message_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1InjectAgentMessageType = typing.Union[typing.Literal["InjectAgentMessage"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1inject_user_message.py b/src/deepgram/agent/v1/types/agent_v1inject_user_message.py index bf0df4b2..f10cd987 100644 --- a/src/deepgram/agent/v1/types/agent_v1inject_user_message.py +++ b/src/deepgram/agent/v1/types/agent_v1inject_user_message.py @@ -5,11 +5,10 @@ import pydantic from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1inject_user_message_type import AgentV1InjectUserMessageType class AgentV1InjectUserMessage(UncheckedBaseModel): - type: AgentV1InjectUserMessageType = pydantic.Field() + type: typing.Literal["InjectUserMessage"] = pydantic.Field(default="InjectUserMessage") """ Message type identifier for injecting a user message """ diff --git a/src/deepgram/agent/v1/types/agent_v1inject_user_message_type.py b/src/deepgram/agent/v1/types/agent_v1inject_user_message_type.py deleted file mode 100644 index 54f3d351..00000000 --- a/src/deepgram/agent/v1/types/agent_v1inject_user_message_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1InjectUserMessageType = typing.Union[typing.Literal["InjectUserMessage"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1injection_refused.py b/src/deepgram/agent/v1/types/agent_v1injection_refused.py index f557ce09..619fb932 100644 --- a/src/deepgram/agent/v1/types/agent_v1injection_refused.py +++ b/src/deepgram/agent/v1/types/agent_v1injection_refused.py @@ -5,11 +5,10 @@ import pydantic from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1injection_refused_type import AgentV1InjectionRefusedType class AgentV1InjectionRefused(UncheckedBaseModel): - type: AgentV1InjectionRefusedType = pydantic.Field() + type: typing.Literal["InjectionRefused"] = pydantic.Field(default="InjectionRefused") """ Message type identifier for injection refused """ diff --git a/src/deepgram/agent/v1/types/agent_v1injection_refused_type.py b/src/deepgram/agent/v1/types/agent_v1injection_refused_type.py deleted file mode 100644 index d44019c0..00000000 --- a/src/deepgram/agent/v1/types/agent_v1injection_refused_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1InjectionRefusedType = typing.Union[typing.Literal["InjectionRefused"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1keep_alive.py b/src/deepgram/agent/v1/types/agent_v1keep_alive.py index 1b69a573..4dcfc4e0 100644 --- a/src/deepgram/agent/v1/types/agent_v1keep_alive.py +++ b/src/deepgram/agent/v1/types/agent_v1keep_alive.py @@ -5,7 +5,6 @@ import pydantic from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1keep_alive_type import AgentV1KeepAliveType class AgentV1KeepAlive(UncheckedBaseModel): @@ -13,7 +12,7 @@ class AgentV1KeepAlive(UncheckedBaseModel): Send a control message to the agent """ - type: AgentV1KeepAliveType = pydantic.Field() + type: typing.Literal["KeepAlive"] = pydantic.Field(default="KeepAlive") """ Message type identifier """ diff --git a/src/deepgram/agent/v1/types/agent_v1prompt_updated.py b/src/deepgram/agent/v1/types/agent_v1prompt_updated.py index 6f5c7d43..f0840cd5 100644 --- a/src/deepgram/agent/v1/types/agent_v1prompt_updated.py +++ b/src/deepgram/agent/v1/types/agent_v1prompt_updated.py @@ -5,11 +5,10 @@ import pydantic from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1prompt_updated_type import AgentV1PromptUpdatedType class AgentV1PromptUpdated(UncheckedBaseModel): - type: AgentV1PromptUpdatedType = pydantic.Field() + type: typing.Literal["PromptUpdated"] = pydantic.Field(default="PromptUpdated") """ Message type identifier for prompt update confirmation """ diff --git a/src/deepgram/agent/v1/types/agent_v1prompt_updated_type.py b/src/deepgram/agent/v1/types/agent_v1prompt_updated_type.py deleted file mode 100644 index 7b3cd88d..00000000 --- a/src/deepgram/agent/v1/types/agent_v1prompt_updated_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1PromptUpdatedType = typing.Union[typing.Literal["PromptUpdated"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1receive_function_call_response.py b/src/deepgram/agent/v1/types/agent_v1receive_function_call_response.py index b7dcd120..3501450d 100644 --- a/src/deepgram/agent/v1/types/agent_v1receive_function_call_response.py +++ b/src/deepgram/agent/v1/types/agent_v1receive_function_call_response.py @@ -5,7 +5,6 @@ import pydantic from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1receive_function_call_response_type import AgentV1ReceiveFunctionCallResponseType class AgentV1ReceiveFunctionCallResponse(UncheckedBaseModel): @@ -21,7 +20,7 @@ class AgentV1ReceiveFunctionCallResponse(UncheckedBaseModel): interface for function call responses regardless of execution location. """ - type: AgentV1ReceiveFunctionCallResponseType = pydantic.Field() + type: typing.Literal["FunctionCallResponse"] = pydantic.Field(default="FunctionCallResponse") """ Message type identifier for function call responses """ diff --git a/src/deepgram/agent/v1/types/agent_v1receive_function_call_response_type.py b/src/deepgram/agent/v1/types/agent_v1receive_function_call_response_type.py deleted file mode 100644 index 8886b2fd..00000000 --- a/src/deepgram/agent/v1/types/agent_v1receive_function_call_response_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1ReceiveFunctionCallResponseType = typing.Union[typing.Literal["FunctionCallResponse"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1send_function_call_response.py b/src/deepgram/agent/v1/types/agent_v1send_function_call_response.py index 5d635197..6cdef2ff 100644 --- a/src/deepgram/agent/v1/types/agent_v1send_function_call_response.py +++ b/src/deepgram/agent/v1/types/agent_v1send_function_call_response.py @@ -5,7 +5,6 @@ import pydantic from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1send_function_call_response_type import AgentV1SendFunctionCallResponseType class AgentV1SendFunctionCallResponse(UncheckedBaseModel): @@ -21,7 +20,7 @@ class AgentV1SendFunctionCallResponse(UncheckedBaseModel): interface for function call responses regardless of execution location. """ - type: AgentV1SendFunctionCallResponseType = pydantic.Field() + type: typing.Literal["FunctionCallResponse"] = pydantic.Field(default="FunctionCallResponse") """ Message type identifier for function call responses """ diff --git a/src/deepgram/agent/v1/types/agent_v1send_function_call_response_type.py b/src/deepgram/agent/v1/types/agent_v1send_function_call_response_type.py deleted file mode 100644 index 3712790e..00000000 --- a/src/deepgram/agent/v1/types/agent_v1send_function_call_response_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1SendFunctionCallResponseType = typing.Union[typing.Literal["FunctionCallResponse"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1settings.py b/src/deepgram/agent/v1/types/agent_v1settings.py index 82de5bbf..11601dfc 100644 --- a/src/deepgram/agent/v1/types/agent_v1settings.py +++ b/src/deepgram/agent/v1/types/agent_v1settings.py @@ -8,11 +8,10 @@ from .agent_v1settings_agent import AgentV1SettingsAgent from .agent_v1settings_audio import AgentV1SettingsAudio from .agent_v1settings_flags import AgentV1SettingsFlags -from .agent_v1settings_type import AgentV1SettingsType class AgentV1Settings(UncheckedBaseModel): - type: AgentV1SettingsType + type: typing.Literal["Settings"] = "Settings" tags: typing.Optional[typing.List[str]] = pydantic.Field(default=None) """ Tags to associate with the request diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_content.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_content.py index 812518f9..c1ad74ec 100644 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_content.py +++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_content.py @@ -8,9 +8,6 @@ from .agent_v1settings_agent_context_messages_item_content_role import ( AgentV1SettingsAgentContextMessagesItemContentRole, ) -from .agent_v1settings_agent_context_messages_item_content_type import ( - AgentV1SettingsAgentContextMessagesItemContentType, -) class AgentV1SettingsAgentContextMessagesItemContent(UncheckedBaseModel): @@ -18,7 +15,7 @@ class AgentV1SettingsAgentContextMessagesItemContent(UncheckedBaseModel): Conversation text as part of the conversation history """ - type: AgentV1SettingsAgentContextMessagesItemContentType = pydantic.Field() + type: typing.Literal["History"] = pydantic.Field(default="History") """ Message type identifier for conversation text """ diff --git 
a/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_content_type.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_content_type.py deleted file mode 100644 index 49b525f9..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_content_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1SettingsAgentContextMessagesItemContentType = typing.Union[typing.Literal["History"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_function_calls.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_function_calls.py index 61afaf26..dd693535 100644 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_function_calls.py +++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_function_calls.py @@ -8,9 +8,6 @@ from .agent_v1settings_agent_context_messages_item_function_calls_function_calls_item import ( AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem, ) -from .agent_v1settings_agent_context_messages_item_function_calls_type import ( - AgentV1SettingsAgentContextMessagesItemFunctionCallsType, -) class AgentV1SettingsAgentContextMessagesItemFunctionCalls(UncheckedBaseModel): @@ -18,7 +15,7 @@ class AgentV1SettingsAgentContextMessagesItemFunctionCalls(UncheckedBaseModel): Client-side or server-side function call request and response as part of the conversation history """ - type: AgentV1SettingsAgentContextMessagesItemFunctionCallsType + type: typing.Literal["History"] = "History" function_calls: typing.List[AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem] = ( pydantic.Field() ) diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_function_calls_function_calls_item.py 
b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_function_calls_function_calls_item.py index 7534c9b1..9ce668a7 100644 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_function_calls_function_calls_item.py +++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_function_calls_function_calls_item.py @@ -33,6 +33,11 @@ class AgentV1SettingsAgentContextMessagesItemFunctionCallsFunctionCallsItem(Unch Response from the function call """ + thought_signature: typing.Optional[str] = pydantic.Field(default=None) + """ + Some Gemini models require this as an additional function call identifier + """ + if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_function_calls_type.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_function_calls_type.py deleted file mode 100644 index 18d6ace1..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_context_messages_item_function_calls_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1SettingsAgentContextMessagesItemFunctionCallsType = typing.Union[typing.Literal["History"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider.py index c87d09ab..9800b27d 100644 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider.py +++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider.py @@ -8,13 +8,11 @@ import typing_extensions from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel, UnionMetadata -from .agent_v1settings_agent_listen_provider_v1type import AgentV1SettingsAgentListenProviderV1Type -from .agent_v1settings_agent_listen_provider_v2type import AgentV1SettingsAgentListenProviderV2Type class AgentV1SettingsAgentListenProvider_V1(UncheckedBaseModel): version: typing.Literal["v1"] = "v1" - type: AgentV1SettingsAgentListenProviderV1Type + type: typing.Literal["deepgram"] = "deepgram" model: typing.Optional[str] = None language: typing.Optional[str] = None keyterms: typing.Optional[typing.List[str]] = None @@ -32,7 +30,7 @@ class Config: class AgentV1SettingsAgentListenProvider_V2(UncheckedBaseModel): version: typing.Literal["v2"] = "v2" - type: AgentV1SettingsAgentListenProviderV2Type + type: typing.Literal["deepgram"] = "deepgram" model: str keyterms: typing.Optional[typing.List[str]] = None diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v1.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v1.py index 13bcf34f..3370c965 100644 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v1.py +++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v1.py @@ -5,11 +5,10 @@ import pydantic from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel -from 
.agent_v1settings_agent_listen_provider_v1type import AgentV1SettingsAgentListenProviderV1Type class AgentV1SettingsAgentListenProviderV1(UncheckedBaseModel): - type: AgentV1SettingsAgentListenProviderV1Type = pydantic.Field() + type: typing.Literal["deepgram"] = pydantic.Field(default="deepgram") """ Provider type for speech-to-text """ diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v1type.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v1type.py deleted file mode 100644 index 1baf7137..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v1type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1SettingsAgentListenProviderV1Type = typing.Union[typing.Literal["deepgram"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v2.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v2.py index 058ec6bd..5975d23e 100644 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v2.py +++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v2.py @@ -5,11 +5,10 @@ import pydantic from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1settings_agent_listen_provider_v2type import AgentV1SettingsAgentListenProviderV2Type class AgentV1SettingsAgentListenProviderV2(UncheckedBaseModel): - type: AgentV1SettingsAgentListenProviderV2Type = pydantic.Field() + type: typing.Literal["deepgram"] = pydantic.Field(default="deepgram") """ Provider type for speech-to-text """ diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v2type.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v2type.py deleted file mode 100644 index 90180c18..00000000 --- 
a/src/deepgram/agent/v1/types/agent_v1settings_agent_listen_provider_v2type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1SettingsAgentListenProviderV2Type = typing.Union[typing.Literal["deepgram"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak.py index 0ad6d1c5..84a61a59 100644 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak.py +++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak.py @@ -2,9 +2,6 @@ import typing -from .agent_v1settings_agent_speak_endpoint import AgentV1SettingsAgentSpeakEndpoint -from .agent_v1settings_agent_speak_one_item import AgentV1SettingsAgentSpeakOneItem +from ....types.speak_settings_v1 import SpeakSettingsV1 -AgentV1SettingsAgentSpeak = typing.Union[ - AgentV1SettingsAgentSpeakEndpoint, typing.List[AgentV1SettingsAgentSpeakOneItem] -] +AgentV1SettingsAgentSpeak = typing.Union[SpeakSettingsV1, typing.List[SpeakSettingsV1]] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint.py deleted file mode 100644 index d0b3ec7b..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint.py +++ /dev/null @@ -1,27 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1settings_agent_speak_endpoint_endpoint import AgentV1SettingsAgentSpeakEndpointEndpoint -from .agent_v1settings_agent_speak_endpoint_provider import AgentV1SettingsAgentSpeakEndpointProvider - - -class AgentV1SettingsAgentSpeakEndpoint(UncheckedBaseModel): - provider: AgentV1SettingsAgentSpeakEndpointProvider - endpoint: typing.Optional[AgentV1SettingsAgentSpeakEndpointEndpoint] = pydantic.Field(default=None) - """ - Optional if provider is Deepgram. Required for non-Deepgram TTS providers. - When present, must include url field and headers object. Valid schemes are https and wss with wss only supported for Eleven Labs. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_endpoint.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_endpoint.py deleted file mode 100644 index 2778435f..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_endpoint.py +++ /dev/null @@ -1,30 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel - - -class AgentV1SettingsAgentSpeakEndpointEndpoint(UncheckedBaseModel): - """ - Optional if provider is Deepgram. Required for non-Deepgram TTS providers. - When present, must include url field and headers object. Valid schemes are https and wss with wss only supported for Eleven Labs. - """ - - url: typing.Optional[str] = pydantic.Field(default=None) - """ - Custom TTS endpoint URL. 
Cannot contain `output_format` or `model_id` query parameters when the provider is Eleven Labs. - """ - - headers: typing.Optional[typing.Dict[str, str]] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider.py deleted file mode 100644 index c7d724b2..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider.py +++ /dev/null @@ -1,144 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import typing - -import pydantic -import typing_extensions -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel, UnionMetadata -from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials import ( - AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials, -) -from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_engine import ( - AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine, -) -from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_voice import ( - AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice, -) -from .agent_v1settings_agent_speak_endpoint_provider_open_ai_model import ( - AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel, -) -from .agent_v1settings_agent_speak_endpoint_provider_open_ai_version import ( - AgentV1SettingsAgentSpeakEndpointProviderOpenAiVersion, -) -from .agent_v1settings_agent_speak_endpoint_provider_open_ai_voice import ( - AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice, -) -from .agent_v1settings_agent_speak_one_item_provider_cartesia_model_id import ( - 
AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId, -) -from .agent_v1settings_agent_speak_one_item_provider_cartesia_version import ( - AgentV1SettingsAgentSpeakOneItemProviderCartesiaVersion, -) -from .agent_v1settings_agent_speak_one_item_provider_cartesia_voice import ( - AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoice, -) -from .agent_v1settings_agent_speak_one_item_provider_deepgram_model import ( - AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel, -) -from .agent_v1settings_agent_speak_one_item_provider_deepgram_version import ( - AgentV1SettingsAgentSpeakOneItemProviderDeepgramVersion, -) -from .agent_v1settings_agent_speak_one_item_provider_eleven_labs_model_id import ( - AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId, -) -from .agent_v1settings_agent_speak_one_item_provider_eleven_labs_version import ( - AgentV1SettingsAgentSpeakOneItemProviderElevenLabsVersion, -) - - -class AgentV1SettingsAgentSpeakEndpointProvider_Deepgram(UncheckedBaseModel): - type: typing.Literal["deepgram"] = "deepgram" - version: typing.Optional[AgentV1SettingsAgentSpeakOneItemProviderDeepgramVersion] = None - model: AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -class AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs(UncheckedBaseModel): - type: typing.Literal["eleven_labs"] = "eleven_labs" - version: typing.Optional[AgentV1SettingsAgentSpeakOneItemProviderElevenLabsVersion] = None - model_id: AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId - language: typing.Optional[str] = None - language_code: typing.Optional[str] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: 
- - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -class AgentV1SettingsAgentSpeakEndpointProvider_Cartesia(UncheckedBaseModel): - type: typing.Literal["cartesia"] = "cartesia" - version: typing.Optional[AgentV1SettingsAgentSpeakOneItemProviderCartesiaVersion] = None - model_id: AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId - voice: AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoice - language: typing.Optional[str] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -class AgentV1SettingsAgentSpeakEndpointProvider_OpenAi(UncheckedBaseModel): - type: typing.Literal["open_ai"] = "open_ai" - version: typing.Optional[AgentV1SettingsAgentSpeakEndpointProviderOpenAiVersion] = None - model: AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel - voice: AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -class AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly(UncheckedBaseModel): - type: typing.Literal["aws_polly"] = "aws_polly" - voice: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice - language: str - language_code: typing.Optional[str] = None - engine: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine - credentials: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - 
-AgentV1SettingsAgentSpeakEndpointProvider = typing_extensions.Annotated[ - typing.Union[ - AgentV1SettingsAgentSpeakEndpointProvider_Deepgram, - AgentV1SettingsAgentSpeakEndpointProvider_ElevenLabs, - AgentV1SettingsAgentSpeakEndpointProvider_Cartesia, - AgentV1SettingsAgentSpeakEndpointProvider_OpenAi, - AgentV1SettingsAgentSpeakEndpointProvider_AwsPolly, - ], - UnionMetadata(discriminant="type"), -] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly.py deleted file mode 100644 index 256cfba6..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly.py +++ /dev/null @@ -1,45 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials import ( - AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials, -) -from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_engine import ( - AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine, -) -from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_voice import ( - AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice, -) - - -class AgentV1SettingsAgentSpeakEndpointProviderAwsPolly(UncheckedBaseModel): - voice: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice = pydantic.Field() - """ - AWS Polly voice name - """ - - language: str = pydantic.Field() - """ - Language code to use, e.g. 'en-US'. Corresponds to the `language_code` parameter in the AWS Polly API - """ - - language_code: typing.Optional[str] = pydantic.Field(default=None) - """ - Use the `language` field instead. 
- """ - - engine: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine - credentials: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials.py deleted file mode 100644 index 95284bf1..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials.py +++ /dev/null @@ -1,30 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials_type import ( - AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType, -) - - -class AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentials(UncheckedBaseModel): - type: AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType - region: str - access_key_id: str - secret_access_key: str - session_token: typing.Optional[str] = pydantic.Field(default=None) - """ - Required for STS only - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials_type.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials_type.py deleted file 
mode 100644 index 515f0617..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_credentials_type.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1SettingsAgentSpeakEndpointProviderAwsPollyCredentialsType = typing.Union[ - typing.Literal["sts", "iam"], typing.Any -] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_engine.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_engine.py deleted file mode 100644 index 2f182419..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_engine.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1SettingsAgentSpeakEndpointProviderAwsPollyEngine = typing.Union[ - typing.Literal["generative", "long-form", "standard", "neural"], typing.Any -] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_voice.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_voice.py deleted file mode 100644 index 0079e7b3..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_aws_polly_voice.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1SettingsAgentSpeakEndpointProviderAwsPollyVoice = typing.Union[ - typing.Literal["Matthew", "Joanna", "Amy", "Emma", "Brian", "Arthur", "Aria", "Ayanda"], typing.Any -] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_cartesia_model_id.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_cartesia_model_id.py deleted file mode 100644 index b81e30b3..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_cartesia_model_id.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1SettingsAgentSpeakEndpointProviderCartesiaModelId = typing.Union[ - typing.Literal["sonic-2", "sonic-multilingual"], typing.Any -] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_cartesia_version.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_cartesia_version.py deleted file mode 100644 index f29cec69..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_cartesia_version.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1SettingsAgentSpeakEndpointProviderCartesiaVersion = typing.Union[typing.Literal["2025-03-17"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_deepgram_model.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_deepgram_model.py deleted file mode 100644 index 161119d8..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_deepgram_model.py +++ /dev/null @@ -1,72 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1SettingsAgentSpeakEndpointProviderDeepgramModel = typing.Union[ - typing.Literal[ - "aura-asteria-en", - "aura-luna-en", - "aura-stella-en", - "aura-athena-en", - "aura-hera-en", - "aura-orion-en", - "aura-arcas-en", - "aura-perseus-en", - "aura-angus-en", - "aura-orpheus-en", - "aura-helios-en", - "aura-zeus-en", - "aura-2-amalthea-en", - "aura-2-andromeda-en", - "aura-2-apollo-en", - "aura-2-arcas-en", - "aura-2-aries-en", - "aura-2-asteria-en", - "aura-2-athena-en", - "aura-2-atlas-en", - "aura-2-aurora-en", - "aura-2-callista-en", - "aura-2-cora-en", - "aura-2-cordelia-en", - "aura-2-delia-en", - "aura-2-draco-en", - "aura-2-electra-en", - "aura-2-harmonia-en", - "aura-2-helena-en", - "aura-2-hera-en", - "aura-2-hermes-en", - "aura-2-hyperion-en", - "aura-2-iris-en", - "aura-2-janus-en", - "aura-2-juno-en", - "aura-2-jupiter-en", - "aura-2-luna-en", - "aura-2-mars-en", - "aura-2-minerva-en", - "aura-2-neptune-en", - "aura-2-odysseus-en", - "aura-2-ophelia-en", - "aura-2-orion-en", - "aura-2-orpheus-en", - "aura-2-pandora-en", - "aura-2-phoebe-en", - "aura-2-pluto-en", - "aura-2-saturn-en", - "aura-2-selene-en", - "aura-2-thalia-en", - "aura-2-theia-en", - "aura-2-vesta-en", - "aura-2-zeus-en", - "aura-2-sirio-es", - "aura-2-nestor-es", - "aura-2-carina-es", - "aura-2-celeste-es", - "aura-2-alvaro-es", - "aura-2-diana-es", - "aura-2-aquila-es", - "aura-2-selena-es", - "aura-2-estrella-es", - "aura-2-javier-es", - ], - typing.Any, -] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_deepgram_version.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_deepgram_version.py deleted file mode 100644 index e9cd4832..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_deepgram_version.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1SettingsAgentSpeakEndpointProviderDeepgramVersion = typing.Union[typing.Literal["v1"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_eleven_labs_model_id.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_eleven_labs_model_id.py deleted file mode 100644 index 4ed8c7e8..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_eleven_labs_model_id.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1SettingsAgentSpeakEndpointProviderElevenLabsModelId = typing.Union[ - typing.Literal["eleven_turbo_v2_5", "eleven_monolingual_v1", "eleven_multilingual_v2"], typing.Any -] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_eleven_labs_version.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_eleven_labs_version.py deleted file mode 100644 index 2a21cb4c..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_eleven_labs_version.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1SettingsAgentSpeakEndpointProviderElevenLabsVersion = typing.Union[typing.Literal["v1"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_open_ai.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_open_ai.py deleted file mode 100644 index 506f5245..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_open_ai.py +++ /dev/null @@ -1,42 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1settings_agent_speak_endpoint_provider_open_ai_model import ( - AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel, -) -from .agent_v1settings_agent_speak_endpoint_provider_open_ai_version import ( - AgentV1SettingsAgentSpeakEndpointProviderOpenAiVersion, -) -from .agent_v1settings_agent_speak_endpoint_provider_open_ai_voice import ( - AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice, -) - - -class AgentV1SettingsAgentSpeakEndpointProviderOpenAi(UncheckedBaseModel): - version: typing.Optional[AgentV1SettingsAgentSpeakEndpointProviderOpenAiVersion] = pydantic.Field(default=None) - """ - The REST API version for the OpenAI text-to-speech API - """ - - model: AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel = pydantic.Field() - """ - OpenAI TTS model - """ - - voice: AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice = pydantic.Field() - """ - OpenAI voice - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_open_ai_model.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_open_ai_model.py deleted file mode 100644 index f83a1943..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_open_ai_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1SettingsAgentSpeakEndpointProviderOpenAiModel = typing.Union[typing.Literal["tts-1", "tts-1-hd"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_open_ai_version.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_open_ai_version.py deleted file mode 100644 index 8dfcd1d7..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_open_ai_version.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1SettingsAgentSpeakEndpointProviderOpenAiVersion = typing.Union[typing.Literal["v1"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_open_ai_voice.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_open_ai_voice.py deleted file mode 100644 index 0e8a10eb..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_open_ai_voice.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1SettingsAgentSpeakEndpointProviderOpenAiVoice = typing.Union[ - typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any -] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item.py deleted file mode 100644 index b95fc91a..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item.py +++ /dev/null @@ -1,27 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1settings_agent_speak_one_item_endpoint import AgentV1SettingsAgentSpeakOneItemEndpoint -from .agent_v1settings_agent_speak_one_item_provider import AgentV1SettingsAgentSpeakOneItemProvider - - -class AgentV1SettingsAgentSpeakOneItem(UncheckedBaseModel): - provider: AgentV1SettingsAgentSpeakOneItemProvider - endpoint: typing.Optional[AgentV1SettingsAgentSpeakOneItemEndpoint] = pydantic.Field(default=None) - """ - Optional if provider is Deepgram. Required for non-Deepgram TTS providers. - When present, must include url field and headers object. Valid schemes are https and wss with wss only supported for Eleven Labs. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_endpoint.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_endpoint.py deleted file mode 100644 index 50e760d6..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_endpoint.py +++ /dev/null @@ -1,30 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel - - -class AgentV1SettingsAgentSpeakOneItemEndpoint(UncheckedBaseModel): - """ - Optional if provider is Deepgram. Required for non-Deepgram TTS providers. - When present, must include url field and headers object. Valid schemes are https and wss with wss only supported for Eleven Labs. - """ - - url: typing.Optional[str] = pydantic.Field(default=None) - """ - Custom TTS endpoint URL. 
Cannot contain `output_format` or `model_id` query parameters when the provider is Eleven Labs. - """ - - headers: typing.Optional[typing.Dict[str, str]] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider.py deleted file mode 100644 index 5c7c7c82..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider.py +++ /dev/null @@ -1,144 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import typing - -import pydantic -import typing_extensions -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel, UnionMetadata -from .agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials import ( - AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentials, -) -from .agent_v1settings_agent_speak_one_item_provider_aws_polly_engine import ( - AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine, -) -from .agent_v1settings_agent_speak_one_item_provider_aws_polly_voice import ( - AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice, -) -from .agent_v1settings_agent_speak_one_item_provider_cartesia_model_id import ( - AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId, -) -from .agent_v1settings_agent_speak_one_item_provider_cartesia_version import ( - AgentV1SettingsAgentSpeakOneItemProviderCartesiaVersion, -) -from .agent_v1settings_agent_speak_one_item_provider_cartesia_voice import ( - AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoice, -) -from .agent_v1settings_agent_speak_one_item_provider_deepgram_model import ( - 
AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel, -) -from .agent_v1settings_agent_speak_one_item_provider_deepgram_version import ( - AgentV1SettingsAgentSpeakOneItemProviderDeepgramVersion, -) -from .agent_v1settings_agent_speak_one_item_provider_eleven_labs_model_id import ( - AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId, -) -from .agent_v1settings_agent_speak_one_item_provider_eleven_labs_version import ( - AgentV1SettingsAgentSpeakOneItemProviderElevenLabsVersion, -) -from .agent_v1settings_agent_speak_one_item_provider_open_ai_model import ( - AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel, -) -from .agent_v1settings_agent_speak_one_item_provider_open_ai_version import ( - AgentV1SettingsAgentSpeakOneItemProviderOpenAiVersion, -) -from .agent_v1settings_agent_speak_one_item_provider_open_ai_voice import ( - AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice, -) - - -class AgentV1SettingsAgentSpeakOneItemProvider_Deepgram(UncheckedBaseModel): - type: typing.Literal["deepgram"] = "deepgram" - version: typing.Optional[AgentV1SettingsAgentSpeakOneItemProviderDeepgramVersion] = None - model: AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -class AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabs(UncheckedBaseModel): - type: typing.Literal["eleven_labs"] = "eleven_labs" - version: typing.Optional[AgentV1SettingsAgentSpeakOneItemProviderElevenLabsVersion] = None - model_id: AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId - language: typing.Optional[str] = None - language_code: typing.Optional[str] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class 
Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -class AgentV1SettingsAgentSpeakOneItemProvider_Cartesia(UncheckedBaseModel): - type: typing.Literal["cartesia"] = "cartesia" - version: typing.Optional[AgentV1SettingsAgentSpeakOneItemProviderCartesiaVersion] = None - model_id: AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId - voice: AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoice - language: typing.Optional[str] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -class AgentV1SettingsAgentSpeakOneItemProvider_OpenAi(UncheckedBaseModel): - type: typing.Literal["open_ai"] = "open_ai" - version: typing.Optional[AgentV1SettingsAgentSpeakOneItemProviderOpenAiVersion] = None - model: AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel - voice: AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -class AgentV1SettingsAgentSpeakOneItemProvider_AwsPolly(UncheckedBaseModel): - type: typing.Literal["aws_polly"] = "aws_polly" - voice: AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice - language: str - language_code: typing.Optional[str] = None - engine: AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine - credentials: AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentials - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -AgentV1SettingsAgentSpeakOneItemProvider = 
typing_extensions.Annotated[ - typing.Union[ - AgentV1SettingsAgentSpeakOneItemProvider_Deepgram, - AgentV1SettingsAgentSpeakOneItemProvider_ElevenLabs, - AgentV1SettingsAgentSpeakOneItemProvider_Cartesia, - AgentV1SettingsAgentSpeakOneItemProvider_OpenAi, - AgentV1SettingsAgentSpeakOneItemProvider_AwsPolly, - ], - UnionMetadata(discriminant="type"), -] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly.py deleted file mode 100644 index 11c3ab25..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly.py +++ /dev/null @@ -1,45 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials import ( - AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentials, -) -from .agent_v1settings_agent_speak_one_item_provider_aws_polly_engine import ( - AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine, -) -from .agent_v1settings_agent_speak_one_item_provider_aws_polly_voice import ( - AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice, -) - - -class AgentV1SettingsAgentSpeakOneItemProviderAwsPolly(UncheckedBaseModel): - voice: AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice = pydantic.Field() - """ - AWS Polly voice name - """ - - language: str = pydantic.Field() - """ - Language code to use, e.g. 'en-US'. Corresponds to the `language_code` parameter in the AWS Polly API - """ - - language_code: typing.Optional[str] = pydantic.Field(default=None) - """ - Use the `language` field instead. 
- """ - - engine: AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine - credentials: AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentials - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials.py deleted file mode 100644 index d8dcb07e..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials.py +++ /dev/null @@ -1,30 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials_type import ( - AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsType, -) - - -class AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentials(UncheckedBaseModel): - type: AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsType - region: str - access_key_id: str - secret_access_key: str - session_token: typing.Optional[str] = pydantic.Field(default=None) - """ - Required for STS only - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials_type.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials_type.py deleted file mode 
100644 index 3cd1c64f..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly_credentials_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1SettingsAgentSpeakOneItemProviderAwsPollyCredentialsType = typing.Union[typing.Literal["sts", "iam"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly_engine.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly_engine.py deleted file mode 100644 index 7313bd82..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly_engine.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1SettingsAgentSpeakOneItemProviderAwsPollyEngine = typing.Union[ - typing.Literal["generative", "long-form", "standard", "neural"], typing.Any -] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly_voice.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly_voice.py deleted file mode 100644 index ad77ee0f..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_aws_polly_voice.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1SettingsAgentSpeakOneItemProviderAwsPollyVoice = typing.Union[ - typing.Literal["Matthew", "Joanna", "Amy", "Emma", "Brian", "Arthur", "Aria", "Ayanda"], typing.Any -] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_cartesia_model_id.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_cartesia_model_id.py deleted file mode 100644 index 8d062938..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_cartesia_model_id.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId = typing.Union[ - typing.Literal["sonic-2", "sonic-multilingual"], typing.Any -] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_cartesia_version.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_cartesia_version.py deleted file mode 100644 index 8a7e04a5..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_cartesia_version.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1SettingsAgentSpeakOneItemProviderCartesiaVersion = typing.Union[typing.Literal["2025-03-17"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_deepgram_model.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_deepgram_model.py deleted file mode 100644 index 2c896d8d..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_deepgram_model.py +++ /dev/null @@ -1,72 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel = typing.Union[ - typing.Literal[ - "aura-asteria-en", - "aura-luna-en", - "aura-stella-en", - "aura-athena-en", - "aura-hera-en", - "aura-orion-en", - "aura-arcas-en", - "aura-perseus-en", - "aura-angus-en", - "aura-orpheus-en", - "aura-helios-en", - "aura-zeus-en", - "aura-2-amalthea-en", - "aura-2-andromeda-en", - "aura-2-apollo-en", - "aura-2-arcas-en", - "aura-2-aries-en", - "aura-2-asteria-en", - "aura-2-athena-en", - "aura-2-atlas-en", - "aura-2-aurora-en", - "aura-2-callista-en", - "aura-2-cora-en", - "aura-2-cordelia-en", - "aura-2-delia-en", - "aura-2-draco-en", - "aura-2-electra-en", - "aura-2-harmonia-en", - "aura-2-helena-en", - "aura-2-hera-en", - "aura-2-hermes-en", - "aura-2-hyperion-en", - "aura-2-iris-en", - "aura-2-janus-en", - "aura-2-juno-en", - "aura-2-jupiter-en", - "aura-2-luna-en", - "aura-2-mars-en", - "aura-2-minerva-en", - "aura-2-neptune-en", - "aura-2-odysseus-en", - "aura-2-ophelia-en", - "aura-2-orion-en", - "aura-2-orpheus-en", - "aura-2-pandora-en", - "aura-2-phoebe-en", - "aura-2-pluto-en", - "aura-2-saturn-en", - "aura-2-selene-en", - "aura-2-thalia-en", - "aura-2-theia-en", - "aura-2-vesta-en", - "aura-2-zeus-en", - "aura-2-sirio-es", - "aura-2-nestor-es", - "aura-2-carina-es", - "aura-2-celeste-es", - "aura-2-alvaro-es", - "aura-2-diana-es", - "aura-2-aquila-es", - "aura-2-selena-es", - "aura-2-estrella-es", - "aura-2-javier-es", - ], - typing.Any, -] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_deepgram_version.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_deepgram_version.py deleted file mode 100644 index db5b1714..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_deepgram_version.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1SettingsAgentSpeakOneItemProviderDeepgramVersion = typing.Union[typing.Literal["v1"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_eleven_labs_model_id.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_eleven_labs_model_id.py deleted file mode 100644 index f2bbee02..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_eleven_labs_model_id.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId = typing.Union[ - typing.Literal["eleven_turbo_v2_5", "eleven_monolingual_v1", "eleven_multilingual_v2"], typing.Any -] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_eleven_labs_version.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_eleven_labs_version.py deleted file mode 100644 index 5498f864..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_eleven_labs_version.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1SettingsAgentSpeakOneItemProviderElevenLabsVersion = typing.Union[typing.Literal["v1"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_open_ai.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_open_ai.py deleted file mode 100644 index c21f1b63..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_open_ai.py +++ /dev/null @@ -1,42 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1settings_agent_speak_one_item_provider_open_ai_model import ( - AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel, -) -from .agent_v1settings_agent_speak_one_item_provider_open_ai_version import ( - AgentV1SettingsAgentSpeakOneItemProviderOpenAiVersion, -) -from .agent_v1settings_agent_speak_one_item_provider_open_ai_voice import ( - AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice, -) - - -class AgentV1SettingsAgentSpeakOneItemProviderOpenAi(UncheckedBaseModel): - version: typing.Optional[AgentV1SettingsAgentSpeakOneItemProviderOpenAiVersion] = pydantic.Field(default=None) - """ - The REST API version for the OpenAI text-to-speech API - """ - - model: AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel = pydantic.Field() - """ - OpenAI TTS model - """ - - voice: AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice = pydantic.Field() - """ - OpenAI voice - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_open_ai_model.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_open_ai_model.py deleted file mode 100644 index 21ea5697..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_open_ai_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1SettingsAgentSpeakOneItemProviderOpenAiModel = typing.Union[typing.Literal["tts-1", "tts-1-hd"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_open_ai_version.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_open_ai_version.py deleted file mode 100644 index f5201c98..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_open_ai_version.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1SettingsAgentSpeakOneItemProviderOpenAiVersion = typing.Union[typing.Literal["v1"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_open_ai_voice.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_open_ai_voice.py deleted file mode 100644 index 7e4bc122..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_open_ai_voice.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1SettingsAgentSpeakOneItemProviderOpenAiVoice = typing.Union[ - typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any -] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think.py index f4a89bf9..2521acf2 100644 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_think.py +++ b/src/deepgram/agent/v1/types/agent_v1settings_agent_think.py @@ -3,6 +3,5 @@ import typing from ....types.think_settings_v1 import ThinkSettingsV1 -from .agent_v1settings_agent_think_one_item import AgentV1SettingsAgentThinkOneItem -AgentV1SettingsAgentThink = typing.Union[ThinkSettingsV1, typing.List[AgentV1SettingsAgentThinkOneItem]] +AgentV1SettingsAgentThink = typing.Union[ThinkSettingsV1, typing.List[ThinkSettingsV1]] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_one_item.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_one_item.py deleted file mode 100644 index d0fa3ebb..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_one_item.py +++ /dev/null @@ -1,35 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1settings_agent_think_one_item_context_length import AgentV1SettingsAgentThinkOneItemContextLength -from .agent_v1settings_agent_think_one_item_endpoint import AgentV1SettingsAgentThinkOneItemEndpoint -from .agent_v1settings_agent_think_one_item_functions_item import AgentV1SettingsAgentThinkOneItemFunctionsItem -from .agent_v1settings_agent_think_one_item_provider import AgentV1SettingsAgentThinkOneItemProvider - - -class AgentV1SettingsAgentThinkOneItem(UncheckedBaseModel): - provider: AgentV1SettingsAgentThinkOneItemProvider - endpoint: typing.Optional[AgentV1SettingsAgentThinkOneItemEndpoint] = pydantic.Field(default=None) - """ - Optional for non-Deepgram LLM providers. When present, must include url field and headers object - """ - - functions: typing.Optional[typing.List[AgentV1SettingsAgentThinkOneItemFunctionsItem]] = None - prompt: typing.Optional[str] = None - context_length: typing.Optional[AgentV1SettingsAgentThinkOneItemContextLength] = pydantic.Field(default=None) - """ - Specifies the number of characters retained in context between user messages, agent responses, and function calls. 
This setting is only configurable when a custom think endpoint is used - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_one_item_context_length.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_one_item_context_length.py deleted file mode 100644 index e2c64663..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_one_item_context_length.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -from .max import Max - -AgentV1SettingsAgentThinkOneItemContextLength = typing.Union[Max, float] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_one_item_provider.py b/src/deepgram/agent/v1/types/agent_v1settings_agent_think_one_item_provider.py deleted file mode 100644 index 239fc6c9..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_one_item_provider.py +++ /dev/null @@ -1,13 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -from ....types.anthropic import Anthropic -from ....types.aws_bedrock_think_provider import AwsBedrockThinkProvider -from ....types.google import Google -from ....types.groq import Groq -from ....types.open_ai_think_provider import OpenAiThinkProvider - -AgentV1SettingsAgentThinkOneItemProvider = typing.Union[ - OpenAiThinkProvider, AwsBedrockThinkProvider, Anthropic, Google, Groq -] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_applied.py b/src/deepgram/agent/v1/types/agent_v1settings_applied.py index de27cee7..065643b6 100644 --- a/src/deepgram/agent/v1/types/agent_v1settings_applied.py +++ b/src/deepgram/agent/v1/types/agent_v1settings_applied.py @@ -5,11 +5,10 @@ import pydantic from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1settings_applied_type import AgentV1SettingsAppliedType class AgentV1SettingsApplied(UncheckedBaseModel): - type: AgentV1SettingsAppliedType = pydantic.Field() + type: typing.Literal["SettingsApplied"] = pydantic.Field(default="SettingsApplied") """ Message type identifier for settings applied confirmation """ diff --git a/src/deepgram/agent/v1/types/agent_v1settings_applied_type.py b/src/deepgram/agent/v1/types/agent_v1settings_applied_type.py deleted file mode 100644 index 51a5cf6b..00000000 --- a/src/deepgram/agent/v1/types/agent_v1settings_applied_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1SettingsAppliedType = typing.Union[typing.Literal["SettingsApplied"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1speak_updated.py b/src/deepgram/agent/v1/types/agent_v1speak_updated.py index 41a6e4ed..956dcfd7 100644 --- a/src/deepgram/agent/v1/types/agent_v1speak_updated.py +++ b/src/deepgram/agent/v1/types/agent_v1speak_updated.py @@ -5,11 +5,10 @@ import pydantic from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1speak_updated_type import AgentV1SpeakUpdatedType class AgentV1SpeakUpdated(UncheckedBaseModel): - type: AgentV1SpeakUpdatedType = pydantic.Field() + type: typing.Literal["SpeakUpdated"] = pydantic.Field(default="SpeakUpdated") """ Message type identifier for speak update confirmation """ diff --git a/src/deepgram/agent/v1/types/agent_v1speak_updated_type.py b/src/deepgram/agent/v1/types/agent_v1speak_updated_type.py deleted file mode 100644 index ccab8cfc..00000000 --- a/src/deepgram/agent/v1/types/agent_v1speak_updated_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1SpeakUpdatedType = typing.Union[typing.Literal["SpeakUpdated"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_cartesia_voice.py b/src/deepgram/agent/v1/types/agent_v1think_updated.py similarity index 71% rename from src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_cartesia_voice.py rename to src/deepgram/agent/v1/types/agent_v1think_updated.py index c44e1eac..76274f06 100644 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_cartesia_voice.py +++ b/src/deepgram/agent/v1/types/agent_v1think_updated.py @@ -7,15 +7,10 @@ from ....core.unchecked_base_model import UncheckedBaseModel -class AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVoice(UncheckedBaseModel): - mode: str = pydantic.Field() +class AgentV1ThinkUpdated(UncheckedBaseModel): + type: typing.Literal["ThinkUpdated"] = pydantic.Field(default="ThinkUpdated") """ - Cartesia voice mode - """ - - id: str = pydantic.Field() - """ - Cartesia voice ID + Message type identifier for think update confirmation """ if IS_PYDANTIC_V2: diff --git a/src/deepgram/agent/v1/types/agent_v1update_prompt.py b/src/deepgram/agent/v1/types/agent_v1update_prompt.py index 90162802..8cbada41 100644 --- a/src/deepgram/agent/v1/types/agent_v1update_prompt.py +++ b/src/deepgram/agent/v1/types/agent_v1update_prompt.py @@ -5,11 +5,10 @@ import pydantic from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1update_prompt_type import AgentV1UpdatePromptType class AgentV1UpdatePrompt(UncheckedBaseModel): - type: AgentV1UpdatePromptType = pydantic.Field() + type: typing.Literal["UpdatePrompt"] = pydantic.Field(default="UpdatePrompt") """ Message type identifier for prompt update request """ diff --git a/src/deepgram/agent/v1/types/agent_v1update_prompt_type.py b/src/deepgram/agent/v1/types/agent_v1update_prompt_type.py deleted file mode 
100644 index ca8122be..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_prompt_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1UpdatePromptType = typing.Union[typing.Literal["UpdatePrompt"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak.py b/src/deepgram/agent/v1/types/agent_v1update_speak.py index 5d7a7e41..4b045d69 100644 --- a/src/deepgram/agent/v1/types/agent_v1update_speak.py +++ b/src/deepgram/agent/v1/types/agent_v1update_speak.py @@ -6,11 +6,10 @@ from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel from .agent_v1update_speak_speak import AgentV1UpdateSpeakSpeak -from .agent_v1update_speak_type import AgentV1UpdateSpeakType class AgentV1UpdateSpeak(UncheckedBaseModel): - type: AgentV1UpdateSpeakType = pydantic.Field() + type: typing.Literal["UpdateSpeak"] = pydantic.Field(default="UpdateSpeak") """ Message type identifier for updating the speak model """ diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak.py index a5a48320..eb34f060 100644 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak.py +++ b/src/deepgram/agent/v1/types/agent_v1update_speak_speak.py @@ -2,7 +2,6 @@ import typing -from .agent_v1update_speak_speak_endpoint import AgentV1UpdateSpeakSpeakEndpoint -from .agent_v1update_speak_speak_one_item import AgentV1UpdateSpeakSpeakOneItem +from ....types.speak_settings_v1 import SpeakSettingsV1 -AgentV1UpdateSpeakSpeak = typing.Union[AgentV1UpdateSpeakSpeakEndpoint, typing.List[AgentV1UpdateSpeakSpeakOneItem]] +AgentV1UpdateSpeakSpeak = typing.Union[SpeakSettingsV1, typing.List[SpeakSettingsV1]] diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint.py deleted file mode 100644 index 
86a7280b..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint.py +++ /dev/null @@ -1,27 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1update_speak_speak_endpoint_endpoint import AgentV1UpdateSpeakSpeakEndpointEndpoint -from .agent_v1update_speak_speak_endpoint_provider import AgentV1UpdateSpeakSpeakEndpointProvider - - -class AgentV1UpdateSpeakSpeakEndpoint(UncheckedBaseModel): - provider: AgentV1UpdateSpeakSpeakEndpointProvider - endpoint: typing.Optional[AgentV1UpdateSpeakSpeakEndpointEndpoint] = pydantic.Field(default=None) - """ - Optional if provider is Deepgram. Required for non-Deepgram TTS providers. - When present, must include url field and headers object. Valid schemes are https and wss with wss only supported for Eleven Labs. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_endpoint.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_endpoint.py deleted file mode 100644 index e8095d3e..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_endpoint.py +++ /dev/null @@ -1,30 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel - - -class AgentV1UpdateSpeakSpeakEndpointEndpoint(UncheckedBaseModel): - """ - Optional if provider is Deepgram. Required for non-Deepgram TTS providers. - When present, must include url field and headers object. 
Valid schemes are https and wss with wss only supported for Eleven Labs. - """ - - url: typing.Optional[str] = pydantic.Field(default=None) - """ - Custom TTS endpoint URL. Cannot contain `output_format` or `model_id` query parameters when the provider is Eleven Labs. - """ - - headers: typing.Optional[typing.Dict[str, str]] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider.py deleted file mode 100644 index f2446d53..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider.py +++ /dev/null @@ -1,144 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations - -import typing - -import pydantic -import typing_extensions -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel, UnionMetadata -from .agent_v1update_speak_speak_endpoint_provider_aws_polly_credentials import ( - AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentials, -) -from .agent_v1update_speak_speak_endpoint_provider_aws_polly_engine import ( - AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyEngine, -) -from .agent_v1update_speak_speak_endpoint_provider_aws_polly_voice import ( - AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyVoice, -) -from .agent_v1update_speak_speak_endpoint_provider_cartesia_model_id import ( - AgentV1UpdateSpeakSpeakEndpointProviderCartesiaModelId, -) -from .agent_v1update_speak_speak_endpoint_provider_cartesia_version import ( - AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVersion, -) -from .agent_v1update_speak_speak_endpoint_provider_cartesia_voice import ( - 
AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVoice, -) -from .agent_v1update_speak_speak_endpoint_provider_deepgram_model import ( - AgentV1UpdateSpeakSpeakEndpointProviderDeepgramModel, -) -from .agent_v1update_speak_speak_endpoint_provider_deepgram_version import ( - AgentV1UpdateSpeakSpeakEndpointProviderDeepgramVersion, -) -from .agent_v1update_speak_speak_endpoint_provider_eleven_labs_model_id import ( - AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsModelId, -) -from .agent_v1update_speak_speak_endpoint_provider_eleven_labs_version import ( - AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsVersion, -) -from .agent_v1update_speak_speak_endpoint_provider_open_ai_model import ( - AgentV1UpdateSpeakSpeakEndpointProviderOpenAiModel, -) -from .agent_v1update_speak_speak_endpoint_provider_open_ai_version import ( - AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVersion, -) -from .agent_v1update_speak_speak_endpoint_provider_open_ai_voice import ( - AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVoice, -) - - -class AgentV1UpdateSpeakSpeakEndpointProvider_Deepgram(UncheckedBaseModel): - type: typing.Literal["deepgram"] = "deepgram" - version: typing.Optional[AgentV1UpdateSpeakSpeakEndpointProviderDeepgramVersion] = None - model: AgentV1UpdateSpeakSpeakEndpointProviderDeepgramModel - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -class AgentV1UpdateSpeakSpeakEndpointProvider_ElevenLabs(UncheckedBaseModel): - type: typing.Literal["eleven_labs"] = "eleven_labs" - version: typing.Optional[AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsVersion] = None - model_id: AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsModelId - language: typing.Optional[str] = None - language_code: typing.Optional[str] = None - - if IS_PYDANTIC_V2: - model_config: 
typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -class AgentV1UpdateSpeakSpeakEndpointProvider_Cartesia(UncheckedBaseModel): - type: typing.Literal["cartesia"] = "cartesia" - version: typing.Optional[AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVersion] = None - model_id: AgentV1UpdateSpeakSpeakEndpointProviderCartesiaModelId - voice: AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVoice - language: typing.Optional[str] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -class AgentV1UpdateSpeakSpeakEndpointProvider_OpenAi(UncheckedBaseModel): - type: typing.Literal["open_ai"] = "open_ai" - version: typing.Optional[AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVersion] = None - model: AgentV1UpdateSpeakSpeakEndpointProviderOpenAiModel - voice: AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVoice - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -class AgentV1UpdateSpeakSpeakEndpointProvider_AwsPolly(UncheckedBaseModel): - type: typing.Literal["aws_polly"] = "aws_polly" - voice: AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyVoice - language: str - language_code: typing.Optional[str] = None - engine: AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyEngine - credentials: AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentials - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class 
Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -AgentV1UpdateSpeakSpeakEndpointProvider = typing_extensions.Annotated[ - typing.Union[ - AgentV1UpdateSpeakSpeakEndpointProvider_Deepgram, - AgentV1UpdateSpeakSpeakEndpointProvider_ElevenLabs, - AgentV1UpdateSpeakSpeakEndpointProvider_Cartesia, - AgentV1UpdateSpeakSpeakEndpointProvider_OpenAi, - AgentV1UpdateSpeakSpeakEndpointProvider_AwsPolly, - ], - UnionMetadata(discriminant="type"), -] diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_aws_polly.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_aws_polly.py deleted file mode 100644 index 11d2fe03..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_aws_polly.py +++ /dev/null @@ -1,45 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1update_speak_speak_endpoint_provider_aws_polly_credentials import ( - AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentials, -) -from .agent_v1update_speak_speak_endpoint_provider_aws_polly_engine import ( - AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyEngine, -) -from .agent_v1update_speak_speak_endpoint_provider_aws_polly_voice import ( - AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyVoice, -) - - -class AgentV1UpdateSpeakSpeakEndpointProviderAwsPolly(UncheckedBaseModel): - voice: AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyVoice = pydantic.Field() - """ - AWS Polly voice name - """ - - language: str = pydantic.Field() - """ - Language code to use, e.g. 'en-US'. Corresponds to the `language_code` parameter in the AWS Polly API - """ - - language_code: typing.Optional[str] = pydantic.Field(default=None) - """ - Use the `language` field instead. 
- """ - - engine: AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyEngine - credentials: AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentials - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_aws_polly_credentials.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_aws_polly_credentials.py deleted file mode 100644 index f042ae94..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_aws_polly_credentials.py +++ /dev/null @@ -1,30 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1update_speak_speak_endpoint_provider_aws_polly_credentials_type import ( - AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentialsType, -) - - -class AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentials(UncheckedBaseModel): - type: AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentialsType - region: str - access_key_id: str - secret_access_key: str - session_token: typing.Optional[str] = pydantic.Field(default=None) - """ - Required for STS only - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_aws_polly_credentials_type.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_aws_polly_credentials_type.py deleted file mode 100644 index 
87063f91..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_aws_polly_credentials_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyCredentialsType = typing.Union[typing.Literal["sts", "iam"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_aws_polly_engine.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_aws_polly_engine.py deleted file mode 100644 index 5d9f0a82..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_aws_polly_engine.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyEngine = typing.Union[ - typing.Literal["generative", "long-form", "standard", "neural"], typing.Any -] diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_aws_polly_voice.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_aws_polly_voice.py deleted file mode 100644 index e8a6592d..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_aws_polly_voice.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1UpdateSpeakSpeakEndpointProviderAwsPollyVoice = typing.Union[ - typing.Literal["Matthew", "Joanna", "Amy", "Emma", "Brian", "Arthur", "Aria", "Ayanda"], typing.Any -] diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_cartesia.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_cartesia.py deleted file mode 100644 index c8e1d77a..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_cartesia.py +++ /dev/null @@ -1,43 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1update_speak_speak_endpoint_provider_cartesia_model_id import ( - AgentV1UpdateSpeakSpeakEndpointProviderCartesiaModelId, -) -from .agent_v1update_speak_speak_endpoint_provider_cartesia_version import ( - AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVersion, -) -from .agent_v1update_speak_speak_endpoint_provider_cartesia_voice import ( - AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVoice, -) - - -class AgentV1UpdateSpeakSpeakEndpointProviderCartesia(UncheckedBaseModel): - version: typing.Optional[AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVersion] = pydantic.Field(default=None) - """ - The API version header for the Cartesia text-to-speech API - """ - - model_id: AgentV1UpdateSpeakSpeakEndpointProviderCartesiaModelId = pydantic.Field() - """ - Cartesia model ID - """ - - voice: AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVoice - language: typing.Optional[str] = pydantic.Field(default=None) - """ - Cartesia language code - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git 
a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_cartesia_model_id.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_cartesia_model_id.py deleted file mode 100644 index d9032180..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_cartesia_model_id.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1UpdateSpeakSpeakEndpointProviderCartesiaModelId = typing.Union[ - typing.Literal["sonic-2", "sonic-multilingual"], typing.Any -] diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_cartesia_version.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_cartesia_version.py deleted file mode 100644 index 464d35d4..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_cartesia_version.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1UpdateSpeakSpeakEndpointProviderCartesiaVersion = typing.Union[typing.Literal["2025-03-17"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_deepgram.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_deepgram.py deleted file mode 100644 index 36b8ba77..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_deepgram.py +++ /dev/null @@ -1,34 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1update_speak_speak_endpoint_provider_deepgram_model import ( - AgentV1UpdateSpeakSpeakEndpointProviderDeepgramModel, -) -from .agent_v1update_speak_speak_endpoint_provider_deepgram_version import ( - AgentV1UpdateSpeakSpeakEndpointProviderDeepgramVersion, -) - - -class AgentV1UpdateSpeakSpeakEndpointProviderDeepgram(UncheckedBaseModel): - version: typing.Optional[AgentV1UpdateSpeakSpeakEndpointProviderDeepgramVersion] = pydantic.Field(default=None) - """ - The REST API version for the Deepgram text-to-speech API - """ - - model: AgentV1UpdateSpeakSpeakEndpointProviderDeepgramModel = pydantic.Field() - """ - Deepgram TTS model - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_deepgram_model.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_deepgram_model.py deleted file mode 100644 index 59ca4fd1..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_deepgram_model.py +++ /dev/null @@ -1,72 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1UpdateSpeakSpeakEndpointProviderDeepgramModel = typing.Union[ - typing.Literal[ - "aura-asteria-en", - "aura-luna-en", - "aura-stella-en", - "aura-athena-en", - "aura-hera-en", - "aura-orion-en", - "aura-arcas-en", - "aura-perseus-en", - "aura-angus-en", - "aura-orpheus-en", - "aura-helios-en", - "aura-zeus-en", - "aura-2-amalthea-en", - "aura-2-andromeda-en", - "aura-2-apollo-en", - "aura-2-arcas-en", - "aura-2-aries-en", - "aura-2-asteria-en", - "aura-2-athena-en", - "aura-2-atlas-en", - "aura-2-aurora-en", - "aura-2-callista-en", - "aura-2-cora-en", - "aura-2-cordelia-en", - "aura-2-delia-en", - "aura-2-draco-en", - "aura-2-electra-en", - "aura-2-harmonia-en", - "aura-2-helena-en", - "aura-2-hera-en", - "aura-2-hermes-en", - "aura-2-hyperion-en", - "aura-2-iris-en", - "aura-2-janus-en", - "aura-2-juno-en", - "aura-2-jupiter-en", - "aura-2-luna-en", - "aura-2-mars-en", - "aura-2-minerva-en", - "aura-2-neptune-en", - "aura-2-odysseus-en", - "aura-2-ophelia-en", - "aura-2-orion-en", - "aura-2-orpheus-en", - "aura-2-pandora-en", - "aura-2-phoebe-en", - "aura-2-pluto-en", - "aura-2-saturn-en", - "aura-2-selene-en", - "aura-2-thalia-en", - "aura-2-theia-en", - "aura-2-vesta-en", - "aura-2-zeus-en", - "aura-2-sirio-es", - "aura-2-nestor-es", - "aura-2-carina-es", - "aura-2-celeste-es", - "aura-2-alvaro-es", - "aura-2-diana-es", - "aura-2-aquila-es", - "aura-2-selena-es", - "aura-2-estrella-es", - "aura-2-javier-es", - ], - typing.Any, -] diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_deepgram_version.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_deepgram_version.py deleted file mode 100644 index 367d022a..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_deepgram_version.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1UpdateSpeakSpeakEndpointProviderDeepgramVersion = typing.Union[typing.Literal["v1"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_eleven_labs.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_eleven_labs.py deleted file mode 100644 index 6a8d1e7e..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_eleven_labs.py +++ /dev/null @@ -1,44 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1update_speak_speak_endpoint_provider_eleven_labs_model_id import ( - AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsModelId, -) -from .agent_v1update_speak_speak_endpoint_provider_eleven_labs_version import ( - AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsVersion, -) - - -class AgentV1UpdateSpeakSpeakEndpointProviderElevenLabs(UncheckedBaseModel): - version: typing.Optional[AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsVersion] = pydantic.Field(default=None) - """ - The REST API version for the ElevenLabs text-to-speech API - """ - - model_id: AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsModelId = pydantic.Field() - """ - Eleven Labs model ID - """ - - language: typing.Optional[str] = pydantic.Field(default=None) - """ - Optional language to use, e.g. 'en-US'. Corresponds to the `language_code` parameter in the ElevenLabs API - """ - - language_code: typing.Optional[str] = pydantic.Field(default=None) - """ - Use the `language` field instead. 
- """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_eleven_labs_model_id.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_eleven_labs_model_id.py deleted file mode 100644 index f8b807ca..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_eleven_labs_model_id.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsModelId = typing.Union[ - typing.Literal["eleven_turbo_v2_5", "eleven_monolingual_v1", "eleven_multilingual_v2"], typing.Any -] diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_eleven_labs_version.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_eleven_labs_version.py deleted file mode 100644 index 127403d2..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_eleven_labs_version.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1UpdateSpeakSpeakEndpointProviderElevenLabsVersion = typing.Union[typing.Literal["v1"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_open_ai.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_open_ai.py deleted file mode 100644 index 29cac1b3..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_open_ai.py +++ /dev/null @@ -1,42 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1update_speak_speak_endpoint_provider_open_ai_model import ( - AgentV1UpdateSpeakSpeakEndpointProviderOpenAiModel, -) -from .agent_v1update_speak_speak_endpoint_provider_open_ai_version import ( - AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVersion, -) -from .agent_v1update_speak_speak_endpoint_provider_open_ai_voice import ( - AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVoice, -) - - -class AgentV1UpdateSpeakSpeakEndpointProviderOpenAi(UncheckedBaseModel): - version: typing.Optional[AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVersion] = pydantic.Field(default=None) - """ - The REST API version for the OpenAI text-to-speech API - """ - - model: AgentV1UpdateSpeakSpeakEndpointProviderOpenAiModel = pydantic.Field() - """ - OpenAI TTS model - """ - - voice: AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVoice = pydantic.Field() - """ - OpenAI voice - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_open_ai_model.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_open_ai_model.py deleted file mode 100644 index 76522c3e..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_open_ai_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1UpdateSpeakSpeakEndpointProviderOpenAiModel = typing.Union[typing.Literal["tts-1", "tts-1-hd"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_open_ai_version.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_open_ai_version.py deleted file mode 100644 index e25be55a..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_open_ai_version.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVersion = typing.Union[typing.Literal["v1"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_open_ai_voice.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_open_ai_voice.py deleted file mode 100644 index d360b702..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_endpoint_provider_open_ai_voice.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1UpdateSpeakSpeakEndpointProviderOpenAiVoice = typing.Union[ - typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any -] diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item.py deleted file mode 100644 index 6310beb5..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item.py +++ /dev/null @@ -1,27 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1update_speak_speak_one_item_endpoint import AgentV1UpdateSpeakSpeakOneItemEndpoint -from .agent_v1update_speak_speak_one_item_provider import AgentV1UpdateSpeakSpeakOneItemProvider - - -class AgentV1UpdateSpeakSpeakOneItem(UncheckedBaseModel): - provider: AgentV1UpdateSpeakSpeakOneItemProvider - endpoint: typing.Optional[AgentV1UpdateSpeakSpeakOneItemEndpoint] = pydantic.Field(default=None) - """ - Optional if provider is Deepgram. Required for non-Deepgram TTS providers. - When present, must include url field and headers object. Valid schemes are https and wss with wss only supported for Eleven Labs. - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider.py deleted file mode 100644 index 56702f29..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider.py +++ /dev/null @@ -1,144 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -from __future__ import annotations - -import typing - -import pydantic -import typing_extensions -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel, UnionMetadata -from .agent_v1update_speak_speak_one_item_provider_aws_polly_credentials import ( - AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentials, -) -from .agent_v1update_speak_speak_one_item_provider_aws_polly_engine import ( - AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyEngine, -) -from .agent_v1update_speak_speak_one_item_provider_aws_polly_voice import ( - AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyVoice, -) -from .agent_v1update_speak_speak_one_item_provider_cartesia_model_id import ( - AgentV1UpdateSpeakSpeakOneItemProviderCartesiaModelId, -) -from .agent_v1update_speak_speak_one_item_provider_cartesia_version import ( - AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVersion, -) -from .agent_v1update_speak_speak_one_item_provider_cartesia_voice import ( - AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVoice, -) -from .agent_v1update_speak_speak_one_item_provider_deepgram_model import ( - AgentV1UpdateSpeakSpeakOneItemProviderDeepgramModel, -) -from .agent_v1update_speak_speak_one_item_provider_deepgram_version import ( - AgentV1UpdateSpeakSpeakOneItemProviderDeepgramVersion, -) -from .agent_v1update_speak_speak_one_item_provider_eleven_labs_model_id import ( - AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsModelId, -) -from .agent_v1update_speak_speak_one_item_provider_eleven_labs_version import ( - AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsVersion, -) -from .agent_v1update_speak_speak_one_item_provider_open_ai_model import ( - AgentV1UpdateSpeakSpeakOneItemProviderOpenAiModel, -) -from .agent_v1update_speak_speak_one_item_provider_open_ai_version import ( - AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVersion, -) -from .agent_v1update_speak_speak_one_item_provider_open_ai_voice import ( - 
AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVoice, -) - - -class AgentV1UpdateSpeakSpeakOneItemProvider_Deepgram(UncheckedBaseModel): - type: typing.Literal["deepgram"] = "deepgram" - version: typing.Optional[AgentV1UpdateSpeakSpeakOneItemProviderDeepgramVersion] = None - model: AgentV1UpdateSpeakSpeakOneItemProviderDeepgramModel - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -class AgentV1UpdateSpeakSpeakOneItemProvider_ElevenLabs(UncheckedBaseModel): - type: typing.Literal["eleven_labs"] = "eleven_labs" - version: typing.Optional[AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsVersion] = None - model_id: AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsModelId - language: typing.Optional[str] = None - language_code: typing.Optional[str] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -class AgentV1UpdateSpeakSpeakOneItemProvider_Cartesia(UncheckedBaseModel): - type: typing.Literal["cartesia"] = "cartesia" - version: typing.Optional[AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVersion] = None - model_id: AgentV1UpdateSpeakSpeakOneItemProviderCartesiaModelId - voice: AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVoice - language: typing.Optional[str] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -class AgentV1UpdateSpeakSpeakOneItemProvider_OpenAi(UncheckedBaseModel): - type: typing.Literal["open_ai"] = "open_ai" - version: 
typing.Optional[AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVersion] = None - model: AgentV1UpdateSpeakSpeakOneItemProviderOpenAiModel - voice: AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVoice - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -class AgentV1UpdateSpeakSpeakOneItemProvider_AwsPolly(UncheckedBaseModel): - type: typing.Literal["aws_polly"] = "aws_polly" - voice: AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyVoice - language: str - language_code: typing.Optional[str] = None - engine: AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyEngine - credentials: AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentials - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -AgentV1UpdateSpeakSpeakOneItemProvider = typing_extensions.Annotated[ - typing.Union[ - AgentV1UpdateSpeakSpeakOneItemProvider_Deepgram, - AgentV1UpdateSpeakSpeakOneItemProvider_ElevenLabs, - AgentV1UpdateSpeakSpeakOneItemProvider_Cartesia, - AgentV1UpdateSpeakSpeakOneItemProvider_OpenAi, - AgentV1UpdateSpeakSpeakOneItemProvider_AwsPolly, - ], - UnionMetadata(discriminant="type"), -] diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_aws_polly.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_aws_polly.py deleted file mode 100644 index 3ef8938c..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_aws_polly.py +++ /dev/null @@ -1,45 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1update_speak_speak_one_item_provider_aws_polly_credentials import ( - AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentials, -) -from .agent_v1update_speak_speak_one_item_provider_aws_polly_engine import ( - AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyEngine, -) -from .agent_v1update_speak_speak_one_item_provider_aws_polly_voice import ( - AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyVoice, -) - - -class AgentV1UpdateSpeakSpeakOneItemProviderAwsPolly(UncheckedBaseModel): - voice: AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyVoice = pydantic.Field() - """ - AWS Polly voice name - """ - - language: str = pydantic.Field() - """ - Language code to use, e.g. 'en-US'. Corresponds to the `language_code` parameter in the AWS Polly API - """ - - language_code: typing.Optional[str] = pydantic.Field(default=None) - """ - Use the `language` field instead. - """ - - engine: AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyEngine - credentials: AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentials - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_aws_polly_credentials_type.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_aws_polly_credentials_type.py deleted file mode 100644 index a10f9924..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_aws_polly_credentials_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentialsType = typing.Union[typing.Literal["sts", "iam"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_aws_polly_engine.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_aws_polly_engine.py deleted file mode 100644 index 1ba214f0..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_aws_polly_engine.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyEngine = typing.Union[ - typing.Literal["generative", "long-form", "standard", "neural"], typing.Any -] diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_cartesia.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_cartesia.py deleted file mode 100644 index 4ebf676c..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_cartesia.py +++ /dev/null @@ -1,43 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1update_speak_speak_one_item_provider_cartesia_model_id import ( - AgentV1UpdateSpeakSpeakOneItemProviderCartesiaModelId, -) -from .agent_v1update_speak_speak_one_item_provider_cartesia_version import ( - AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVersion, -) -from .agent_v1update_speak_speak_one_item_provider_cartesia_voice import ( - AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVoice, -) - - -class AgentV1UpdateSpeakSpeakOneItemProviderCartesia(UncheckedBaseModel): - version: typing.Optional[AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVersion] = pydantic.Field(default=None) - """ - The API version header for the Cartesia text-to-speech API - """ - - model_id: AgentV1UpdateSpeakSpeakOneItemProviderCartesiaModelId = pydantic.Field() - """ - Cartesia model ID - """ - - voice: AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVoice - language: typing.Optional[str] = pydantic.Field(default=None) - """ - Cartesia language code - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_cartesia_model_id.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_cartesia_model_id.py deleted file mode 100644 index 9749b8a5..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_cartesia_model_id.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1UpdateSpeakSpeakOneItemProviderCartesiaModelId = typing.Union[ - typing.Literal["sonic-2", "sonic-multilingual"], typing.Any -] diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_cartesia_version.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_cartesia_version.py deleted file mode 100644 index 9a6f6c37..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_cartesia_version.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVersion = typing.Union[typing.Literal["2025-03-17"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_deepgram.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_deepgram.py deleted file mode 100644 index 5ac39d7d..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_deepgram.py +++ /dev/null @@ -1,34 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1update_speak_speak_one_item_provider_deepgram_model import ( - AgentV1UpdateSpeakSpeakOneItemProviderDeepgramModel, -) -from .agent_v1update_speak_speak_one_item_provider_deepgram_version import ( - AgentV1UpdateSpeakSpeakOneItemProviderDeepgramVersion, -) - - -class AgentV1UpdateSpeakSpeakOneItemProviderDeepgram(UncheckedBaseModel): - version: typing.Optional[AgentV1UpdateSpeakSpeakOneItemProviderDeepgramVersion] = pydantic.Field(default=None) - """ - The REST API version for the Deepgram text-to-speech API - """ - - model: AgentV1UpdateSpeakSpeakOneItemProviderDeepgramModel = pydantic.Field() - """ - Deepgram TTS model - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_deepgram_version.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_deepgram_version.py deleted file mode 100644 index 757270a9..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_deepgram_version.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1UpdateSpeakSpeakOneItemProviderDeepgramVersion = typing.Union[typing.Literal["v1"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_eleven_labs.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_eleven_labs.py deleted file mode 100644 index 5e74832b..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_eleven_labs.py +++ /dev/null @@ -1,44 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1update_speak_speak_one_item_provider_eleven_labs_model_id import ( - AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsModelId, -) -from .agent_v1update_speak_speak_one_item_provider_eleven_labs_version import ( - AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsVersion, -) - - -class AgentV1UpdateSpeakSpeakOneItemProviderElevenLabs(UncheckedBaseModel): - version: typing.Optional[AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsVersion] = pydantic.Field(default=None) - """ - The REST API version for the ElevenLabs text-to-speech API - """ - - model_id: AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsModelId = pydantic.Field() - """ - Eleven Labs model ID - """ - - language: typing.Optional[str] = pydantic.Field(default=None) - """ - Optional language to use, e.g. 'en-US'. Corresponds to the `language_code` parameter in the ElevenLabs API - """ - - language_code: typing.Optional[str] = pydantic.Field(default=None) - """ - Use the `language` field instead. 
- """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_eleven_labs_version.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_eleven_labs_version.py deleted file mode 100644 index 5440d576..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_eleven_labs_version.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsVersion = typing.Union[typing.Literal["v1"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_open_ai.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_open_ai.py deleted file mode 100644 index 025f5cc4..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_open_ai.py +++ /dev/null @@ -1,42 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1update_speak_speak_one_item_provider_open_ai_model import ( - AgentV1UpdateSpeakSpeakOneItemProviderOpenAiModel, -) -from .agent_v1update_speak_speak_one_item_provider_open_ai_version import ( - AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVersion, -) -from .agent_v1update_speak_speak_one_item_provider_open_ai_voice import ( - AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVoice, -) - - -class AgentV1UpdateSpeakSpeakOneItemProviderOpenAi(UncheckedBaseModel): - version: typing.Optional[AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVersion] = pydantic.Field(default=None) - """ - The REST API version for the OpenAI text-to-speech API - """ - - model: AgentV1UpdateSpeakSpeakOneItemProviderOpenAiModel = pydantic.Field() - """ - OpenAI TTS model - """ - - voice: AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVoice = pydantic.Field() - """ - OpenAI voice - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_open_ai_model.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_open_ai_model.py deleted file mode 100644 index f2e71577..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_open_ai_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1UpdateSpeakSpeakOneItemProviderOpenAiModel = typing.Union[typing.Literal["tts-1", "tts-1-hd"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_open_ai_version.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_open_ai_version.py deleted file mode 100644 index 8d06f519..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_open_ai_version.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVersion = typing.Union[typing.Literal["v1"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_open_ai_voice.py b/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_open_ai_voice.py deleted file mode 100644 index 742d729e..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_open_ai_voice.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentV1UpdateSpeakSpeakOneItemProviderOpenAiVoice = typing.Union[ - typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any -] diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_type.py b/src/deepgram/agent/v1/types/agent_v1update_speak_type.py deleted file mode 100644 index 1c1f127a..00000000 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1UpdateSpeakType = typing.Union[typing.Literal["UpdateSpeak"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_cartesia_voice.py b/src/deepgram/agent/v1/types/agent_v1update_think.py similarity index 65% rename from src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_cartesia_voice.py rename to src/deepgram/agent/v1/types/agent_v1update_think.py index 783363d3..4236c4b7 100644 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_one_item_provider_cartesia_voice.py +++ b/src/deepgram/agent/v1/types/agent_v1update_think.py @@ -5,18 +5,16 @@ import pydantic from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel +from .agent_v1update_think_think import AgentV1UpdateThinkThink -class AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoice(UncheckedBaseModel): - mode: str = pydantic.Field() +class AgentV1UpdateThink(UncheckedBaseModel): + type: typing.Literal["UpdateThink"] = pydantic.Field(default="UpdateThink") """ - Cartesia voice mode + Message type identifier for updating the think model """ - id: str = pydantic.Field() - """ - Cartesia voice ID - """ + think: AgentV1UpdateThinkThink if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/deepgram/agent/v1/types/agent_v1update_think_think.py b/src/deepgram/agent/v1/types/agent_v1update_think_think.py new file mode 100644 index 00000000..93e087b5 --- /dev/null +++ b/src/deepgram/agent/v1/types/agent_v1update_think_think.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +from ....types.think_settings_v1 import ThinkSettingsV1 + +AgentV1UpdateThinkThink = typing.Union[ThinkSettingsV1, typing.List[ThinkSettingsV1]] diff --git a/src/deepgram/agent/v1/types/agent_v1user_started_speaking.py b/src/deepgram/agent/v1/types/agent_v1user_started_speaking.py index 7f991033..afc96fed 100644 --- a/src/deepgram/agent/v1/types/agent_v1user_started_speaking.py +++ b/src/deepgram/agent/v1/types/agent_v1user_started_speaking.py @@ -5,11 +5,10 @@ import pydantic from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1user_started_speaking_type import AgentV1UserStartedSpeakingType class AgentV1UserStartedSpeaking(UncheckedBaseModel): - type: AgentV1UserStartedSpeakingType = pydantic.Field() + type: typing.Literal["UserStartedSpeaking"] = pydantic.Field(default="UserStartedSpeaking") """ Message type identifier indicating that the user has begun speaking """ diff --git a/src/deepgram/agent/v1/types/agent_v1user_started_speaking_type.py b/src/deepgram/agent/v1/types/agent_v1user_started_speaking_type.py deleted file mode 100644 index b23ebccb..00000000 --- a/src/deepgram/agent/v1/types/agent_v1user_started_speaking_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1UserStartedSpeakingType = typing.Union[typing.Literal["UserStartedSpeaking"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1warning.py b/src/deepgram/agent/v1/types/agent_v1warning.py index 4ac416c5..c4741050 100644 --- a/src/deepgram/agent/v1/types/agent_v1warning.py +++ b/src/deepgram/agent/v1/types/agent_v1warning.py @@ -5,7 +5,6 @@ import pydantic from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1warning_type import AgentV1WarningType class AgentV1Warning(UncheckedBaseModel): @@ -13,7 +12,7 @@ class AgentV1Warning(UncheckedBaseModel): Notifies the client of non-fatal errors or warnings """ - type: AgentV1WarningType = pydantic.Field() + type: typing.Literal["Warning"] = pydantic.Field(default="Warning") """ Message type identifier for warnings """ diff --git a/src/deepgram/agent/v1/types/agent_v1welcome.py b/src/deepgram/agent/v1/types/agent_v1welcome.py index c9c04406..128700c0 100644 --- a/src/deepgram/agent/v1/types/agent_v1welcome.py +++ b/src/deepgram/agent/v1/types/agent_v1welcome.py @@ -5,11 +5,10 @@ import pydantic from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1welcome_type import AgentV1WelcomeType class AgentV1Welcome(UncheckedBaseModel): - type: AgentV1WelcomeType = pydantic.Field() + type: typing.Literal["Welcome"] = pydantic.Field(default="Welcome") """ Message type identifier for welcome message """ diff --git a/src/deepgram/agent/v1/types/agent_v1welcome_type.py b/src/deepgram/agent/v1/types/agent_v1welcome_type.py deleted file mode 100644 index d2902f24..00000000 --- a/src/deepgram/agent/v1/types/agent_v1welcome_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentV1WelcomeType = typing.Union[typing.Literal["Welcome"], typing.Any] diff --git a/src/deepgram/agent/v1/types/cartesia.py b/src/deepgram/agent/v1/types/cartesia.py deleted file mode 100644 index d5fa86be..00000000 --- a/src/deepgram/agent/v1/types/cartesia.py +++ /dev/null @@ -1,43 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1settings_agent_speak_one_item_provider_cartesia_model_id import ( - AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId, -) -from .agent_v1settings_agent_speak_one_item_provider_cartesia_version import ( - AgentV1SettingsAgentSpeakOneItemProviderCartesiaVersion, -) -from .agent_v1settings_agent_speak_one_item_provider_cartesia_voice import ( - AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoice, -) - - -class Cartesia(UncheckedBaseModel): - version: typing.Optional[AgentV1SettingsAgentSpeakOneItemProviderCartesiaVersion] = pydantic.Field(default=None) - """ - The API version header for the Cartesia text-to-speech API - """ - - model_id: AgentV1SettingsAgentSpeakOneItemProviderCartesiaModelId = pydantic.Field() - """ - Cartesia model ID - """ - - voice: AgentV1SettingsAgentSpeakOneItemProviderCartesiaVoice - language: typing.Optional[str] = pydantic.Field(default=None) - """ - Cartesia language code - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/deepgram/agent/v1/types/deepgram.py b/src/deepgram/agent/v1/types/deepgram.py deleted file mode 100644 index 78fec994..00000000 --- a/src/deepgram/agent/v1/types/deepgram.py +++ /dev/null @@ -1,34 +0,0 @@ -# This file was auto-generated by Fern from our API 
Definition. - -import typing - -import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1settings_agent_speak_one_item_provider_deepgram_model import ( - AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel, -) -from .agent_v1settings_agent_speak_one_item_provider_deepgram_version import ( - AgentV1SettingsAgentSpeakOneItemProviderDeepgramVersion, -) - - -class Deepgram(UncheckedBaseModel): - version: typing.Optional[AgentV1SettingsAgentSpeakOneItemProviderDeepgramVersion] = pydantic.Field(default=None) - """ - The REST API version for the Deepgram text-to-speech API - """ - - model: AgentV1SettingsAgentSpeakOneItemProviderDeepgramModel = pydantic.Field() - """ - Deepgram TTS model - """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/deepgram/agent/v1/types/eleven_labs.py b/src/deepgram/agent/v1/types/eleven_labs.py deleted file mode 100644 index ef3ea856..00000000 --- a/src/deepgram/agent/v1/types/eleven_labs.py +++ /dev/null @@ -1,44 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1settings_agent_speak_one_item_provider_eleven_labs_model_id import ( - AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId, -) -from .agent_v1settings_agent_speak_one_item_provider_eleven_labs_version import ( - AgentV1SettingsAgentSpeakOneItemProviderElevenLabsVersion, -) - - -class ElevenLabs(UncheckedBaseModel): - version: typing.Optional[AgentV1SettingsAgentSpeakOneItemProviderElevenLabsVersion] = pydantic.Field(default=None) - """ - The REST API version for the ElevenLabs text-to-speech API - """ - - model_id: AgentV1SettingsAgentSpeakOneItemProviderElevenLabsModelId = pydantic.Field() - """ - Eleven Labs model ID - """ - - language: typing.Optional[str] = pydantic.Field(default=None) - """ - Optional language to use, e.g. 'en-US'. Corresponds to the `language_code` parameter in the ElevenLabs API - """ - - language_code: typing.Optional[str] = pydantic.Field(default=None) - """ - Use the `language` field instead. 
- """ - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/deepgram/base_client.py b/src/deepgram/base_client.py index 7ee628e3..e39417ff 100644 --- a/src/deepgram/base_client.py +++ b/src/deepgram/base_client.py @@ -19,6 +19,7 @@ from .read.client import AsyncReadClient, ReadClient from .self_hosted.client import AsyncSelfHostedClient, SelfHostedClient from .speak.client import AsyncSpeakClient, SpeakClient + from .voice_agent.client import AsyncVoiceAgentClient, VoiceAgentClient class BaseClient: @@ -98,6 +99,7 @@ def __init__( self._read: typing.Optional[ReadClient] = None self._self_hosted: typing.Optional[SelfHostedClient] = None self._speak: typing.Optional[SpeakClient] = None + self._voice_agent: typing.Optional[VoiceAgentClient] = None @property def agent(self): @@ -155,6 +157,14 @@ def speak(self): self._speak = SpeakClient(client_wrapper=self._client_wrapper) return self._speak + @property + def voice_agent(self): + if self._voice_agent is None: + from .voice_agent.client import VoiceAgentClient # noqa: E402 + + self._voice_agent = VoiceAgentClient(client_wrapper=self._client_wrapper) + return self._voice_agent + class AsyncBaseClient: """ @@ -233,6 +243,7 @@ def __init__( self._read: typing.Optional[AsyncReadClient] = None self._self_hosted: typing.Optional[AsyncSelfHostedClient] = None self._speak: typing.Optional[AsyncSpeakClient] = None + self._voice_agent: typing.Optional[AsyncVoiceAgentClient] = None @property def agent(self): @@ -289,3 +300,11 @@ def speak(self): self._speak = AsyncSpeakClient(client_wrapper=self._client_wrapper) return self._speak + + @property + def voice_agent(self): + if self._voice_agent is None: + from .voice_agent.client import AsyncVoiceAgentClient # noqa: E402 + + self._voice_agent = 
AsyncVoiceAgentClient(client_wrapper=self._client_wrapper) + return self._voice_agent diff --git a/src/deepgram/core/client_wrapper.py b/src/deepgram/core/client_wrapper.py index 51402d11..89065f2c 100644 --- a/src/deepgram/core/client_wrapper.py +++ b/src/deepgram/core/client_wrapper.py @@ -28,12 +28,12 @@ def get_headers(self) -> typing.Dict[str, str]: import platform headers: typing.Dict[str, str] = { - "User-Agent": "deepgram-sdk/6.0.2", + "User-Agent": "deepgram-sdk/6.1.2", "X-Fern-Language": "Python", "X-Fern-Runtime": f"python/{platform.python_version()}", "X-Fern-Platform": f"{platform.system().lower()}/{platform.release()}", "X-Fern-SDK-Name": "deepgram-sdk", - "X-Fern-SDK-Version": "6.0.2", + "X-Fern-SDK-Version": "6.1.2", **(self.get_custom_headers() or {}), } headers["Authorization"] = f"Token {self.api_key}" diff --git a/src/deepgram/listen/__init__.py b/src/deepgram/listen/__init__.py index 236cdc61..1240afba 100644 --- a/src/deepgram/listen/__init__.py +++ b/src/deepgram/listen/__init__.py @@ -19,7 +19,6 @@ ListenV1KeepAliveType, ListenV1Metadata, ListenV1MetadataParams, - ListenV1MetadataType, ListenV1Results, ListenV1ResultsChannel, ListenV1ResultsChannelAlternativesItem, @@ -34,28 +33,24 @@ ListenV1ResultsMetadataModelInfoParams, ListenV1ResultsMetadataParams, ListenV1ResultsParams, - ListenV1ResultsType, ListenV1SpeechStarted, ListenV1SpeechStartedParams, - ListenV1SpeechStartedType, ListenV1UtteranceEnd, ListenV1UtteranceEndParams, - ListenV1UtteranceEndType, ) from .v2 import ( ListenV2CloseStream, ListenV2CloseStreamParams, ListenV2CloseStreamType, + ListenV2ConfigureFailure, + ListenV2ConfigureFailureParams, ListenV2Connected, ListenV2ConnectedParams, - ListenV2ConnectedType, ListenV2FatalError, ListenV2FatalErrorParams, - ListenV2FatalErrorType, ListenV2TurnInfo, ListenV2TurnInfoEvent, ListenV2TurnInfoParams, - ListenV2TurnInfoType, ListenV2TurnInfoWordsItem, ListenV2TurnInfoWordsItemParams, ) @@ -71,7 +66,6 @@ "ListenV1KeepAliveType": 
".v1", "ListenV1Metadata": ".v1", "ListenV1MetadataParams": ".v1", - "ListenV1MetadataType": ".v1", "ListenV1Results": ".v1", "ListenV1ResultsChannel": ".v1", "ListenV1ResultsChannelAlternativesItem": ".v1", @@ -86,26 +80,22 @@ "ListenV1ResultsMetadataModelInfoParams": ".v1", "ListenV1ResultsMetadataParams": ".v1", "ListenV1ResultsParams": ".v1", - "ListenV1ResultsType": ".v1", "ListenV1SpeechStarted": ".v1", "ListenV1SpeechStartedParams": ".v1", - "ListenV1SpeechStartedType": ".v1", "ListenV1UtteranceEnd": ".v1", "ListenV1UtteranceEndParams": ".v1", - "ListenV1UtteranceEndType": ".v1", "ListenV2CloseStream": ".v2", "ListenV2CloseStreamParams": ".v2", "ListenV2CloseStreamType": ".v2", + "ListenV2ConfigureFailure": ".v2", + "ListenV2ConfigureFailureParams": ".v2", "ListenV2Connected": ".v2", "ListenV2ConnectedParams": ".v2", - "ListenV2ConnectedType": ".v2", "ListenV2FatalError": ".v2", "ListenV2FatalErrorParams": ".v2", - "ListenV2FatalErrorType": ".v2", "ListenV2TurnInfo": ".v2", "ListenV2TurnInfoEvent": ".v2", "ListenV2TurnInfoParams": ".v2", - "ListenV2TurnInfoType": ".v2", "ListenV2TurnInfoWordsItem": ".v2", "ListenV2TurnInfoWordsItemParams": ".v2", "v1": ".v1", @@ -146,7 +136,6 @@ def __dir__(): "ListenV1KeepAliveType", "ListenV1Metadata", "ListenV1MetadataParams", - "ListenV1MetadataType", "ListenV1Results", "ListenV1ResultsChannel", "ListenV1ResultsChannelAlternativesItem", @@ -161,26 +150,22 @@ def __dir__(): "ListenV1ResultsMetadataModelInfoParams", "ListenV1ResultsMetadataParams", "ListenV1ResultsParams", - "ListenV1ResultsType", "ListenV1SpeechStarted", "ListenV1SpeechStartedParams", - "ListenV1SpeechStartedType", "ListenV1UtteranceEnd", "ListenV1UtteranceEndParams", - "ListenV1UtteranceEndType", "ListenV2CloseStream", "ListenV2CloseStreamParams", "ListenV2CloseStreamType", + "ListenV2ConfigureFailure", + "ListenV2ConfigureFailureParams", "ListenV2Connected", "ListenV2ConnectedParams", - "ListenV2ConnectedType", "ListenV2FatalError", 
"ListenV2FatalErrorParams", - "ListenV2FatalErrorType", "ListenV2TurnInfo", "ListenV2TurnInfoEvent", "ListenV2TurnInfoParams", - "ListenV2TurnInfoType", "ListenV2TurnInfoWordsItem", "ListenV2TurnInfoWordsItemParams", "v1", diff --git a/src/deepgram/listen/v1/__init__.py b/src/deepgram/listen/v1/__init__.py index a8115d75..8b43c4ff 100644 --- a/src/deepgram/listen/v1/__init__.py +++ b/src/deepgram/listen/v1/__init__.py @@ -14,7 +14,6 @@ ListenV1KeepAlive, ListenV1KeepAliveType, ListenV1Metadata, - ListenV1MetadataType, ListenV1Results, ListenV1ResultsChannel, ListenV1ResultsChannelAlternativesItem, @@ -22,11 +21,8 @@ ListenV1ResultsEntitiesItem, ListenV1ResultsMetadata, ListenV1ResultsMetadataModelInfo, - ListenV1ResultsType, ListenV1SpeechStarted, - ListenV1SpeechStartedType, ListenV1UtteranceEnd, - ListenV1UtteranceEndType, ) from . import media from .media import ( @@ -34,9 +30,9 @@ MediaTranscribeRequestCustomIntentMode, MediaTranscribeRequestCustomTopicMode, MediaTranscribeRequestEncoding, - MediaTranscribeRequestModelZero, - MediaTranscribeRequestSummarizeZero, - MediaTranscribeRequestVersionZero, + MediaTranscribeRequestModel, + MediaTranscribeRequestSummarize, + MediaTranscribeRequestVersion, MediaTranscribeResponse, MediaTranscribeResponseParams, ) @@ -67,7 +63,6 @@ "ListenV1KeepAliveType": ".types", "ListenV1Metadata": ".types", "ListenV1MetadataParams": ".requests", - "ListenV1MetadataType": ".types", "ListenV1Results": ".types", "ListenV1ResultsChannel": ".types", "ListenV1ResultsChannelAlternativesItem": ".types", @@ -82,20 +77,17 @@ "ListenV1ResultsMetadataModelInfoParams": ".requests", "ListenV1ResultsMetadataParams": ".requests", "ListenV1ResultsParams": ".requests", - "ListenV1ResultsType": ".types", "ListenV1SpeechStarted": ".types", "ListenV1SpeechStartedParams": ".requests", - "ListenV1SpeechStartedType": ".types", "ListenV1UtteranceEnd": ".types", "ListenV1UtteranceEndParams": ".requests", - "ListenV1UtteranceEndType": ".types", 
"MediaTranscribeRequestCallbackMethod": ".media", "MediaTranscribeRequestCustomIntentMode": ".media", "MediaTranscribeRequestCustomTopicMode": ".media", "MediaTranscribeRequestEncoding": ".media", - "MediaTranscribeRequestModelZero": ".media", - "MediaTranscribeRequestSummarizeZero": ".media", - "MediaTranscribeRequestVersionZero": ".media", + "MediaTranscribeRequestModel": ".media", + "MediaTranscribeRequestSummarize": ".media", + "MediaTranscribeRequestVersion": ".media", "MediaTranscribeResponse": ".media", "MediaTranscribeResponseParams": ".media", "media": ".media", @@ -135,7 +127,6 @@ def __dir__(): "ListenV1KeepAliveType", "ListenV1Metadata", "ListenV1MetadataParams", - "ListenV1MetadataType", "ListenV1Results", "ListenV1ResultsChannel", "ListenV1ResultsChannelAlternativesItem", @@ -150,20 +141,17 @@ def __dir__(): "ListenV1ResultsMetadataModelInfoParams", "ListenV1ResultsMetadataParams", "ListenV1ResultsParams", - "ListenV1ResultsType", "ListenV1SpeechStarted", "ListenV1SpeechStartedParams", - "ListenV1SpeechStartedType", "ListenV1UtteranceEnd", "ListenV1UtteranceEndParams", - "ListenV1UtteranceEndType", "MediaTranscribeRequestCallbackMethod", "MediaTranscribeRequestCustomIntentMode", "MediaTranscribeRequestCustomTopicMode", "MediaTranscribeRequestEncoding", - "MediaTranscribeRequestModelZero", - "MediaTranscribeRequestSummarizeZero", - "MediaTranscribeRequestVersionZero", + "MediaTranscribeRequestModel", + "MediaTranscribeRequestSummarize", + "MediaTranscribeRequestVersion", "MediaTranscribeResponse", "MediaTranscribeResponseParams", "media", diff --git a/src/deepgram/listen/v1/media/__init__.py b/src/deepgram/listen/v1/media/__init__.py index 6cadb71a..495ed32d 100644 --- a/src/deepgram/listen/v1/media/__init__.py +++ b/src/deepgram/listen/v1/media/__init__.py @@ -11,9 +11,9 @@ MediaTranscribeRequestCustomIntentMode, MediaTranscribeRequestCustomTopicMode, MediaTranscribeRequestEncoding, - MediaTranscribeRequestModelZero, - 
MediaTranscribeRequestSummarizeZero, - MediaTranscribeRequestVersionZero, + MediaTranscribeRequestModel, + MediaTranscribeRequestSummarize, + MediaTranscribeRequestVersion, MediaTranscribeResponse, ) from .requests import MediaTranscribeResponseParams @@ -22,9 +22,9 @@ "MediaTranscribeRequestCustomIntentMode": ".types", "MediaTranscribeRequestCustomTopicMode": ".types", "MediaTranscribeRequestEncoding": ".types", - "MediaTranscribeRequestModelZero": ".types", - "MediaTranscribeRequestSummarizeZero": ".types", - "MediaTranscribeRequestVersionZero": ".types", + "MediaTranscribeRequestModel": ".types", + "MediaTranscribeRequestSummarize": ".types", + "MediaTranscribeRequestVersion": ".types", "MediaTranscribeResponse": ".types", "MediaTranscribeResponseParams": ".requests", } @@ -56,9 +56,9 @@ def __dir__(): "MediaTranscribeRequestCustomIntentMode", "MediaTranscribeRequestCustomTopicMode", "MediaTranscribeRequestEncoding", - "MediaTranscribeRequestModelZero", - "MediaTranscribeRequestSummarizeZero", - "MediaTranscribeRequestVersionZero", + "MediaTranscribeRequestModel", + "MediaTranscribeRequestSummarize", + "MediaTranscribeRequestVersion", "MediaTranscribeResponse", "MediaTranscribeResponseParams", ] diff --git a/src/deepgram/listen/v1/media/client.py b/src/deepgram/listen/v1/media/client.py index bbc8b98f..ab81ae3b 100644 --- a/src/deepgram/listen/v1/media/client.py +++ b/src/deepgram/listen/v1/media/client.py @@ -9,9 +9,9 @@ from .types.media_transcribe_request_custom_intent_mode import MediaTranscribeRequestCustomIntentMode from .types.media_transcribe_request_custom_topic_mode import MediaTranscribeRequestCustomTopicMode from .types.media_transcribe_request_encoding import MediaTranscribeRequestEncoding -from .types.media_transcribe_request_model_zero import MediaTranscribeRequestModelZero -from .types.media_transcribe_request_summarize_zero import MediaTranscribeRequestSummarizeZero -from .types.media_transcribe_request_version_zero import 
MediaTranscribeRequestVersionZero +from .types.media_transcribe_request_model import MediaTranscribeRequestModel +from .types.media_transcribe_request_summarize import MediaTranscribeRequestSummarize +from .types.media_transcribe_request_version import MediaTranscribeRequestVersion from .types.media_transcribe_response import MediaTranscribeResponse # this is used as the default value for optional parameters @@ -41,7 +41,7 @@ def transcribe_url( callback_method: typing.Optional[MediaTranscribeRequestCallbackMethod] = None, extra: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, sentiment: typing.Optional[bool] = None, - summarize: typing.Optional[MediaTranscribeRequestSummarizeZero] = None, + summarize: typing.Optional[MediaTranscribeRequestSummarize] = None, tag: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, topics: typing.Optional[bool] = None, custom_topic: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, @@ -59,7 +59,7 @@ def transcribe_url( keywords: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, language: typing.Optional[str] = None, measurements: typing.Optional[bool] = None, - model: typing.Optional[MediaTranscribeRequestModelZero] = None, + model: typing.Optional[MediaTranscribeRequestModel] = None, multichannel: typing.Optional[bool] = None, numerals: typing.Optional[bool] = None, paragraphs: typing.Optional[bool] = None, @@ -71,7 +71,7 @@ def transcribe_url( smart_format: typing.Optional[bool] = None, utterances: typing.Optional[bool] = None, utt_split: typing.Optional[float] = None, - version: typing.Optional[MediaTranscribeRequestVersionZero] = None, + version: typing.Optional[MediaTranscribeRequestVersion] = None, mip_opt_out: typing.Optional[bool] = None, request_options: typing.Optional[RequestOptions] = None, ) -> MediaTranscribeResponse: @@ -94,7 +94,7 @@ def transcribe_url( sentiment : typing.Optional[bool] Recognizes the sentiment throughout a transcript or text - summarize : 
typing.Optional[MediaTranscribeRequestSummarizeZero] + summarize : typing.Optional[MediaTranscribeRequestSummarize] Summarize content. For Listen API, supports string version option. For Read API, accepts boolean only. tag : typing.Optional[typing.Union[str, typing.Sequence[str]]] @@ -148,7 +148,7 @@ def transcribe_url( measurements : typing.Optional[bool] Spoken measurements will be converted to their corresponding abbreviations - model : typing.Optional[MediaTranscribeRequestModelZero] + model : typing.Optional[MediaTranscribeRequestModel] AI model used to process submitted audio multichannel : typing.Optional[bool] @@ -184,7 +184,7 @@ def transcribe_url( utt_split : typing.Optional[float] Seconds to wait before detecting a pause between words in submitted audio - version : typing.Optional[MediaTranscribeRequestVersionZero] + version : typing.Optional[MediaTranscribeRequestVersion] Version of an AI model to use mip_opt_out : typing.Optional[bool] @@ -294,7 +294,7 @@ def transcribe_file( callback_method: typing.Optional[MediaTranscribeRequestCallbackMethod] = None, extra: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, sentiment: typing.Optional[bool] = None, - summarize: typing.Optional[MediaTranscribeRequestSummarizeZero] = None, + summarize: typing.Optional[MediaTranscribeRequestSummarize] = None, tag: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, topics: typing.Optional[bool] = None, custom_topic: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, @@ -312,7 +312,7 @@ def transcribe_file( keywords: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, language: typing.Optional[str] = None, measurements: typing.Optional[bool] = None, - model: typing.Optional[MediaTranscribeRequestModelZero] = None, + model: typing.Optional[MediaTranscribeRequestModel] = None, multichannel: typing.Optional[bool] = None, numerals: typing.Optional[bool] = None, paragraphs: typing.Optional[bool] = None, @@ -324,7 +324,7 @@ 
def transcribe_file( smart_format: typing.Optional[bool] = None, utterances: typing.Optional[bool] = None, utt_split: typing.Optional[float] = None, - version: typing.Optional[MediaTranscribeRequestVersionZero] = None, + version: typing.Optional[MediaTranscribeRequestVersion] = None, mip_opt_out: typing.Optional[bool] = None, request_options: typing.Optional[RequestOptions] = None, ) -> MediaTranscribeResponse: @@ -347,7 +347,7 @@ def transcribe_file( sentiment : typing.Optional[bool] Recognizes the sentiment throughout a transcript or text - summarize : typing.Optional[MediaTranscribeRequestSummarizeZero] + summarize : typing.Optional[MediaTranscribeRequestSummarize] Summarize content. For Listen API, supports string version option. For Read API, accepts boolean only. tag : typing.Optional[typing.Union[str, typing.Sequence[str]]] @@ -401,7 +401,7 @@ def transcribe_file( measurements : typing.Optional[bool] Spoken measurements will be converted to their corresponding abbreviations - model : typing.Optional[MediaTranscribeRequestModelZero] + model : typing.Optional[MediaTranscribeRequestModel] AI model used to process submitted audio multichannel : typing.Optional[bool] @@ -437,7 +437,7 @@ def transcribe_file( utt_split : typing.Optional[float] Seconds to wait before detecting a pause between words in submitted audio - version : typing.Optional[MediaTranscribeRequestVersionZero] + version : typing.Optional[MediaTranscribeRequestVersion] Version of an AI model to use mip_opt_out : typing.Optional[bool] @@ -526,7 +526,7 @@ async def transcribe_url( callback_method: typing.Optional[MediaTranscribeRequestCallbackMethod] = None, extra: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, sentiment: typing.Optional[bool] = None, - summarize: typing.Optional[MediaTranscribeRequestSummarizeZero] = None, + summarize: typing.Optional[MediaTranscribeRequestSummarize] = None, tag: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, topics: 
typing.Optional[bool] = None, custom_topic: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, @@ -544,7 +544,7 @@ async def transcribe_url( keywords: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, language: typing.Optional[str] = None, measurements: typing.Optional[bool] = None, - model: typing.Optional[MediaTranscribeRequestModelZero] = None, + model: typing.Optional[MediaTranscribeRequestModel] = None, multichannel: typing.Optional[bool] = None, numerals: typing.Optional[bool] = None, paragraphs: typing.Optional[bool] = None, @@ -556,7 +556,7 @@ async def transcribe_url( smart_format: typing.Optional[bool] = None, utterances: typing.Optional[bool] = None, utt_split: typing.Optional[float] = None, - version: typing.Optional[MediaTranscribeRequestVersionZero] = None, + version: typing.Optional[MediaTranscribeRequestVersion] = None, mip_opt_out: typing.Optional[bool] = None, request_options: typing.Optional[RequestOptions] = None, ) -> MediaTranscribeResponse: @@ -579,7 +579,7 @@ async def transcribe_url( sentiment : typing.Optional[bool] Recognizes the sentiment throughout a transcript or text - summarize : typing.Optional[MediaTranscribeRequestSummarizeZero] + summarize : typing.Optional[MediaTranscribeRequestSummarize] Summarize content. For Listen API, supports string version option. For Read API, accepts boolean only. 
tag : typing.Optional[typing.Union[str, typing.Sequence[str]]] @@ -633,7 +633,7 @@ async def transcribe_url( measurements : typing.Optional[bool] Spoken measurements will be converted to their corresponding abbreviations - model : typing.Optional[MediaTranscribeRequestModelZero] + model : typing.Optional[MediaTranscribeRequestModel] AI model used to process submitted audio multichannel : typing.Optional[bool] @@ -669,7 +669,7 @@ async def transcribe_url( utt_split : typing.Optional[float] Seconds to wait before detecting a pause between words in submitted audio - version : typing.Optional[MediaTranscribeRequestVersionZero] + version : typing.Optional[MediaTranscribeRequestVersion] Version of an AI model to use mip_opt_out : typing.Optional[bool] @@ -787,7 +787,7 @@ async def transcribe_file( callback_method: typing.Optional[MediaTranscribeRequestCallbackMethod] = None, extra: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, sentiment: typing.Optional[bool] = None, - summarize: typing.Optional[MediaTranscribeRequestSummarizeZero] = None, + summarize: typing.Optional[MediaTranscribeRequestSummarize] = None, tag: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, topics: typing.Optional[bool] = None, custom_topic: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, @@ -805,7 +805,7 @@ async def transcribe_file( keywords: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, language: typing.Optional[str] = None, measurements: typing.Optional[bool] = None, - model: typing.Optional[MediaTranscribeRequestModelZero] = None, + model: typing.Optional[MediaTranscribeRequestModel] = None, multichannel: typing.Optional[bool] = None, numerals: typing.Optional[bool] = None, paragraphs: typing.Optional[bool] = None, @@ -817,7 +817,7 @@ async def transcribe_file( smart_format: typing.Optional[bool] = None, utterances: typing.Optional[bool] = None, utt_split: typing.Optional[float] = None, - version: 
typing.Optional[MediaTranscribeRequestVersionZero] = None, + version: typing.Optional[MediaTranscribeRequestVersion] = None, mip_opt_out: typing.Optional[bool] = None, request_options: typing.Optional[RequestOptions] = None, ) -> MediaTranscribeResponse: @@ -840,7 +840,7 @@ async def transcribe_file( sentiment : typing.Optional[bool] Recognizes the sentiment throughout a transcript or text - summarize : typing.Optional[MediaTranscribeRequestSummarizeZero] + summarize : typing.Optional[MediaTranscribeRequestSummarize] Summarize content. For Listen API, supports string version option. For Read API, accepts boolean only. tag : typing.Optional[typing.Union[str, typing.Sequence[str]]] @@ -894,7 +894,7 @@ async def transcribe_file( measurements : typing.Optional[bool] Spoken measurements will be converted to their corresponding abbreviations - model : typing.Optional[MediaTranscribeRequestModelZero] + model : typing.Optional[MediaTranscribeRequestModel] AI model used to process submitted audio multichannel : typing.Optional[bool] @@ -930,7 +930,7 @@ async def transcribe_file( utt_split : typing.Optional[float] Seconds to wait before detecting a pause between words in submitted audio - version : typing.Optional[MediaTranscribeRequestVersionZero] + version : typing.Optional[MediaTranscribeRequestVersion] Version of an AI model to use mip_opt_out : typing.Optional[bool] diff --git a/src/deepgram/listen/v1/media/raw_client.py b/src/deepgram/listen/v1/media/raw_client.py index 88d1c5e9..830d4259 100644 --- a/src/deepgram/listen/v1/media/raw_client.py +++ b/src/deepgram/listen/v1/media/raw_client.py @@ -13,9 +13,9 @@ from .types.media_transcribe_request_custom_intent_mode import MediaTranscribeRequestCustomIntentMode from .types.media_transcribe_request_custom_topic_mode import MediaTranscribeRequestCustomTopicMode from .types.media_transcribe_request_encoding import MediaTranscribeRequestEncoding -from .types.media_transcribe_request_model_zero import 
MediaTranscribeRequestModelZero -from .types.media_transcribe_request_summarize_zero import MediaTranscribeRequestSummarizeZero -from .types.media_transcribe_request_version_zero import MediaTranscribeRequestVersionZero +from .types.media_transcribe_request_model import MediaTranscribeRequestModel +from .types.media_transcribe_request_summarize import MediaTranscribeRequestSummarize +from .types.media_transcribe_request_version import MediaTranscribeRequestVersion from .types.media_transcribe_response import MediaTranscribeResponse # this is used as the default value for optional parameters @@ -34,7 +34,7 @@ def transcribe_url( callback_method: typing.Optional[MediaTranscribeRequestCallbackMethod] = None, extra: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, sentiment: typing.Optional[bool] = None, - summarize: typing.Optional[MediaTranscribeRequestSummarizeZero] = None, + summarize: typing.Optional[MediaTranscribeRequestSummarize] = None, tag: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, topics: typing.Optional[bool] = None, custom_topic: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, @@ -52,7 +52,7 @@ def transcribe_url( keywords: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, language: typing.Optional[str] = None, measurements: typing.Optional[bool] = None, - model: typing.Optional[MediaTranscribeRequestModelZero] = None, + model: typing.Optional[MediaTranscribeRequestModel] = None, multichannel: typing.Optional[bool] = None, numerals: typing.Optional[bool] = None, paragraphs: typing.Optional[bool] = None, @@ -64,7 +64,7 @@ def transcribe_url( smart_format: typing.Optional[bool] = None, utterances: typing.Optional[bool] = None, utt_split: typing.Optional[float] = None, - version: typing.Optional[MediaTranscribeRequestVersionZero] = None, + version: typing.Optional[MediaTranscribeRequestVersion] = None, mip_opt_out: typing.Optional[bool] = None, request_options: 
typing.Optional[RequestOptions] = None, ) -> HttpResponse[MediaTranscribeResponse]: @@ -87,7 +87,7 @@ def transcribe_url( sentiment : typing.Optional[bool] Recognizes the sentiment throughout a transcript or text - summarize : typing.Optional[MediaTranscribeRequestSummarizeZero] + summarize : typing.Optional[MediaTranscribeRequestSummarize] Summarize content. For Listen API, supports string version option. For Read API, accepts boolean only. tag : typing.Optional[typing.Union[str, typing.Sequence[str]]] @@ -141,7 +141,7 @@ def transcribe_url( measurements : typing.Optional[bool] Spoken measurements will be converted to their corresponding abbreviations - model : typing.Optional[MediaTranscribeRequestModelZero] + model : typing.Optional[MediaTranscribeRequestModel] AI model used to process submitted audio multichannel : typing.Optional[bool] @@ -177,7 +177,7 @@ def transcribe_url( utt_split : typing.Optional[float] Seconds to wait before detecting a pause between words in submitted audio - version : typing.Optional[MediaTranscribeRequestVersionZero] + version : typing.Optional[MediaTranscribeRequestVersion] Version of an AI model to use mip_opt_out : typing.Optional[bool] @@ -276,7 +276,7 @@ def transcribe_file( callback_method: typing.Optional[MediaTranscribeRequestCallbackMethod] = None, extra: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, sentiment: typing.Optional[bool] = None, - summarize: typing.Optional[MediaTranscribeRequestSummarizeZero] = None, + summarize: typing.Optional[MediaTranscribeRequestSummarize] = None, tag: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, topics: typing.Optional[bool] = None, custom_topic: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, @@ -294,7 +294,7 @@ def transcribe_file( keywords: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, language: typing.Optional[str] = None, measurements: typing.Optional[bool] = None, - model: 
typing.Optional[MediaTranscribeRequestModelZero] = None, + model: typing.Optional[MediaTranscribeRequestModel] = None, multichannel: typing.Optional[bool] = None, numerals: typing.Optional[bool] = None, paragraphs: typing.Optional[bool] = None, @@ -306,7 +306,7 @@ def transcribe_file( smart_format: typing.Optional[bool] = None, utterances: typing.Optional[bool] = None, utt_split: typing.Optional[float] = None, - version: typing.Optional[MediaTranscribeRequestVersionZero] = None, + version: typing.Optional[MediaTranscribeRequestVersion] = None, mip_opt_out: typing.Optional[bool] = None, request_options: typing.Optional[RequestOptions] = None, ) -> HttpResponse[MediaTranscribeResponse]: @@ -329,7 +329,7 @@ def transcribe_file( sentiment : typing.Optional[bool] Recognizes the sentiment throughout a transcript or text - summarize : typing.Optional[MediaTranscribeRequestSummarizeZero] + summarize : typing.Optional[MediaTranscribeRequestSummarize] Summarize content. For Listen API, supports string version option. For Read API, accepts boolean only. 
tag : typing.Optional[typing.Union[str, typing.Sequence[str]]] @@ -383,7 +383,7 @@ def transcribe_file( measurements : typing.Optional[bool] Spoken measurements will be converted to their corresponding abbreviations - model : typing.Optional[MediaTranscribeRequestModelZero] + model : typing.Optional[MediaTranscribeRequestModel] AI model used to process submitted audio multichannel : typing.Optional[bool] @@ -419,7 +419,7 @@ def transcribe_file( utt_split : typing.Optional[float] Seconds to wait before detecting a pause between words in submitted audio - version : typing.Optional[MediaTranscribeRequestVersionZero] + version : typing.Optional[MediaTranscribeRequestVersion] Version of an AI model to use mip_opt_out : typing.Optional[bool] @@ -521,7 +521,7 @@ async def transcribe_url( callback_method: typing.Optional[MediaTranscribeRequestCallbackMethod] = None, extra: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, sentiment: typing.Optional[bool] = None, - summarize: typing.Optional[MediaTranscribeRequestSummarizeZero] = None, + summarize: typing.Optional[MediaTranscribeRequestSummarize] = None, tag: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, topics: typing.Optional[bool] = None, custom_topic: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, @@ -539,7 +539,7 @@ async def transcribe_url( keywords: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, language: typing.Optional[str] = None, measurements: typing.Optional[bool] = None, - model: typing.Optional[MediaTranscribeRequestModelZero] = None, + model: typing.Optional[MediaTranscribeRequestModel] = None, multichannel: typing.Optional[bool] = None, numerals: typing.Optional[bool] = None, paragraphs: typing.Optional[bool] = None, @@ -551,7 +551,7 @@ async def transcribe_url( smart_format: typing.Optional[bool] = None, utterances: typing.Optional[bool] = None, utt_split: typing.Optional[float] = None, - version: 
typing.Optional[MediaTranscribeRequestVersionZero] = None, + version: typing.Optional[MediaTranscribeRequestVersion] = None, mip_opt_out: typing.Optional[bool] = None, request_options: typing.Optional[RequestOptions] = None, ) -> AsyncHttpResponse[MediaTranscribeResponse]: @@ -574,7 +574,7 @@ async def transcribe_url( sentiment : typing.Optional[bool] Recognizes the sentiment throughout a transcript or text - summarize : typing.Optional[MediaTranscribeRequestSummarizeZero] + summarize : typing.Optional[MediaTranscribeRequestSummarize] Summarize content. For Listen API, supports string version option. For Read API, accepts boolean only. tag : typing.Optional[typing.Union[str, typing.Sequence[str]]] @@ -628,7 +628,7 @@ async def transcribe_url( measurements : typing.Optional[bool] Spoken measurements will be converted to their corresponding abbreviations - model : typing.Optional[MediaTranscribeRequestModelZero] + model : typing.Optional[MediaTranscribeRequestModel] AI model used to process submitted audio multichannel : typing.Optional[bool] @@ -664,7 +664,7 @@ async def transcribe_url( utt_split : typing.Optional[float] Seconds to wait before detecting a pause between words in submitted audio - version : typing.Optional[MediaTranscribeRequestVersionZero] + version : typing.Optional[MediaTranscribeRequestVersion] Version of an AI model to use mip_opt_out : typing.Optional[bool] @@ -763,7 +763,7 @@ async def transcribe_file( callback_method: typing.Optional[MediaTranscribeRequestCallbackMethod] = None, extra: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, sentiment: typing.Optional[bool] = None, - summarize: typing.Optional[MediaTranscribeRequestSummarizeZero] = None, + summarize: typing.Optional[MediaTranscribeRequestSummarize] = None, tag: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, topics: typing.Optional[bool] = None, custom_topic: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, @@ -781,7 +781,7 @@ async def 
transcribe_file( keywords: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, language: typing.Optional[str] = None, measurements: typing.Optional[bool] = None, - model: typing.Optional[MediaTranscribeRequestModelZero] = None, + model: typing.Optional[MediaTranscribeRequestModel] = None, multichannel: typing.Optional[bool] = None, numerals: typing.Optional[bool] = None, paragraphs: typing.Optional[bool] = None, @@ -793,7 +793,7 @@ async def transcribe_file( smart_format: typing.Optional[bool] = None, utterances: typing.Optional[bool] = None, utt_split: typing.Optional[float] = None, - version: typing.Optional[MediaTranscribeRequestVersionZero] = None, + version: typing.Optional[MediaTranscribeRequestVersion] = None, mip_opt_out: typing.Optional[bool] = None, request_options: typing.Optional[RequestOptions] = None, ) -> AsyncHttpResponse[MediaTranscribeResponse]: @@ -816,7 +816,7 @@ async def transcribe_file( sentiment : typing.Optional[bool] Recognizes the sentiment throughout a transcript or text - summarize : typing.Optional[MediaTranscribeRequestSummarizeZero] + summarize : typing.Optional[MediaTranscribeRequestSummarize] Summarize content. For Listen API, supports string version option. For Read API, accepts boolean only. 
tag : typing.Optional[typing.Union[str, typing.Sequence[str]]] @@ -870,7 +870,7 @@ async def transcribe_file( measurements : typing.Optional[bool] Spoken measurements will be converted to their corresponding abbreviations - model : typing.Optional[MediaTranscribeRequestModelZero] + model : typing.Optional[MediaTranscribeRequestModel] AI model used to process submitted audio multichannel : typing.Optional[bool] @@ -906,7 +906,7 @@ async def transcribe_file( utt_split : typing.Optional[float] Seconds to wait before detecting a pause between words in submitted audio - version : typing.Optional[MediaTranscribeRequestVersionZero] + version : typing.Optional[MediaTranscribeRequestVersion] Version of an AI model to use mip_opt_out : typing.Optional[bool] diff --git a/src/deepgram/listen/v1/media/types/__init__.py b/src/deepgram/listen/v1/media/types/__init__.py index 81d1d91b..d0ff0fb9 100644 --- a/src/deepgram/listen/v1/media/types/__init__.py +++ b/src/deepgram/listen/v1/media/types/__init__.py @@ -10,18 +10,18 @@ from .media_transcribe_request_custom_intent_mode import MediaTranscribeRequestCustomIntentMode from .media_transcribe_request_custom_topic_mode import MediaTranscribeRequestCustomTopicMode from .media_transcribe_request_encoding import MediaTranscribeRequestEncoding - from .media_transcribe_request_model_zero import MediaTranscribeRequestModelZero - from .media_transcribe_request_summarize_zero import MediaTranscribeRequestSummarizeZero - from .media_transcribe_request_version_zero import MediaTranscribeRequestVersionZero + from .media_transcribe_request_model import MediaTranscribeRequestModel + from .media_transcribe_request_summarize import MediaTranscribeRequestSummarize + from .media_transcribe_request_version import MediaTranscribeRequestVersion from .media_transcribe_response import MediaTranscribeResponse _dynamic_imports: typing.Dict[str, str] = { "MediaTranscribeRequestCallbackMethod": ".media_transcribe_request_callback_method", 
"MediaTranscribeRequestCustomIntentMode": ".media_transcribe_request_custom_intent_mode", "MediaTranscribeRequestCustomTopicMode": ".media_transcribe_request_custom_topic_mode", "MediaTranscribeRequestEncoding": ".media_transcribe_request_encoding", - "MediaTranscribeRequestModelZero": ".media_transcribe_request_model_zero", - "MediaTranscribeRequestSummarizeZero": ".media_transcribe_request_summarize_zero", - "MediaTranscribeRequestVersionZero": ".media_transcribe_request_version_zero", + "MediaTranscribeRequestModel": ".media_transcribe_request_model", + "MediaTranscribeRequestSummarize": ".media_transcribe_request_summarize", + "MediaTranscribeRequestVersion": ".media_transcribe_request_version", "MediaTranscribeResponse": ".media_transcribe_response", } @@ -52,8 +52,8 @@ def __dir__(): "MediaTranscribeRequestCustomIntentMode", "MediaTranscribeRequestCustomTopicMode", "MediaTranscribeRequestEncoding", - "MediaTranscribeRequestModelZero", - "MediaTranscribeRequestSummarizeZero", - "MediaTranscribeRequestVersionZero", + "MediaTranscribeRequestModel", + "MediaTranscribeRequestSummarize", + "MediaTranscribeRequestVersion", "MediaTranscribeResponse", ] diff --git a/src/deepgram/listen/v1/media/types/media_transcribe_request_model_zero.py b/src/deepgram/listen/v1/media/types/media_transcribe_request_model.py similarity index 94% rename from src/deepgram/listen/v1/media/types/media_transcribe_request_model_zero.py rename to src/deepgram/listen/v1/media/types/media_transcribe_request_model.py index 6608514c..eaebbc7a 100644 --- a/src/deepgram/listen/v1/media/types/media_transcribe_request_model_zero.py +++ b/src/deepgram/listen/v1/media/types/media_transcribe_request_model.py @@ -2,7 +2,7 @@ import typing -MediaTranscribeRequestModelZero = typing.Union[ +MediaTranscribeRequestModel = typing.Union[ typing.Literal[ "nova-3", "nova-3-general", diff --git a/src/deepgram/read/v1/text/types/text_analyze_request_summarize_zero.py 
b/src/deepgram/listen/v1/media/types/media_transcribe_request_summarize.py similarity index 60% rename from src/deepgram/read/v1/text/types/text_analyze_request_summarize_zero.py rename to src/deepgram/listen/v1/media/types/media_transcribe_request_summarize.py index 4552004e..cc44a279 100644 --- a/src/deepgram/read/v1/text/types/text_analyze_request_summarize_zero.py +++ b/src/deepgram/listen/v1/media/types/media_transcribe_request_summarize.py @@ -2,4 +2,4 @@ import typing -TextAnalyzeRequestSummarizeZero = typing.Union[typing.Literal["v2"], typing.Any] +MediaTranscribeRequestSummarize = typing.Union[typing.Literal["v2"], typing.Any] diff --git a/src/deepgram/listen/v1/media/types/media_transcribe_request_summarize_zero.py b/src/deepgram/listen/v1/media/types/media_transcribe_request_summarize_zero.py deleted file mode 100644 index a91b33f1..00000000 --- a/src/deepgram/listen/v1/media/types/media_transcribe_request_summarize_zero.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -MediaTranscribeRequestSummarizeZero = typing.Union[typing.Literal["v2"], typing.Any] diff --git a/src/deepgram/listen/v1/media/types/media_transcribe_request_version.py b/src/deepgram/listen/v1/media/types/media_transcribe_request_version.py new file mode 100644 index 00000000..e6ae1cd5 --- /dev/null +++ b/src/deepgram/listen/v1/media/types/media_transcribe_request_version.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +MediaTranscribeRequestVersion = typing.Union[typing.Literal["latest"], typing.Any] diff --git a/src/deepgram/listen/v1/media/types/media_transcribe_request_version_zero.py b/src/deepgram/listen/v1/media/types/media_transcribe_request_version_zero.py deleted file mode 100644 index 032f5c31..00000000 --- a/src/deepgram/listen/v1/media/types/media_transcribe_request_version_zero.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -MediaTranscribeRequestVersionZero = typing.Union[typing.Literal["latest"], typing.Any] diff --git a/src/deepgram/listen/v1/requests/listen_v1metadata.py b/src/deepgram/listen/v1/requests/listen_v1metadata.py index c6564659..2b648b48 100644 --- a/src/deepgram/listen/v1/requests/listen_v1metadata.py +++ b/src/deepgram/listen/v1/requests/listen_v1metadata.py @@ -1,11 +1,12 @@ # This file was auto-generated by Fern from our API Definition. +import typing + import typing_extensions -from ..types.listen_v1metadata_type import ListenV1MetadataType class ListenV1MetadataParams(typing_extensions.TypedDict): - type: ListenV1MetadataType + type: typing.Literal["Metadata"] """ Message type identifier """ diff --git a/src/deepgram/listen/v1/requests/listen_v1results.py b/src/deepgram/listen/v1/requests/listen_v1results.py index 31577afa..96e152f0 100644 --- a/src/deepgram/listen/v1/requests/listen_v1results.py +++ b/src/deepgram/listen/v1/requests/listen_v1results.py @@ -3,14 +3,13 @@ import typing import typing_extensions -from ..types.listen_v1results_type import ListenV1ResultsType from .listen_v1results_channel import ListenV1ResultsChannelParams from .listen_v1results_entities_item import ListenV1ResultsEntitiesItemParams from .listen_v1results_metadata import ListenV1ResultsMetadataParams class ListenV1ResultsParams(typing_extensions.TypedDict): - type: ListenV1ResultsType + type: typing.Literal["Results"] """ Message type identifier """ diff --git 
a/src/deepgram/listen/v1/requests/listen_v1speech_started.py b/src/deepgram/listen/v1/requests/listen_v1speech_started.py index 5741ca46..1cc1dcfa 100644 --- a/src/deepgram/listen/v1/requests/listen_v1speech_started.py +++ b/src/deepgram/listen/v1/requests/listen_v1speech_started.py @@ -3,11 +3,10 @@ import typing import typing_extensions -from ..types.listen_v1speech_started_type import ListenV1SpeechStartedType class ListenV1SpeechStartedParams(typing_extensions.TypedDict): - type: ListenV1SpeechStartedType + type: typing.Literal["SpeechStarted"] """ Message type identifier """ diff --git a/src/deepgram/listen/v1/requests/listen_v1utterance_end.py b/src/deepgram/listen/v1/requests/listen_v1utterance_end.py index cac51946..37ae57b8 100644 --- a/src/deepgram/listen/v1/requests/listen_v1utterance_end.py +++ b/src/deepgram/listen/v1/requests/listen_v1utterance_end.py @@ -3,11 +3,10 @@ import typing import typing_extensions -from ..types.listen_v1utterance_end_type import ListenV1UtteranceEndType class ListenV1UtteranceEndParams(typing_extensions.TypedDict): - type: ListenV1UtteranceEndType + type: typing.Literal["UtteranceEnd"] """ Message type identifier """ diff --git a/src/deepgram/listen/v1/socket_client.py b/src/deepgram/listen/v1/socket_client.py index 387c107f..011786d2 100644 --- a/src/deepgram/listen/v1/socket_client.py +++ b/src/deepgram/listen/v1/socket_client.py @@ -37,7 +37,7 @@ async def __aiter__(self): yield message else: try: - yield construct_type(type_=V1SocketClientResponse, object_=json.loads(message)) # type: ignore + yield construct_type(V1SocketClientResponse, json.loads(message)) # type: ignore except Exception: _logger.warning( "Skipping unknown WebSocket message; update your SDK version to support new message types." 
@@ -62,14 +62,14 @@ async def start_listening(self): else: json_data = json.loads(raw_message) try: - parsed = construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore + parsed = construct_type(V1SocketClientResponse, json_data) # type: ignore except Exception: _logger.warning( "Skipping unknown WebSocket message; update your SDK version to support new message types." ) continue await self._emit_async(EventType.MESSAGE, parsed) - except Exception as exc: + except (websockets.WebSocketException, JSONDecodeError) as exc: await self._emit_async(EventType.ERROR, exc) finally: await self._emit_async(EventType.CLOSE, None) @@ -81,26 +81,26 @@ async def send_media(self, message: bytes) -> None: """ await self._send(message) - async def send_finalize(self, message: typing.Optional[ListenV1Finalize] = None) -> None: + async def send_finalize(self, message: ListenV1Finalize) -> None: """ Send a message to the websocket connection. The message will be sent as a ListenV1Finalize. """ - await self._send_model(message or ListenV1Finalize(type="Finalize")) + await self._send_model(message) - async def send_close_stream(self, message: typing.Optional[ListenV1CloseStream] = None) -> None: + async def send_close_stream(self, message: ListenV1CloseStream) -> None: """ Send a message to the websocket connection. The message will be sent as a ListenV1CloseStream. """ - await self._send_model(message or ListenV1CloseStream(type="CloseStream")) + await self._send_model(message) - async def send_keep_alive(self, message: typing.Optional[ListenV1KeepAlive] = None) -> None: + async def send_keep_alive(self, message: ListenV1KeepAlive) -> None: """ Send a message to the websocket connection. The message will be sent as a ListenV1KeepAlive. 
""" - await self._send_model(message or ListenV1KeepAlive(type="KeepAlive")) + await self._send_model(message) async def recv(self) -> V1SocketClientResponse: """ @@ -111,7 +111,7 @@ async def recv(self) -> V1SocketClientResponse: return data # type: ignore json_data = json.loads(data) try: - return construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore + return construct_type(V1SocketClientResponse, json_data) # type: ignore except Exception: _logger.warning("Skipping unknown WebSocket message; update your SDK version to support new message types.") return json_data # type: ignore @@ -142,7 +142,7 @@ def __iter__(self): yield message else: try: - yield construct_type(type_=V1SocketClientResponse, object_=json.loads(message)) # type: ignore + yield construct_type(V1SocketClientResponse, json.loads(message)) # type: ignore except Exception: _logger.warning( "Skipping unknown WebSocket message; update your SDK version to support new message types." @@ -167,14 +167,14 @@ def start_listening(self): else: json_data = json.loads(raw_message) try: - parsed = construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore + parsed = construct_type(V1SocketClientResponse, json_data) # type: ignore except Exception: _logger.warning( "Skipping unknown WebSocket message; update your SDK version to support new message types." ) continue self._emit(EventType.MESSAGE, parsed) - except Exception as exc: + except (websockets.WebSocketException, JSONDecodeError) as exc: self._emit(EventType.ERROR, exc) finally: self._emit(EventType.CLOSE, None) @@ -186,26 +186,26 @@ def send_media(self, message: bytes) -> None: """ self._send(message) - def send_finalize(self, message: typing.Optional[ListenV1Finalize] = None) -> None: + def send_finalize(self, message: ListenV1Finalize) -> None: """ Send a message to the websocket connection. The message will be sent as a ListenV1Finalize. 
""" - self._send_model(message or ListenV1Finalize(type="Finalize")) + self._send_model(message) - def send_close_stream(self, message: typing.Optional[ListenV1CloseStream] = None) -> None: + def send_close_stream(self, message: ListenV1CloseStream) -> None: """ Send a message to the websocket connection. The message will be sent as a ListenV1CloseStream. """ - self._send_model(message or ListenV1CloseStream(type="CloseStream")) + self._send_model(message) - def send_keep_alive(self, message: typing.Optional[ListenV1KeepAlive] = None) -> None: + def send_keep_alive(self, message: ListenV1KeepAlive) -> None: """ Send a message to the websocket connection. The message will be sent as a ListenV1KeepAlive. """ - self._send_model(message or ListenV1KeepAlive(type="KeepAlive")) + self._send_model(message) def recv(self) -> V1SocketClientResponse: """ @@ -216,7 +216,7 @@ def recv(self) -> V1SocketClientResponse: return data # type: ignore json_data = json.loads(data) try: - return construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore + return construct_type(V1SocketClientResponse, json_data) # type: ignore except Exception: _logger.warning("Skipping unknown WebSocket message; update your SDK version to support new message types.") return json_data # type: ignore diff --git a/src/deepgram/listen/v1/types/__init__.py b/src/deepgram/listen/v1/types/__init__.py index 4eab0761..30c6849a 100644 --- a/src/deepgram/listen/v1/types/__init__.py +++ b/src/deepgram/listen/v1/types/__init__.py @@ -13,7 +13,6 @@ from .listen_v1keep_alive import ListenV1KeepAlive from .listen_v1keep_alive_type import ListenV1KeepAliveType from .listen_v1metadata import ListenV1Metadata - from .listen_v1metadata_type import ListenV1MetadataType from .listen_v1results import ListenV1Results from .listen_v1results_channel import ListenV1ResultsChannel from .listen_v1results_channel_alternatives_item import ListenV1ResultsChannelAlternativesItem @@ -21,11 +20,8 @@ from 
.listen_v1results_entities_item import ListenV1ResultsEntitiesItem from .listen_v1results_metadata import ListenV1ResultsMetadata from .listen_v1results_metadata_model_info import ListenV1ResultsMetadataModelInfo - from .listen_v1results_type import ListenV1ResultsType from .listen_v1speech_started import ListenV1SpeechStarted - from .listen_v1speech_started_type import ListenV1SpeechStartedType from .listen_v1utterance_end import ListenV1UtteranceEnd - from .listen_v1utterance_end_type import ListenV1UtteranceEndType _dynamic_imports: typing.Dict[str, str] = { "ListenV1CloseStream": ".listen_v1close_stream", "ListenV1CloseStreamType": ".listen_v1close_stream_type", @@ -34,7 +30,6 @@ "ListenV1KeepAlive": ".listen_v1keep_alive", "ListenV1KeepAliveType": ".listen_v1keep_alive_type", "ListenV1Metadata": ".listen_v1metadata", - "ListenV1MetadataType": ".listen_v1metadata_type", "ListenV1Results": ".listen_v1results", "ListenV1ResultsChannel": ".listen_v1results_channel", "ListenV1ResultsChannelAlternativesItem": ".listen_v1results_channel_alternatives_item", @@ -42,11 +37,8 @@ "ListenV1ResultsEntitiesItem": ".listen_v1results_entities_item", "ListenV1ResultsMetadata": ".listen_v1results_metadata", "ListenV1ResultsMetadataModelInfo": ".listen_v1results_metadata_model_info", - "ListenV1ResultsType": ".listen_v1results_type", "ListenV1SpeechStarted": ".listen_v1speech_started", - "ListenV1SpeechStartedType": ".listen_v1speech_started_type", "ListenV1UtteranceEnd": ".listen_v1utterance_end", - "ListenV1UtteranceEndType": ".listen_v1utterance_end_type", } @@ -79,7 +71,6 @@ def __dir__(): "ListenV1KeepAlive", "ListenV1KeepAliveType", "ListenV1Metadata", - "ListenV1MetadataType", "ListenV1Results", "ListenV1ResultsChannel", "ListenV1ResultsChannelAlternativesItem", @@ -87,9 +78,6 @@ def __dir__(): "ListenV1ResultsEntitiesItem", "ListenV1ResultsMetadata", "ListenV1ResultsMetadataModelInfo", - "ListenV1ResultsType", "ListenV1SpeechStarted", - "ListenV1SpeechStartedType", 
"ListenV1UtteranceEnd", - "ListenV1UtteranceEndType", ] diff --git a/src/deepgram/listen/v1/types/listen_v1metadata.py b/src/deepgram/listen/v1/types/listen_v1metadata.py index 975bc63f..5a048330 100644 --- a/src/deepgram/listen/v1/types/listen_v1metadata.py +++ b/src/deepgram/listen/v1/types/listen_v1metadata.py @@ -5,11 +5,10 @@ import pydantic from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel -from .listen_v1metadata_type import ListenV1MetadataType class ListenV1Metadata(UncheckedBaseModel): - type: ListenV1MetadataType = pydantic.Field() + type: typing.Literal["Metadata"] = pydantic.Field(default="Metadata") """ Message type identifier """ diff --git a/src/deepgram/listen/v1/types/listen_v1metadata_type.py b/src/deepgram/listen/v1/types/listen_v1metadata_type.py deleted file mode 100644 index c6e15ad0..00000000 --- a/src/deepgram/listen/v1/types/listen_v1metadata_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -ListenV1MetadataType = typing.Union[typing.Literal["Metadata"], typing.Any] diff --git a/src/deepgram/listen/v1/types/listen_v1results.py b/src/deepgram/listen/v1/types/listen_v1results.py index db98dec5..2ac0f05c 100644 --- a/src/deepgram/listen/v1/types/listen_v1results.py +++ b/src/deepgram/listen/v1/types/listen_v1results.py @@ -8,11 +8,10 @@ from .listen_v1results_channel import ListenV1ResultsChannel from .listen_v1results_entities_item import ListenV1ResultsEntitiesItem from .listen_v1results_metadata import ListenV1ResultsMetadata -from .listen_v1results_type import ListenV1ResultsType class ListenV1Results(UncheckedBaseModel): - type: ListenV1ResultsType = pydantic.Field() + type: typing.Literal["Results"] = pydantic.Field(default="Results") """ Message type identifier """ diff --git a/src/deepgram/listen/v1/types/listen_v1results_type.py b/src/deepgram/listen/v1/types/listen_v1results_type.py deleted file mode 100644 index 7d93db8b..00000000 --- a/src/deepgram/listen/v1/types/listen_v1results_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -ListenV1ResultsType = typing.Union[typing.Literal["Results"], typing.Any] diff --git a/src/deepgram/listen/v1/types/listen_v1speech_started.py b/src/deepgram/listen/v1/types/listen_v1speech_started.py index 14115993..ff986c28 100644 --- a/src/deepgram/listen/v1/types/listen_v1speech_started.py +++ b/src/deepgram/listen/v1/types/listen_v1speech_started.py @@ -5,11 +5,10 @@ import pydantic from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel -from .listen_v1speech_started_type import ListenV1SpeechStartedType class ListenV1SpeechStarted(UncheckedBaseModel): - type: ListenV1SpeechStartedType = pydantic.Field() + type: typing.Literal["SpeechStarted"] = pydantic.Field(default="SpeechStarted") """ Message type identifier """ diff --git a/src/deepgram/listen/v1/types/listen_v1speech_started_type.py b/src/deepgram/listen/v1/types/listen_v1speech_started_type.py deleted file mode 100644 index 1fc14b5a..00000000 --- a/src/deepgram/listen/v1/types/listen_v1speech_started_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -ListenV1SpeechStartedType = typing.Union[typing.Literal["SpeechStarted"], typing.Any] diff --git a/src/deepgram/listen/v1/types/listen_v1utterance_end.py b/src/deepgram/listen/v1/types/listen_v1utterance_end.py index e6488578..893468fc 100644 --- a/src/deepgram/listen/v1/types/listen_v1utterance_end.py +++ b/src/deepgram/listen/v1/types/listen_v1utterance_end.py @@ -5,11 +5,10 @@ import pydantic from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel -from .listen_v1utterance_end_type import ListenV1UtteranceEndType class ListenV1UtteranceEnd(UncheckedBaseModel): - type: ListenV1UtteranceEndType = pydantic.Field() + type: typing.Literal["UtteranceEnd"] = pydantic.Field(default="UtteranceEnd") """ Message type identifier """ diff --git a/src/deepgram/listen/v1/types/listen_v1utterance_end_type.py b/src/deepgram/listen/v1/types/listen_v1utterance_end_type.py deleted file mode 100644 index a8348503..00000000 --- a/src/deepgram/listen/v1/types/listen_v1utterance_end_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -ListenV1UtteranceEndType = typing.Union[typing.Literal["UtteranceEnd"], typing.Any] diff --git a/src/deepgram/listen/v2/__init__.py b/src/deepgram/listen/v2/__init__.py index 8568cbd8..ab491617 100644 --- a/src/deepgram/listen/v2/__init__.py +++ b/src/deepgram/listen/v2/__init__.py @@ -9,17 +9,16 @@ from .types import ( ListenV2CloseStream, ListenV2CloseStreamType, + ListenV2ConfigureFailure, ListenV2Connected, - ListenV2ConnectedType, ListenV2FatalError, - ListenV2FatalErrorType, ListenV2TurnInfo, ListenV2TurnInfoEvent, - ListenV2TurnInfoType, ListenV2TurnInfoWordsItem, ) from .requests import ( ListenV2CloseStreamParams, + ListenV2ConfigureFailureParams, ListenV2ConnectedParams, ListenV2FatalErrorParams, ListenV2TurnInfoParams, @@ -29,16 +28,15 @@ "ListenV2CloseStream": ".types", "ListenV2CloseStreamParams": ".requests", "ListenV2CloseStreamType": ".types", + "ListenV2ConfigureFailure": ".types", + "ListenV2ConfigureFailureParams": ".requests", "ListenV2Connected": ".types", "ListenV2ConnectedParams": ".requests", - "ListenV2ConnectedType": ".types", "ListenV2FatalError": ".types", "ListenV2FatalErrorParams": ".requests", - "ListenV2FatalErrorType": ".types", "ListenV2TurnInfo": ".types", "ListenV2TurnInfoEvent": ".types", "ListenV2TurnInfoParams": ".requests", - "ListenV2TurnInfoType": ".types", "ListenV2TurnInfoWordsItem": ".types", "ListenV2TurnInfoWordsItemParams": ".requests", } @@ -69,16 +67,15 @@ def __dir__(): "ListenV2CloseStream", "ListenV2CloseStreamParams", "ListenV2CloseStreamType", + "ListenV2ConfigureFailure", + "ListenV2ConfigureFailureParams", "ListenV2Connected", "ListenV2ConnectedParams", - "ListenV2ConnectedType", "ListenV2FatalError", "ListenV2FatalErrorParams", - "ListenV2FatalErrorType", "ListenV2TurnInfo", "ListenV2TurnInfoEvent", "ListenV2TurnInfoParams", - "ListenV2TurnInfoType", "ListenV2TurnInfoWordsItem", "ListenV2TurnInfoWordsItemParams", ] diff --git a/src/deepgram/listen/v2/requests/__init__.py 
b/src/deepgram/listen/v2/requests/__init__.py index 96ce5ece..49573944 100644 --- a/src/deepgram/listen/v2/requests/__init__.py +++ b/src/deepgram/listen/v2/requests/__init__.py @@ -7,12 +7,14 @@ if typing.TYPE_CHECKING: from .listen_v2close_stream import ListenV2CloseStreamParams + from .listen_v2configure_failure import ListenV2ConfigureFailureParams from .listen_v2connected import ListenV2ConnectedParams from .listen_v2fatal_error import ListenV2FatalErrorParams from .listen_v2turn_info import ListenV2TurnInfoParams from .listen_v2turn_info_words_item import ListenV2TurnInfoWordsItemParams _dynamic_imports: typing.Dict[str, str] = { "ListenV2CloseStreamParams": ".listen_v2close_stream", + "ListenV2ConfigureFailureParams": ".listen_v2configure_failure", "ListenV2ConnectedParams": ".listen_v2connected", "ListenV2FatalErrorParams": ".listen_v2fatal_error", "ListenV2TurnInfoParams": ".listen_v2turn_info", @@ -43,6 +45,7 @@ def __dir__(): __all__ = [ "ListenV2CloseStreamParams", + "ListenV2ConfigureFailureParams", "ListenV2ConnectedParams", "ListenV2FatalErrorParams", "ListenV2TurnInfoParams", diff --git a/src/deepgram/listen/v2/requests/listen_v2configure_failure.py b/src/deepgram/listen/v2/requests/listen_v2configure_failure.py new file mode 100644 index 00000000..bfe53f86 --- /dev/null +++ b/src/deepgram/listen/v2/requests/listen_v2configure_failure.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions + + +class ListenV2ConfigureFailureParams(typing_extensions.TypedDict): + type: typing.Literal["ConfigureFailure"] + """ + Message type identifier + """ + + request_id: str + """ + The unique identifier of the request + """ + + sequence_id: float + """ + Starts at `0` and increments for each message the server sends + to the client. This includes messages of other types, like + `TurnInfo` messages. 
+ """ diff --git a/src/deepgram/listen/v2/requests/listen_v2connected.py b/src/deepgram/listen/v2/requests/listen_v2connected.py index 97955889..c931eec2 100644 --- a/src/deepgram/listen/v2/requests/listen_v2connected.py +++ b/src/deepgram/listen/v2/requests/listen_v2connected.py @@ -1,11 +1,12 @@ # This file was auto-generated by Fern from our API Definition. +import typing + import typing_extensions -from ..types.listen_v2connected_type import ListenV2ConnectedType class ListenV2ConnectedParams(typing_extensions.TypedDict): - type: ListenV2ConnectedType + type: typing.Literal["Connected"] """ Message type identifier """ diff --git a/src/deepgram/listen/v2/requests/listen_v2fatal_error.py b/src/deepgram/listen/v2/requests/listen_v2fatal_error.py index c124f959..05cb3041 100644 --- a/src/deepgram/listen/v2/requests/listen_v2fatal_error.py +++ b/src/deepgram/listen/v2/requests/listen_v2fatal_error.py @@ -1,11 +1,12 @@ # This file was auto-generated by Fern from our API Definition. +import typing + import typing_extensions -from ..types.listen_v2fatal_error_type import ListenV2FatalErrorType class ListenV2FatalErrorParams(typing_extensions.TypedDict): - type: ListenV2FatalErrorType + type: typing.Literal["Error"] """ Message type identifier """ diff --git a/src/deepgram/listen/v2/requests/listen_v2turn_info.py b/src/deepgram/listen/v2/requests/listen_v2turn_info.py index 97cd3eda..6690cd2d 100644 --- a/src/deepgram/listen/v2/requests/listen_v2turn_info.py +++ b/src/deepgram/listen/v2/requests/listen_v2turn_info.py @@ -4,7 +4,6 @@ import typing_extensions from ..types.listen_v2turn_info_event import ListenV2TurnInfoEvent -from ..types.listen_v2turn_info_type import ListenV2TurnInfoType from .listen_v2turn_info_words_item import ListenV2TurnInfoWordsItemParams @@ -13,7 +12,7 @@ class ListenV2TurnInfoParams(typing_extensions.TypedDict): Describes the current turn and latest state of the turn """ - type: ListenV2TurnInfoType + type: typing.Literal["TurnInfo"] request_id: 
str """ The unique identifier of the request @@ -64,3 +63,16 @@ class ListenV2TurnInfoParams(typing_extensions.TypedDict): """ Confidence that no more speech is coming in this turn """ + + languages: typing_extensions.NotRequired[typing.Sequence[str]] + """ + Detected languages sorted by descending frequency in the + transcript. Only present when the flux-general-multi model + detects languages in the audio. + """ + + languages_hinted: typing_extensions.NotRequired[typing.Sequence[str]] + """ + The language hints that were supplied for this turn. Only + present when language hints are configured. + """ diff --git a/src/deepgram/listen/v2/socket_client.py b/src/deepgram/listen/v2/socket_client.py index 4bf24c36..d6dd1e05 100644 --- a/src/deepgram/listen/v2/socket_client.py +++ b/src/deepgram/listen/v2/socket_client.py @@ -10,6 +10,7 @@ from ...core.events import EventEmitterMixin, EventType from ...core.unchecked_base_model import construct_type from .types.listen_v2close_stream import ListenV2CloseStream +from .types.listen_v2configure_failure import ListenV2ConfigureFailure from .types.listen_v2connected import ListenV2Connected from .types.listen_v2fatal_error import ListenV2FatalError from .types.listen_v2turn_info import ListenV2TurnInfo @@ -20,7 +21,9 @@ from websockets import WebSocketClientProtocol # type: ignore _logger = logging.getLogger(__name__) -V2SocketClientResponse = typing.Union[ListenV2Connected, ListenV2TurnInfo, ListenV2FatalError] +V2SocketClientResponse = typing.Union[ + ListenV2Connected, ListenV2TurnInfo, typing.Any, ListenV2ConfigureFailure, ListenV2FatalError +] class AsyncV2SocketClient(EventEmitterMixin): @@ -34,7 +37,7 @@ async def __aiter__(self): yield message else: try: - yield construct_type(type_=V2SocketClientResponse, object_=json.loads(message)) # type: ignore + yield construct_type(V2SocketClientResponse, json.loads(message)) # type: ignore except Exception: _logger.warning( "Skipping unknown WebSocket message; update your SDK 
version to support new message types." @@ -59,14 +62,14 @@ async def start_listening(self): else: json_data = json.loads(raw_message) try: - parsed = construct_type(type_=V2SocketClientResponse, object_=json_data) # type: ignore + parsed = construct_type(V2SocketClientResponse, json_data) # type: ignore except Exception: _logger.warning( "Skipping unknown WebSocket message; update your SDK version to support new message types." ) continue await self._emit_async(EventType.MESSAGE, parsed) - except Exception as exc: + except (websockets.WebSocketException, JSONDecodeError) as exc: await self._emit_async(EventType.ERROR, exc) finally: await self._emit_async(EventType.CLOSE, None) @@ -78,12 +81,19 @@ async def send_media(self, message: bytes) -> None: """ await self._send(message) - async def send_close_stream(self, message: typing.Optional[ListenV2CloseStream] = None) -> None: + async def send_close_stream(self, message: ListenV2CloseStream) -> None: """ Send a message to the websocket connection. The message will be sent as a ListenV2CloseStream. """ - await self._send_model(message or ListenV2CloseStream(type="CloseStream")) + await self._send_model(message) + + async def send_configure(self, message: typing.Any) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a typing.Any. 
+ """ + await self._send(message) async def recv(self) -> V2SocketClientResponse: """ @@ -94,7 +104,7 @@ async def recv(self) -> V2SocketClientResponse: return data # type: ignore json_data = json.loads(data) try: - return construct_type(type_=V2SocketClientResponse, object_=json_data) # type: ignore + return construct_type(V2SocketClientResponse, json_data) # type: ignore except Exception: _logger.warning("Skipping unknown WebSocket message; update your SDK version to support new message types.") return json_data # type: ignore @@ -125,7 +135,7 @@ def __iter__(self): yield message else: try: - yield construct_type(type_=V2SocketClientResponse, object_=json.loads(message)) # type: ignore + yield construct_type(V2SocketClientResponse, json.loads(message)) # type: ignore except Exception: _logger.warning( "Skipping unknown WebSocket message; update your SDK version to support new message types." @@ -150,14 +160,14 @@ def start_listening(self): else: json_data = json.loads(raw_message) try: - parsed = construct_type(type_=V2SocketClientResponse, object_=json_data) # type: ignore + parsed = construct_type(V2SocketClientResponse, json_data) # type: ignore except Exception: _logger.warning( "Skipping unknown WebSocket message; update your SDK version to support new message types." ) continue self._emit(EventType.MESSAGE, parsed) - except Exception as exc: + except (websockets.WebSocketException, JSONDecodeError) as exc: self._emit(EventType.ERROR, exc) finally: self._emit(EventType.CLOSE, None) @@ -169,12 +179,19 @@ def send_media(self, message: bytes) -> None: """ self._send(message) - def send_close_stream(self, message: typing.Optional[ListenV2CloseStream] = None) -> None: + def send_close_stream(self, message: ListenV2CloseStream) -> None: """ Send a message to the websocket connection. The message will be sent as a ListenV2CloseStream. 
""" - self._send_model(message or ListenV2CloseStream(type="CloseStream")) + self._send_model(message) + + def send_configure(self, message: typing.Any) -> None: + """ + Send a message to the websocket connection. + The message will be sent as a typing.Any. + """ + self._send(message) def recv(self) -> V2SocketClientResponse: """ @@ -185,7 +202,7 @@ def recv(self) -> V2SocketClientResponse: return data # type: ignore json_data = json.loads(data) try: - return construct_type(type_=V2SocketClientResponse, object_=json_data) # type: ignore + return construct_type(V2SocketClientResponse, json_data) # type: ignore except Exception: _logger.warning("Skipping unknown WebSocket message; update your SDK version to support new message types.") return json_data # type: ignore diff --git a/src/deepgram/listen/v2/types/__init__.py b/src/deepgram/listen/v2/types/__init__.py index e30397cc..91bab1fe 100644 --- a/src/deepgram/listen/v2/types/__init__.py +++ b/src/deepgram/listen/v2/types/__init__.py @@ -8,24 +8,20 @@ if typing.TYPE_CHECKING: from .listen_v2close_stream import ListenV2CloseStream from .listen_v2close_stream_type import ListenV2CloseStreamType + from .listen_v2configure_failure import ListenV2ConfigureFailure from .listen_v2connected import ListenV2Connected - from .listen_v2connected_type import ListenV2ConnectedType from .listen_v2fatal_error import ListenV2FatalError - from .listen_v2fatal_error_type import ListenV2FatalErrorType from .listen_v2turn_info import ListenV2TurnInfo from .listen_v2turn_info_event import ListenV2TurnInfoEvent - from .listen_v2turn_info_type import ListenV2TurnInfoType from .listen_v2turn_info_words_item import ListenV2TurnInfoWordsItem _dynamic_imports: typing.Dict[str, str] = { "ListenV2CloseStream": ".listen_v2close_stream", "ListenV2CloseStreamType": ".listen_v2close_stream_type", + "ListenV2ConfigureFailure": ".listen_v2configure_failure", "ListenV2Connected": ".listen_v2connected", - "ListenV2ConnectedType": 
".listen_v2connected_type", "ListenV2FatalError": ".listen_v2fatal_error", - "ListenV2FatalErrorType": ".listen_v2fatal_error_type", "ListenV2TurnInfo": ".listen_v2turn_info", "ListenV2TurnInfoEvent": ".listen_v2turn_info_event", - "ListenV2TurnInfoType": ".listen_v2turn_info_type", "ListenV2TurnInfoWordsItem": ".listen_v2turn_info_words_item", } @@ -54,12 +50,10 @@ def __dir__(): __all__ = [ "ListenV2CloseStream", "ListenV2CloseStreamType", + "ListenV2ConfigureFailure", "ListenV2Connected", - "ListenV2ConnectedType", "ListenV2FatalError", - "ListenV2FatalErrorType", "ListenV2TurnInfo", "ListenV2TurnInfoEvent", - "ListenV2TurnInfoType", "ListenV2TurnInfoWordsItem", ] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_cartesia_voice.py b/src/deepgram/listen/v2/types/listen_v2configure_failure.py similarity index 53% rename from src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_cartesia_voice.py rename to src/deepgram/listen/v2/types/listen_v2configure_failure.py index 4801692c..b00fc6b1 100644 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_speak_endpoint_provider_cartesia_voice.py +++ b/src/deepgram/listen/v2/types/listen_v2configure_failure.py @@ -7,15 +7,22 @@ from ....core.unchecked_base_model import UncheckedBaseModel -class AgentV1SettingsAgentSpeakEndpointProviderCartesiaVoice(UncheckedBaseModel): - mode: str = pydantic.Field() +class ListenV2ConfigureFailure(UncheckedBaseModel): + type: typing.Literal["ConfigureFailure"] = pydantic.Field(default="ConfigureFailure") """ - Cartesia voice mode + Message type identifier """ - id: str = pydantic.Field() + request_id: str = pydantic.Field() """ - Cartesia voice ID + The unique identifier of the request + """ + + sequence_id: float = pydantic.Field() + """ + Starts at `0` and increments for each message the server sends + to the client. This includes messages of other types, like + `TurnInfo` messages. 
""" if IS_PYDANTIC_V2: diff --git a/src/deepgram/listen/v2/types/listen_v2connected.py b/src/deepgram/listen/v2/types/listen_v2connected.py index dc60c365..b174be68 100644 --- a/src/deepgram/listen/v2/types/listen_v2connected.py +++ b/src/deepgram/listen/v2/types/listen_v2connected.py @@ -5,11 +5,10 @@ import pydantic from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel -from .listen_v2connected_type import ListenV2ConnectedType class ListenV2Connected(UncheckedBaseModel): - type: ListenV2ConnectedType = pydantic.Field() + type: typing.Literal["Connected"] = pydantic.Field(default="Connected") """ Message type identifier """ diff --git a/src/deepgram/listen/v2/types/listen_v2connected_type.py b/src/deepgram/listen/v2/types/listen_v2connected_type.py deleted file mode 100644 index f2c23e1f..00000000 --- a/src/deepgram/listen/v2/types/listen_v2connected_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -ListenV2ConnectedType = typing.Union[typing.Literal["Connected"], typing.Any] diff --git a/src/deepgram/listen/v2/types/listen_v2fatal_error.py b/src/deepgram/listen/v2/types/listen_v2fatal_error.py index 92b58581..0e65c00e 100644 --- a/src/deepgram/listen/v2/types/listen_v2fatal_error.py +++ b/src/deepgram/listen/v2/types/listen_v2fatal_error.py @@ -5,11 +5,10 @@ import pydantic from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel -from .listen_v2fatal_error_type import ListenV2FatalErrorType class ListenV2FatalError(UncheckedBaseModel): - type: ListenV2FatalErrorType = pydantic.Field() + type: typing.Literal["Error"] = pydantic.Field(default="Error") """ Message type identifier """ diff --git a/src/deepgram/listen/v2/types/listen_v2fatal_error_type.py b/src/deepgram/listen/v2/types/listen_v2fatal_error_type.py deleted file mode 100644 index bb3395f9..00000000 --- a/src/deepgram/listen/v2/types/listen_v2fatal_error_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -ListenV2FatalErrorType = typing.Union[typing.Literal["Error"], typing.Any] diff --git a/src/deepgram/listen/v2/types/listen_v2turn_info.py b/src/deepgram/listen/v2/types/listen_v2turn_info.py index c14590a3..96f05688 100644 --- a/src/deepgram/listen/v2/types/listen_v2turn_info.py +++ b/src/deepgram/listen/v2/types/listen_v2turn_info.py @@ -6,7 +6,6 @@ from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel from .listen_v2turn_info_event import ListenV2TurnInfoEvent -from .listen_v2turn_info_type import ListenV2TurnInfoType from .listen_v2turn_info_words_item import ListenV2TurnInfoWordsItem @@ -15,7 +14,7 @@ class ListenV2TurnInfo(UncheckedBaseModel): Describes the current turn and latest state of the turn """ - type: ListenV2TurnInfoType + type: typing.Literal["TurnInfo"] = "TurnInfo" request_id: str = pydantic.Field() """ The unique identifier of the request @@ -67,6 +66,19 @@ class ListenV2TurnInfo(UncheckedBaseModel): Confidence that no more speech is coming in this turn """ + languages: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + Detected languages sorted by descending frequency in the + transcript. Only present when the flux-general-multi model + detects languages in the audio. + """ + + languages_hinted: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + The language hints that were supplied for this turn. Only + present when language hints are configured. 
+ """ + if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: diff --git a/src/deepgram/listen/v2/types/listen_v2turn_info_type.py b/src/deepgram/listen/v2/types/listen_v2turn_info_type.py deleted file mode 100644 index 5ec2c86a..00000000 --- a/src/deepgram/listen/v2/types/listen_v2turn_info_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -ListenV2TurnInfoType = typing.Union[typing.Literal["TurnInfo"], typing.Any] diff --git a/src/deepgram/read/v1/__init__.py b/src/deepgram/read/v1/__init__.py index 56569a29..6e2939a4 100644 --- a/src/deepgram/read/v1/__init__.py +++ b/src/deepgram/read/v1/__init__.py @@ -11,13 +11,13 @@ TextAnalyzeRequestCallbackMethod, TextAnalyzeRequestCustomIntentMode, TextAnalyzeRequestCustomTopicMode, - TextAnalyzeRequestSummarizeZero, + TextAnalyzeRequestSummarize, ) _dynamic_imports: typing.Dict[str, str] = { "TextAnalyzeRequestCallbackMethod": ".text", "TextAnalyzeRequestCustomIntentMode": ".text", "TextAnalyzeRequestCustomTopicMode": ".text", - "TextAnalyzeRequestSummarizeZero": ".text", + "TextAnalyzeRequestSummarize": ".text", "text": ".text", } @@ -47,6 +47,6 @@ def __dir__(): "TextAnalyzeRequestCallbackMethod", "TextAnalyzeRequestCustomIntentMode", "TextAnalyzeRequestCustomTopicMode", - "TextAnalyzeRequestSummarizeZero", + "TextAnalyzeRequestSummarize", "text", ] diff --git a/src/deepgram/read/v1/text/__init__.py b/src/deepgram/read/v1/text/__init__.py index 839a83c9..069d04c2 100644 --- a/src/deepgram/read/v1/text/__init__.py +++ b/src/deepgram/read/v1/text/__init__.py @@ -10,13 +10,13 @@ TextAnalyzeRequestCallbackMethod, TextAnalyzeRequestCustomIntentMode, TextAnalyzeRequestCustomTopicMode, - TextAnalyzeRequestSummarizeZero, + TextAnalyzeRequestSummarize, ) _dynamic_imports: typing.Dict[str, str] = { "TextAnalyzeRequestCallbackMethod": ".types", 
"TextAnalyzeRequestCustomIntentMode": ".types", "TextAnalyzeRequestCustomTopicMode": ".types", - "TextAnalyzeRequestSummarizeZero": ".types", + "TextAnalyzeRequestSummarize": ".types", } @@ -45,5 +45,5 @@ def __dir__(): "TextAnalyzeRequestCallbackMethod", "TextAnalyzeRequestCustomIntentMode", "TextAnalyzeRequestCustomTopicMode", - "TextAnalyzeRequestSummarizeZero", + "TextAnalyzeRequestSummarize", ] diff --git a/src/deepgram/read/v1/text/client.py b/src/deepgram/read/v1/text/client.py index 44f607f4..d04b8a7c 100644 --- a/src/deepgram/read/v1/text/client.py +++ b/src/deepgram/read/v1/text/client.py @@ -10,7 +10,7 @@ from .types.text_analyze_request_callback_method import TextAnalyzeRequestCallbackMethod from .types.text_analyze_request_custom_intent_mode import TextAnalyzeRequestCustomIntentMode from .types.text_analyze_request_custom_topic_mode import TextAnalyzeRequestCustomTopicMode -from .types.text_analyze_request_summarize_zero import TextAnalyzeRequestSummarizeZero +from .types.text_analyze_request_summarize import TextAnalyzeRequestSummarize # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) @@ -38,7 +38,7 @@ def analyze( callback: typing.Optional[str] = None, callback_method: typing.Optional[TextAnalyzeRequestCallbackMethod] = None, sentiment: typing.Optional[bool] = None, - summarize: typing.Optional[TextAnalyzeRequestSummarizeZero] = None, + summarize: typing.Optional[TextAnalyzeRequestSummarize] = None, tag: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, topics: typing.Optional[bool] = None, custom_topic: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, @@ -65,7 +65,7 @@ def analyze( sentiment : typing.Optional[bool] Recognizes the sentiment throughout a transcript or text - summarize : typing.Optional[TextAnalyzeRequestSummarizeZero] + summarize : typing.Optional[TextAnalyzeRequestSummarize] Summarize content. For Listen API, supports string version option. 
For Read API, accepts boolean only. tag : typing.Optional[typing.Union[str, typing.Sequence[str]]] @@ -164,7 +164,7 @@ async def analyze( callback: typing.Optional[str] = None, callback_method: typing.Optional[TextAnalyzeRequestCallbackMethod] = None, sentiment: typing.Optional[bool] = None, - summarize: typing.Optional[TextAnalyzeRequestSummarizeZero] = None, + summarize: typing.Optional[TextAnalyzeRequestSummarize] = None, tag: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, topics: typing.Optional[bool] = None, custom_topic: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, @@ -191,7 +191,7 @@ async def analyze( sentiment : typing.Optional[bool] Recognizes the sentiment throughout a transcript or text - summarize : typing.Optional[TextAnalyzeRequestSummarizeZero] + summarize : typing.Optional[TextAnalyzeRequestSummarize] Summarize content. For Listen API, supports string version option. For Read API, accepts boolean only. tag : typing.Optional[typing.Union[str, typing.Sequence[str]]] diff --git a/src/deepgram/read/v1/text/raw_client.py b/src/deepgram/read/v1/text/raw_client.py index 66defd3c..65b7ebca 100644 --- a/src/deepgram/read/v1/text/raw_client.py +++ b/src/deepgram/read/v1/text/raw_client.py @@ -15,7 +15,7 @@ from .types.text_analyze_request_callback_method import TextAnalyzeRequestCallbackMethod from .types.text_analyze_request_custom_intent_mode import TextAnalyzeRequestCustomIntentMode from .types.text_analyze_request_custom_topic_mode import TextAnalyzeRequestCustomTopicMode -from .types.text_analyze_request_summarize_zero import TextAnalyzeRequestSummarizeZero +from .types.text_analyze_request_summarize import TextAnalyzeRequestSummarize # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) 
@@ -32,7 +32,7 @@ def analyze( callback: typing.Optional[str] = None, callback_method: typing.Optional[TextAnalyzeRequestCallbackMethod] = None, sentiment: typing.Optional[bool] = None, - summarize: typing.Optional[TextAnalyzeRequestSummarizeZero] = None, + summarize: typing.Optional[TextAnalyzeRequestSummarize] = None, tag: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, topics: typing.Optional[bool] = None, custom_topic: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, @@ -59,7 +59,7 @@ def analyze( sentiment : typing.Optional[bool] Recognizes the sentiment throughout a transcript or text - summarize : typing.Optional[TextAnalyzeRequestSummarizeZero] + summarize : typing.Optional[TextAnalyzeRequestSummarize] Summarize content. For Listen API, supports string version option. For Read API, accepts boolean only. tag : typing.Optional[typing.Union[str, typing.Sequence[str]]] @@ -159,7 +159,7 @@ async def analyze( callback: typing.Optional[str] = None, callback_method: typing.Optional[TextAnalyzeRequestCallbackMethod] = None, sentiment: typing.Optional[bool] = None, - summarize: typing.Optional[TextAnalyzeRequestSummarizeZero] = None, + summarize: typing.Optional[TextAnalyzeRequestSummarize] = None, tag: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, topics: typing.Optional[bool] = None, custom_topic: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, @@ -186,7 +186,7 @@ async def analyze( sentiment : typing.Optional[bool] Recognizes the sentiment throughout a transcript or text - summarize : typing.Optional[TextAnalyzeRequestSummarizeZero] + summarize : typing.Optional[TextAnalyzeRequestSummarize] Summarize content. For Listen API, supports string version option. For Read API, accepts boolean only. 
tag : typing.Optional[typing.Union[str, typing.Sequence[str]]] diff --git a/src/deepgram/read/v1/text/types/__init__.py b/src/deepgram/read/v1/text/types/__init__.py index f4086661..cb955d49 100644 --- a/src/deepgram/read/v1/text/types/__init__.py +++ b/src/deepgram/read/v1/text/types/__init__.py @@ -9,12 +9,12 @@ from .text_analyze_request_callback_method import TextAnalyzeRequestCallbackMethod from .text_analyze_request_custom_intent_mode import TextAnalyzeRequestCustomIntentMode from .text_analyze_request_custom_topic_mode import TextAnalyzeRequestCustomTopicMode - from .text_analyze_request_summarize_zero import TextAnalyzeRequestSummarizeZero + from .text_analyze_request_summarize import TextAnalyzeRequestSummarize _dynamic_imports: typing.Dict[str, str] = { "TextAnalyzeRequestCallbackMethod": ".text_analyze_request_callback_method", "TextAnalyzeRequestCustomIntentMode": ".text_analyze_request_custom_intent_mode", "TextAnalyzeRequestCustomTopicMode": ".text_analyze_request_custom_topic_mode", - "TextAnalyzeRequestSummarizeZero": ".text_analyze_request_summarize_zero", + "TextAnalyzeRequestSummarize": ".text_analyze_request_summarize", } @@ -43,5 +43,5 @@ def __dir__(): "TextAnalyzeRequestCallbackMethod", "TextAnalyzeRequestCustomIntentMode", "TextAnalyzeRequestCustomTopicMode", - "TextAnalyzeRequestSummarizeZero", + "TextAnalyzeRequestSummarize", ] diff --git a/src/deepgram/agent/v1/types/agent_v1keep_alive_type.py b/src/deepgram/read/v1/text/types/text_analyze_request_summarize.py similarity index 59% rename from src/deepgram/agent/v1/types/agent_v1keep_alive_type.py rename to src/deepgram/read/v1/text/types/text_analyze_request_summarize.py index 01adb138..74aa7e3b 100644 --- a/src/deepgram/agent/v1/types/agent_v1keep_alive_type.py +++ b/src/deepgram/read/v1/text/types/text_analyze_request_summarize.py @@ -2,4 +2,4 @@ import typing -AgentV1KeepAliveType = typing.Union[typing.Literal["KeepAlive"], typing.Any] +TextAnalyzeRequestSummarize = 
typing.Union[typing.Literal["v2"], typing.Any] diff --git a/src/deepgram/requests/__init__.py b/src/deepgram/requests/__init__.py index 8406dfe5..a2dbf463 100644 --- a/src/deepgram/requests/__init__.py +++ b/src/deepgram/requests/__init__.py @@ -6,6 +6,7 @@ from importlib import import_module if typing.TYPE_CHECKING: + from .agent_configuration_v1 import AgentConfigurationV1Params from .agent_think_models_v1response import AgentThinkModelsV1ResponseParams from .agent_think_models_v1response_models_item import AgentThinkModelsV1ResponseModelsItemParams from .agent_think_models_v1response_models_item_id import AgentThinkModelsV1ResponseModelsItemIdParams @@ -13,10 +14,19 @@ from .agent_think_models_v1response_models_item_three import AgentThinkModelsV1ResponseModelsItemThreeParams from .agent_think_models_v1response_models_item_two import AgentThinkModelsV1ResponseModelsItemTwoParams from .agent_think_models_v1response_models_item_zero import AgentThinkModelsV1ResponseModelsItemZeroParams + from .agent_variable_v1 import AgentVariableV1Params + from .anthropic import AnthropicParams + from .aws_bedrock_think_provider import AwsBedrockThinkProviderParams + from .aws_bedrock_think_provider_credentials import AwsBedrockThinkProviderCredentialsParams + from .aws_polly_speak_provider import AwsPollySpeakProviderParams + from .aws_polly_speak_provider_credentials import AwsPollySpeakProviderCredentialsParams from .billing_breakdown_v1response import BillingBreakdownV1ResponseParams from .billing_breakdown_v1response_resolution import BillingBreakdownV1ResponseResolutionParams from .billing_breakdown_v1response_results_item import BillingBreakdownV1ResponseResultsItemParams from .billing_breakdown_v1response_results_item_grouping import BillingBreakdownV1ResponseResultsItemGroupingParams + from .cartesia import CartesiaParams + from .cartesia_speak_provider_voice import CartesiaSpeakProviderVoiceParams + from .create_agent_configuration_v1response import 
CreateAgentConfigurationV1ResponseParams from .create_key_v1response import CreateKeyV1ResponseParams from .create_project_distribution_credentials_v1response import CreateProjectDistributionCredentialsV1ResponseParams from .create_project_distribution_credentials_v1response_distribution_credentials import ( @@ -26,10 +36,12 @@ CreateProjectDistributionCredentialsV1ResponseMemberParams, ) from .create_project_invite_v1response import CreateProjectInviteV1ResponseParams + from .deepgram import DeepgramParams from .delete_project_invite_v1response import DeleteProjectInviteV1ResponseParams from .delete_project_key_v1response import DeleteProjectKeyV1ResponseParams from .delete_project_member_v1response import DeleteProjectMemberV1ResponseParams from .delete_project_v1response import DeleteProjectV1ResponseParams + from .eleven_labs_speak_provider import ElevenLabsSpeakProviderParams from .error_response import ErrorResponseParams from .error_response_legacy_error import ErrorResponseLegacyErrorParams from .error_response_modern_error import ErrorResponseModernErrorParams @@ -51,8 +63,12 @@ from .get_project_key_v1response_item_member_api_key import GetProjectKeyV1ResponseItemMemberApiKeyParams from .get_project_request_v1response import GetProjectRequestV1ResponseParams from .get_project_v1response import GetProjectV1ResponseParams + from .google import GoogleParams from .grant_v1response import GrantV1ResponseParams + from .groq import GroqParams from .leave_project_v1response import LeaveProjectV1ResponseParams + from .list_agent_configurations_v1response import ListAgentConfigurationsV1ResponseParams + from .list_agent_variables_v1response import ListAgentVariablesV1ResponseParams from .list_billing_fields_v1response import ListBillingFieldsV1ResponseParams from .list_models_v1response import ListModelsV1ResponseParams from .list_models_v1response_stt_models import ListModelsV1ResponseSttModelsParams @@ -129,6 +145,8 @@ 
ListenV1ResponseResultsUtterancesItemWordsItemParams, ) from .listen_v2keyterm import ListenV2KeytermParams + from .open_ai_speak_provider import OpenAiSpeakProviderParams + from .open_ai_think_provider import OpenAiThinkProviderParams from .project_request_response import ProjectRequestResponseParams from .read_v1request import ReadV1RequestParams from .read_v1request_text import ReadV1RequestTextParams @@ -161,7 +179,29 @@ from .shared_topics_results_topics_segments_item_topics_item import ( SharedTopicsResultsTopicsSegmentsItemTopicsItemParams, ) + from .speak_settings_v1 import SpeakSettingsV1Params + from .speak_settings_v1endpoint import SpeakSettingsV1EndpointParams + from .speak_settings_v1provider import ( + SpeakSettingsV1ProviderParams, + SpeakSettingsV1Provider_AwsPollyParams, + SpeakSettingsV1Provider_CartesiaParams, + SpeakSettingsV1Provider_DeepgramParams, + SpeakSettingsV1Provider_ElevenLabsParams, + SpeakSettingsV1Provider_OpenAiParams, + ) from .think_settings_v1 import ThinkSettingsV1Params + from .think_settings_v1context_length import ThinkSettingsV1ContextLengthParams + from .think_settings_v1endpoint import ThinkSettingsV1EndpointParams + from .think_settings_v1functions_item import ThinkSettingsV1FunctionsItemParams + from .think_settings_v1functions_item_endpoint import ThinkSettingsV1FunctionsItemEndpointParams + from .think_settings_v1provider import ( + ThinkSettingsV1ProviderParams, + ThinkSettingsV1Provider_AnthropicParams, + ThinkSettingsV1Provider_AwsBedrockParams, + ThinkSettingsV1Provider_GoogleParams, + ThinkSettingsV1Provider_GroqParams, + ThinkSettingsV1Provider_OpenAiParams, + ) from .update_project_member_scopes_v1response import UpdateProjectMemberScopesV1ResponseParams from .update_project_v1response import UpdateProjectV1ResponseParams from .usage_breakdown_v1response import UsageBreakdownV1ResponseParams @@ -173,6 +213,7 @@ from .usage_v1response import UsageV1ResponseParams from .usage_v1response_resolution import 
UsageV1ResponseResolutionParams _dynamic_imports: typing.Dict[str, str] = { + "AgentConfigurationV1Params": ".agent_configuration_v1", "AgentThinkModelsV1ResponseModelsItemIdParams": ".agent_think_models_v1response_models_item_id", "AgentThinkModelsV1ResponseModelsItemOneParams": ".agent_think_models_v1response_models_item_one", "AgentThinkModelsV1ResponseModelsItemParams": ".agent_think_models_v1response_models_item", @@ -180,19 +221,30 @@ "AgentThinkModelsV1ResponseModelsItemTwoParams": ".agent_think_models_v1response_models_item_two", "AgentThinkModelsV1ResponseModelsItemZeroParams": ".agent_think_models_v1response_models_item_zero", "AgentThinkModelsV1ResponseParams": ".agent_think_models_v1response", + "AgentVariableV1Params": ".agent_variable_v1", + "AnthropicParams": ".anthropic", + "AwsBedrockThinkProviderCredentialsParams": ".aws_bedrock_think_provider_credentials", + "AwsBedrockThinkProviderParams": ".aws_bedrock_think_provider", + "AwsPollySpeakProviderCredentialsParams": ".aws_polly_speak_provider_credentials", + "AwsPollySpeakProviderParams": ".aws_polly_speak_provider", "BillingBreakdownV1ResponseParams": ".billing_breakdown_v1response", "BillingBreakdownV1ResponseResolutionParams": ".billing_breakdown_v1response_resolution", "BillingBreakdownV1ResponseResultsItemGroupingParams": ".billing_breakdown_v1response_results_item_grouping", "BillingBreakdownV1ResponseResultsItemParams": ".billing_breakdown_v1response_results_item", + "CartesiaParams": ".cartesia", + "CartesiaSpeakProviderVoiceParams": ".cartesia_speak_provider_voice", + "CreateAgentConfigurationV1ResponseParams": ".create_agent_configuration_v1response", "CreateKeyV1ResponseParams": ".create_key_v1response", "CreateProjectDistributionCredentialsV1ResponseDistributionCredentialsParams": ".create_project_distribution_credentials_v1response_distribution_credentials", "CreateProjectDistributionCredentialsV1ResponseMemberParams": ".create_project_distribution_credentials_v1response_member", 
"CreateProjectDistributionCredentialsV1ResponseParams": ".create_project_distribution_credentials_v1response", "CreateProjectInviteV1ResponseParams": ".create_project_invite_v1response", + "DeepgramParams": ".deepgram", "DeleteProjectInviteV1ResponseParams": ".delete_project_invite_v1response", "DeleteProjectKeyV1ResponseParams": ".delete_project_key_v1response", "DeleteProjectMemberV1ResponseParams": ".delete_project_member_v1response", "DeleteProjectV1ResponseParams": ".delete_project_v1response", + "ElevenLabsSpeakProviderParams": ".eleven_labs_speak_provider", "ErrorResponseLegacyErrorParams": ".error_response_legacy_error", "ErrorResponseModernErrorParams": ".error_response_modern_error", "ErrorResponseParams": ".error_response", @@ -210,8 +262,12 @@ "GetProjectKeyV1ResponseParams": ".get_project_key_v1response", "GetProjectRequestV1ResponseParams": ".get_project_request_v1response", "GetProjectV1ResponseParams": ".get_project_v1response", + "GoogleParams": ".google", "GrantV1ResponseParams": ".grant_v1response", + "GroqParams": ".groq", "LeaveProjectV1ResponseParams": ".leave_project_v1response", + "ListAgentConfigurationsV1ResponseParams": ".list_agent_configurations_v1response", + "ListAgentVariablesV1ResponseParams": ".list_agent_variables_v1response", "ListBillingFieldsV1ResponseParams": ".list_billing_fields_v1response", "ListModelsV1ResponseParams": ".list_models_v1response", "ListModelsV1ResponseSttModelsParams": ".list_models_v1response_stt_models", @@ -262,6 +318,8 @@ "ListenV1ResponseResultsUtterancesItemWordsItemParams": ".listen_v1response_results_utterances_item_words_item", "ListenV1ResponseResultsUtterancesParams": ".listen_v1response_results_utterances", "ListenV2KeytermParams": ".listen_v2keyterm", + "OpenAiSpeakProviderParams": ".open_ai_speak_provider", + "OpenAiThinkProviderParams": ".open_ai_think_provider", "ProjectRequestResponseParams": ".project_request_response", "ReadV1RequestParams": ".read_v1request", "ReadV1RequestTextParams": 
".read_v1request_text", @@ -290,7 +348,25 @@ "SharedTopicsResultsTopicsParams": ".shared_topics_results_topics", "SharedTopicsResultsTopicsSegmentsItemParams": ".shared_topics_results_topics_segments_item", "SharedTopicsResultsTopicsSegmentsItemTopicsItemParams": ".shared_topics_results_topics_segments_item_topics_item", + "SpeakSettingsV1EndpointParams": ".speak_settings_v1endpoint", + "SpeakSettingsV1Params": ".speak_settings_v1", + "SpeakSettingsV1ProviderParams": ".speak_settings_v1provider", + "SpeakSettingsV1Provider_AwsPollyParams": ".speak_settings_v1provider", + "SpeakSettingsV1Provider_CartesiaParams": ".speak_settings_v1provider", + "SpeakSettingsV1Provider_DeepgramParams": ".speak_settings_v1provider", + "SpeakSettingsV1Provider_ElevenLabsParams": ".speak_settings_v1provider", + "SpeakSettingsV1Provider_OpenAiParams": ".speak_settings_v1provider", + "ThinkSettingsV1ContextLengthParams": ".think_settings_v1context_length", + "ThinkSettingsV1EndpointParams": ".think_settings_v1endpoint", + "ThinkSettingsV1FunctionsItemEndpointParams": ".think_settings_v1functions_item_endpoint", + "ThinkSettingsV1FunctionsItemParams": ".think_settings_v1functions_item", "ThinkSettingsV1Params": ".think_settings_v1", + "ThinkSettingsV1ProviderParams": ".think_settings_v1provider", + "ThinkSettingsV1Provider_AnthropicParams": ".think_settings_v1provider", + "ThinkSettingsV1Provider_AwsBedrockParams": ".think_settings_v1provider", + "ThinkSettingsV1Provider_GoogleParams": ".think_settings_v1provider", + "ThinkSettingsV1Provider_GroqParams": ".think_settings_v1provider", + "ThinkSettingsV1Provider_OpenAiParams": ".think_settings_v1provider", "UpdateProjectMemberScopesV1ResponseParams": ".update_project_member_scopes_v1response", "UpdateProjectV1ResponseParams": ".update_project_v1response", "UsageBreakdownV1ResponseParams": ".usage_breakdown_v1response", @@ -326,6 +402,7 @@ def __dir__(): __all__ = [ + "AgentConfigurationV1Params", 
"AgentThinkModelsV1ResponseModelsItemIdParams", "AgentThinkModelsV1ResponseModelsItemOneParams", "AgentThinkModelsV1ResponseModelsItemParams", @@ -333,19 +410,30 @@ def __dir__(): "AgentThinkModelsV1ResponseModelsItemTwoParams", "AgentThinkModelsV1ResponseModelsItemZeroParams", "AgentThinkModelsV1ResponseParams", + "AgentVariableV1Params", + "AnthropicParams", + "AwsBedrockThinkProviderCredentialsParams", + "AwsBedrockThinkProviderParams", + "AwsPollySpeakProviderCredentialsParams", + "AwsPollySpeakProviderParams", "BillingBreakdownV1ResponseParams", "BillingBreakdownV1ResponseResolutionParams", "BillingBreakdownV1ResponseResultsItemGroupingParams", "BillingBreakdownV1ResponseResultsItemParams", + "CartesiaParams", + "CartesiaSpeakProviderVoiceParams", + "CreateAgentConfigurationV1ResponseParams", "CreateKeyV1ResponseParams", "CreateProjectDistributionCredentialsV1ResponseDistributionCredentialsParams", "CreateProjectDistributionCredentialsV1ResponseMemberParams", "CreateProjectDistributionCredentialsV1ResponseParams", "CreateProjectInviteV1ResponseParams", + "DeepgramParams", "DeleteProjectInviteV1ResponseParams", "DeleteProjectKeyV1ResponseParams", "DeleteProjectMemberV1ResponseParams", "DeleteProjectV1ResponseParams", + "ElevenLabsSpeakProviderParams", "ErrorResponseLegacyErrorParams", "ErrorResponseModernErrorParams", "ErrorResponseParams", @@ -363,8 +451,12 @@ def __dir__(): "GetProjectKeyV1ResponseParams", "GetProjectRequestV1ResponseParams", "GetProjectV1ResponseParams", + "GoogleParams", "GrantV1ResponseParams", + "GroqParams", "LeaveProjectV1ResponseParams", + "ListAgentConfigurationsV1ResponseParams", + "ListAgentVariablesV1ResponseParams", "ListBillingFieldsV1ResponseParams", "ListModelsV1ResponseParams", "ListModelsV1ResponseSttModelsParams", @@ -415,6 +507,8 @@ def __dir__(): "ListenV1ResponseResultsUtterancesItemWordsItemParams", "ListenV1ResponseResultsUtterancesParams", "ListenV2KeytermParams", + "OpenAiSpeakProviderParams", + 
"OpenAiThinkProviderParams", "ProjectRequestResponseParams", "ReadV1RequestParams", "ReadV1RequestTextParams", @@ -443,7 +537,25 @@ def __dir__(): "SharedTopicsResultsTopicsParams", "SharedTopicsResultsTopicsSegmentsItemParams", "SharedTopicsResultsTopicsSegmentsItemTopicsItemParams", + "SpeakSettingsV1EndpointParams", + "SpeakSettingsV1Params", + "SpeakSettingsV1ProviderParams", + "SpeakSettingsV1Provider_AwsPollyParams", + "SpeakSettingsV1Provider_CartesiaParams", + "SpeakSettingsV1Provider_DeepgramParams", + "SpeakSettingsV1Provider_ElevenLabsParams", + "SpeakSettingsV1Provider_OpenAiParams", + "ThinkSettingsV1ContextLengthParams", + "ThinkSettingsV1EndpointParams", + "ThinkSettingsV1FunctionsItemEndpointParams", + "ThinkSettingsV1FunctionsItemParams", "ThinkSettingsV1Params", + "ThinkSettingsV1ProviderParams", + "ThinkSettingsV1Provider_AnthropicParams", + "ThinkSettingsV1Provider_AwsBedrockParams", + "ThinkSettingsV1Provider_GoogleParams", + "ThinkSettingsV1Provider_GroqParams", + "ThinkSettingsV1Provider_OpenAiParams", "UpdateProjectMemberScopesV1ResponseParams", "UpdateProjectV1ResponseParams", "UsageBreakdownV1ResponseParams", diff --git a/src/deepgram/requests/agent_configuration_v1.py b/src/deepgram/requests/agent_configuration_v1.py new file mode 100644 index 00000000..1bf38832 --- /dev/null +++ b/src/deepgram/requests/agent_configuration_v1.py @@ -0,0 +1,37 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import datetime as dt +import typing + +import typing_extensions + + +class AgentConfigurationV1Params(typing_extensions.TypedDict): + """ + A reusable agent configuration + """ + + agent_id: str + """ + The unique identifier of the agent configuration + """ + + config: typing.Dict[str, typing.Any] + """ + The agent configuration object + """ + + metadata: typing_extensions.NotRequired[typing.Dict[str, str]] + """ + A map of arbitrary key-value pairs for labeling or organizing the agent configuration + """ + + created_at: typing_extensions.NotRequired[dt.datetime] + """ + Timestamp when the configuration was created + """ + + updated_at: typing_extensions.NotRequired[dt.datetime] + """ + Timestamp when the configuration was last updated + """ diff --git a/src/deepgram/requests/agent_think_models_v1response_models_item_id.py b/src/deepgram/requests/agent_think_models_v1response_models_item_id.py index b1b4c7da..4bfeab9b 100644 --- a/src/deepgram/requests/agent_think_models_v1response_models_item_id.py +++ b/src/deepgram/requests/agent_think_models_v1response_models_item_id.py @@ -1,7 +1,8 @@ # This file was auto-generated by Fern from our API Definition. 
+import typing + import typing_extensions -from ..types.agent_think_models_v1response_models_item_id_provider import AgentThinkModelsV1ResponseModelsItemIdProvider class AgentThinkModelsV1ResponseModelsItemIdParams(typing_extensions.TypedDict): @@ -19,7 +20,7 @@ class AgentThinkModelsV1ResponseModelsItemIdParams(typing_extensions.TypedDict): The display name of the model """ - provider: AgentThinkModelsV1ResponseModelsItemIdProvider + provider: typing.Literal["aws_bedrock"] """ The provider of the model """ diff --git a/src/deepgram/requests/agent_think_models_v1response_models_item_one.py b/src/deepgram/requests/agent_think_models_v1response_models_item_one.py index 5246711a..88f45618 100644 --- a/src/deepgram/requests/agent_think_models_v1response_models_item_one.py +++ b/src/deepgram/requests/agent_think_models_v1response_models_item_one.py @@ -1,10 +1,9 @@ # This file was auto-generated by Fern from our API Definition. +import typing + import typing_extensions from ..types.agent_think_models_v1response_models_item_one_id import AgentThinkModelsV1ResponseModelsItemOneId -from ..types.agent_think_models_v1response_models_item_one_provider import ( - AgentThinkModelsV1ResponseModelsItemOneProvider, -) class AgentThinkModelsV1ResponseModelsItemOneParams(typing_extensions.TypedDict): @@ -22,7 +21,7 @@ class AgentThinkModelsV1ResponseModelsItemOneParams(typing_extensions.TypedDict) The display name of the model """ - provider: AgentThinkModelsV1ResponseModelsItemOneProvider + provider: typing.Literal["anthropic"] """ The provider of the model """ diff --git a/src/deepgram/requests/agent_think_models_v1response_models_item_three.py b/src/deepgram/requests/agent_think_models_v1response_models_item_three.py index 4db87165..cabb968b 100644 --- a/src/deepgram/requests/agent_think_models_v1response_models_item_three.py +++ b/src/deepgram/requests/agent_think_models_v1response_models_item_three.py @@ -1,10 +1,8 @@ # This file was auto-generated by Fern from our API 
Definition. +import typing + import typing_extensions -from ..types.agent_think_models_v1response_models_item_three_id import AgentThinkModelsV1ResponseModelsItemThreeId -from ..types.agent_think_models_v1response_models_item_three_provider import ( - AgentThinkModelsV1ResponseModelsItemThreeProvider, -) class AgentThinkModelsV1ResponseModelsItemThreeParams(typing_extensions.TypedDict): @@ -12,7 +10,7 @@ class AgentThinkModelsV1ResponseModelsItemThreeParams(typing_extensions.TypedDic Groq models """ - id: AgentThinkModelsV1ResponseModelsItemThreeId + id: typing.Literal["openai/gpt-oss-20b"] """ The unique identifier of the Groq model """ @@ -22,7 +20,7 @@ class AgentThinkModelsV1ResponseModelsItemThreeParams(typing_extensions.TypedDic The display name of the model """ - provider: AgentThinkModelsV1ResponseModelsItemThreeProvider + provider: typing.Literal["groq"] """ The provider of the model """ diff --git a/src/deepgram/requests/agent_think_models_v1response_models_item_two.py b/src/deepgram/requests/agent_think_models_v1response_models_item_two.py index 1ec41562..5466ad3d 100644 --- a/src/deepgram/requests/agent_think_models_v1response_models_item_two.py +++ b/src/deepgram/requests/agent_think_models_v1response_models_item_two.py @@ -1,10 +1,9 @@ # This file was auto-generated by Fern from our API Definition. 
+import typing + import typing_extensions from ..types.agent_think_models_v1response_models_item_two_id import AgentThinkModelsV1ResponseModelsItemTwoId -from ..types.agent_think_models_v1response_models_item_two_provider import ( - AgentThinkModelsV1ResponseModelsItemTwoProvider, -) class AgentThinkModelsV1ResponseModelsItemTwoParams(typing_extensions.TypedDict): @@ -22,7 +21,7 @@ class AgentThinkModelsV1ResponseModelsItemTwoParams(typing_extensions.TypedDict) The display name of the model """ - provider: AgentThinkModelsV1ResponseModelsItemTwoProvider + provider: typing.Literal["google"] """ The provider of the model """ diff --git a/src/deepgram/requests/agent_think_models_v1response_models_item_zero.py b/src/deepgram/requests/agent_think_models_v1response_models_item_zero.py index e0a6c466..5b027b7c 100644 --- a/src/deepgram/requests/agent_think_models_v1response_models_item_zero.py +++ b/src/deepgram/requests/agent_think_models_v1response_models_item_zero.py @@ -1,10 +1,9 @@ # This file was auto-generated by Fern from our API Definition. +import typing + import typing_extensions from ..types.agent_think_models_v1response_models_item_zero_id import AgentThinkModelsV1ResponseModelsItemZeroId -from ..types.agent_think_models_v1response_models_item_zero_provider import ( - AgentThinkModelsV1ResponseModelsItemZeroProvider, -) class AgentThinkModelsV1ResponseModelsItemZeroParams(typing_extensions.TypedDict): @@ -22,7 +21,7 @@ class AgentThinkModelsV1ResponseModelsItemZeroParams(typing_extensions.TypedDict The display name of the model """ - provider: AgentThinkModelsV1ResponseModelsItemZeroProvider + provider: typing.Literal["open_ai"] """ The provider of the model """ diff --git a/src/deepgram/requests/agent_variable_v1.py b/src/deepgram/requests/agent_variable_v1.py new file mode 100644 index 00000000..33397074 --- /dev/null +++ b/src/deepgram/requests/agent_variable_v1.py @@ -0,0 +1,33 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import datetime as dt +import typing + +import typing_extensions + + +class AgentVariableV1Params(typing_extensions.TypedDict): + """ + A template variable for agent configurations + """ + + variable_id: str + """ + The unique identifier of the variable + """ + + key: str + """ + The variable name, following the DG_ format + """ + + value: typing.Any + created_at: typing_extensions.NotRequired[dt.datetime] + """ + Timestamp when the variable was created + """ + + updated_at: typing_extensions.NotRequired[dt.datetime] + """ + Timestamp when the variable was last updated + """ diff --git a/src/deepgram/requests/anthropic.py b/src/deepgram/requests/anthropic.py new file mode 100644 index 00000000..3b60c72d --- /dev/null +++ b/src/deepgram/requests/anthropic.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from ..types.anthropic_think_provider_model import AnthropicThinkProviderModel + + +class AnthropicParams(typing_extensions.TypedDict): + type: typing.Literal["anthropic"] + version: typing_extensions.NotRequired[typing.Literal["v1"]] + """ + The REST API version for the Anthropic Messages API + """ + + model: AnthropicThinkProviderModel + """ + Anthropic model to use + """ + + temperature: typing_extensions.NotRequired[float] + """ + Anthropic temperature (0-1) + """ diff --git a/src/deepgram/requests/aws_bedrock_think_provider.py b/src/deepgram/requests/aws_bedrock_think_provider.py new file mode 100644 index 00000000..1bdfd6f4 --- /dev/null +++ b/src/deepgram/requests/aws_bedrock_think_provider.py @@ -0,0 +1,25 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +import typing_extensions +from ..types.aws_bedrock_think_provider_model import AwsBedrockThinkProviderModel +from .aws_bedrock_think_provider_credentials import AwsBedrockThinkProviderCredentialsParams + + +class AwsBedrockThinkProviderParams(typing_extensions.TypedDict): + type: typing.Literal["aws_bedrock"] + model: AwsBedrockThinkProviderModel + """ + AWS Bedrock model to use + """ + + temperature: typing_extensions.NotRequired[float] + """ + AWS Bedrock temperature (0-2) + """ + + credentials: typing_extensions.NotRequired[AwsBedrockThinkProviderCredentialsParams] + """ + AWS credentials type (STS short-lived or IAM long-lived) + """ diff --git a/src/deepgram/requests/aws_bedrock_think_provider_credentials.py b/src/deepgram/requests/aws_bedrock_think_provider_credentials.py new file mode 100644 index 00000000..ff2a0cb4 --- /dev/null +++ b/src/deepgram/requests/aws_bedrock_think_provider_credentials.py @@ -0,0 +1,35 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing_extensions +from ..types.aws_bedrock_think_provider_credentials_type import AwsBedrockThinkProviderCredentialsType + + +class AwsBedrockThinkProviderCredentialsParams(typing_extensions.TypedDict): + """ + AWS credentials type (STS short-lived or IAM long-lived) + """ + + type: typing_extensions.NotRequired[AwsBedrockThinkProviderCredentialsType] + """ + AWS credentials type (STS short-lived or IAM long-lived) + """ + + region: typing_extensions.NotRequired[str] + """ + AWS region + """ + + access_key_id: typing_extensions.NotRequired[str] + """ + AWS access key + """ + + secret_access_key: typing_extensions.NotRequired[str] + """ + AWS secret access key + """ + + session_token: typing_extensions.NotRequired[str] + """ + AWS session token (required for STS only) + """ diff --git a/src/deepgram/requests/aws_polly_speak_provider.py b/src/deepgram/requests/aws_polly_speak_provider.py new file mode 100644 index 00000000..d5cbb726 --- /dev/null +++ b/src/deepgram/requests/aws_polly_speak_provider.py @@ -0,0 +1,29 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from ..types.aws_polly_speak_provider_engine import AwsPollySpeakProviderEngine +from ..types.aws_polly_speak_provider_voice import AwsPollySpeakProviderVoice +from .aws_polly_speak_provider_credentials import AwsPollySpeakProviderCredentialsParams + + +class AwsPollySpeakProviderParams(typing_extensions.TypedDict): + type: typing.Literal["aws_polly"] + voice: AwsPollySpeakProviderVoice + """ + AWS Polly voice name + """ + + language: str + """ + Language code to use, e.g. 'en-US'. Corresponds to the `language_code` parameter in the AWS Polly API + """ + + language_code: typing_extensions.NotRequired[str] + """ + Use the `language` field instead. 
+ """ + + engine: AwsPollySpeakProviderEngine + credentials: AwsPollySpeakProviderCredentialsParams diff --git a/src/deepgram/requests/aws_polly_speak_provider_credentials.py b/src/deepgram/requests/aws_polly_speak_provider_credentials.py new file mode 100644 index 00000000..fd279a88 --- /dev/null +++ b/src/deepgram/requests/aws_polly_speak_provider_credentials.py @@ -0,0 +1,15 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions +from ..types.aws_polly_speak_provider_credentials_type import AwsPollySpeakProviderCredentialsType + + +class AwsPollySpeakProviderCredentialsParams(typing_extensions.TypedDict): + type: AwsPollySpeakProviderCredentialsType + region: str + access_key_id: str + secret_access_key: str + session_token: typing_extensions.NotRequired[str] + """ + Required for STS only + """ diff --git a/src/deepgram/requests/cartesia.py b/src/deepgram/requests/cartesia.py new file mode 100644 index 00000000..40d34328 --- /dev/null +++ b/src/deepgram/requests/cartesia.py @@ -0,0 +1,31 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from ..types.cartesia_speak_provider_model_id import CartesiaSpeakProviderModelId +from .cartesia_speak_provider_voice import CartesiaSpeakProviderVoiceParams + + +class CartesiaParams(typing_extensions.TypedDict): + type: typing.Literal["cartesia"] + version: typing_extensions.NotRequired[typing.Literal["2025-03-17"]] + """ + The API version header for the Cartesia text-to-speech API + """ + + model_id: CartesiaSpeakProviderModelId + """ + Cartesia model ID + """ + + voice: CartesiaSpeakProviderVoiceParams + language: typing_extensions.NotRequired[str] + """ + Cartesia language code + """ + + volume: typing_extensions.NotRequired[float] + """ + Volume level for Cartesia TTS output. Valid range: 0.5 to 2.0. 
See [Cartesia documentation](https://docs.cartesia.ai/build-with-cartesia/sonic-3/volume-speed-emotion#volume-speed-and-emotion). + """ diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_provider_cartesia_voice.py b/src/deepgram/requests/cartesia_speak_provider_voice.py similarity index 67% rename from src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_provider_cartesia_voice.py rename to src/deepgram/requests/cartesia_speak_provider_voice.py index 5702bdb7..89929e27 100644 --- a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_provider_cartesia_voice.py +++ b/src/deepgram/requests/cartesia_speak_provider_voice.py @@ -3,7 +3,7 @@ import typing_extensions -class AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVoiceParams(typing_extensions.TypedDict): +class CartesiaSpeakProviderVoiceParams(typing_extensions.TypedDict): mode: str """ Cartesia voice mode diff --git a/src/deepgram/requests/create_agent_configuration_v1response.py b/src/deepgram/requests/create_agent_configuration_v1response.py new file mode 100644 index 00000000..78b025a1 --- /dev/null +++ b/src/deepgram/requests/create_agent_configuration_v1response.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions + + +class CreateAgentConfigurationV1ResponseParams(typing_extensions.TypedDict): + agent_id: str + """ + The unique identifier of the newly created agent configuration + """ + + config: typing.Dict[str, typing.Any] + """ + The parsed agent configuration object + """ + + metadata: typing_extensions.NotRequired[typing.Dict[str, str]] + """ + Metadata associated with the agent configuration + """ diff --git a/src/deepgram/requests/deepgram.py b/src/deepgram/requests/deepgram.py new file mode 100644 index 00000000..a5ad6104 --- /dev/null +++ b/src/deepgram/requests/deepgram.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +import typing_extensions +from ..types.deepgram_speak_provider_model import DeepgramSpeakProviderModel + + +class DeepgramParams(typing_extensions.TypedDict): + type: typing.Literal["deepgram"] + version: typing_extensions.NotRequired[typing.Literal["v1"]] + """ + The REST API version for the Deepgram text-to-speech API + """ + + model: DeepgramSpeakProviderModel + """ + Deepgram TTS model + """ + + speed: typing_extensions.NotRequired[float] + """ + Speaking rate multiplier that adjusts the pace of generated speech while preserving natural prosody and voice quality. Not yet supported in all languages. + """ diff --git a/src/deepgram/requests/eleven_labs_speak_provider.py b/src/deepgram/requests/eleven_labs_speak_provider.py new file mode 100644 index 00000000..e0f24bf4 --- /dev/null +++ b/src/deepgram/requests/eleven_labs_speak_provider.py @@ -0,0 +1,29 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from ..types.eleven_labs_speak_provider_model_id import ElevenLabsSpeakProviderModelId + + +class ElevenLabsSpeakProviderParams(typing_extensions.TypedDict): + type: typing.Literal["eleven_labs"] + version: typing_extensions.NotRequired[typing.Literal["v1"]] + """ + The REST API version for the ElevenLabs text-to-speech API + """ + + model_id: ElevenLabsSpeakProviderModelId + """ + Eleven Labs model ID + """ + + language: typing_extensions.NotRequired[str] + """ + Optional language to use, e.g. 'en-US'. Corresponds to the `language_code` parameter in the ElevenLabs API + """ + + language_code: typing_extensions.NotRequired[str] + """ + Use the `language` field instead. + """ diff --git a/src/deepgram/requests/google.py b/src/deepgram/requests/google.py new file mode 100644 index 00000000..7c183372 --- /dev/null +++ b/src/deepgram/requests/google.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +import typing_extensions +from ..types.google_think_provider_model import GoogleThinkProviderModel + + +class GoogleParams(typing_extensions.TypedDict): + type: typing.Literal["google"] + version: typing_extensions.NotRequired[typing.Literal["v1beta"]] + """ + The REST API version for the Google generative language API + """ + + model: GoogleThinkProviderModel + """ + Google model to use + """ + + temperature: typing_extensions.NotRequired[float] + """ + Google temperature (0-2) + """ diff --git a/src/deepgram/requests/groq.py b/src/deepgram/requests/groq.py new file mode 100644 index 00000000..526dbff3 --- /dev/null +++ b/src/deepgram/requests/groq.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions + + +class GroqParams(typing_extensions.TypedDict): + type: typing.Literal["groq"] + version: typing_extensions.NotRequired[typing.Literal["v1"]] + """ + The REST API version for the Groq's chat completions API (mostly OpenAI-compatible) + """ + + model: typing.Literal["openai/gpt-oss-20b"] + """ + Groq model to use + """ + + temperature: typing_extensions.NotRequired[float] + """ + Groq temperature (0-2) + """ diff --git a/src/deepgram/requests/list_agent_configurations_v1response.py b/src/deepgram/requests/list_agent_configurations_v1response.py new file mode 100644 index 00000000..e1214186 --- /dev/null +++ b/src/deepgram/requests/list_agent_configurations_v1response.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +import typing_extensions +from .agent_configuration_v1 import AgentConfigurationV1Params + + +class ListAgentConfigurationsV1ResponseParams(typing_extensions.TypedDict): + agents: typing_extensions.NotRequired[typing.Sequence[AgentConfigurationV1Params]] + """ + A list of agent configurations for the project + """ diff --git a/src/deepgram/requests/list_agent_variables_v1response.py b/src/deepgram/requests/list_agent_variables_v1response.py new file mode 100644 index 00000000..746179c3 --- /dev/null +++ b/src/deepgram/requests/list_agent_variables_v1response.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from .agent_variable_v1 import AgentVariableV1Params + + +class ListAgentVariablesV1ResponseParams(typing_extensions.TypedDict): + variables: typing_extensions.NotRequired[typing.Sequence[AgentVariableV1Params]] + """ + A list of agent variables for the project + """ diff --git a/src/deepgram/requests/open_ai_speak_provider.py b/src/deepgram/requests/open_ai_speak_provider.py new file mode 100644 index 00000000..dd341584 --- /dev/null +++ b/src/deepgram/requests/open_ai_speak_provider.py @@ -0,0 +1,25 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +import typing_extensions +from ..types.open_ai_speak_provider_model import OpenAiSpeakProviderModel +from ..types.open_ai_speak_provider_voice import OpenAiSpeakProviderVoice + + +class OpenAiSpeakProviderParams(typing_extensions.TypedDict): + type: typing.Literal["open_ai"] + version: typing_extensions.NotRequired[typing.Literal["v1"]] + """ + The REST API version for the OpenAI text-to-speech API + """ + + model: OpenAiSpeakProviderModel + """ + OpenAI TTS model + """ + + voice: OpenAiSpeakProviderVoice + """ + OpenAI voice + """ diff --git a/src/deepgram/requests/open_ai_think_provider.py b/src/deepgram/requests/open_ai_think_provider.py new file mode 100644 index 00000000..447fd842 --- /dev/null +++ b/src/deepgram/requests/open_ai_think_provider.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import typing_extensions +from ..types.open_ai_think_provider_model import OpenAiThinkProviderModel + + +class OpenAiThinkProviderParams(typing_extensions.TypedDict): + type: typing.Literal["open_ai"] + version: typing_extensions.NotRequired[typing.Literal["v1"]] + """ + The REST API version for the OpenAI chat completions API + """ + + model: OpenAiThinkProviderModel + """ + OpenAI model to use + """ + + temperature: typing_extensions.NotRequired[float] + """ + OpenAI temperature (0-2) + """ diff --git a/src/deepgram/requests/speak_settings_v1.py b/src/deepgram/requests/speak_settings_v1.py new file mode 100644 index 00000000..c11bc1c7 --- /dev/null +++ b/src/deepgram/requests/speak_settings_v1.py @@ -0,0 +1,14 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing_extensions +from .speak_settings_v1endpoint import SpeakSettingsV1EndpointParams +from .speak_settings_v1provider import SpeakSettingsV1ProviderParams + + +class SpeakSettingsV1Params(typing_extensions.TypedDict): + provider: SpeakSettingsV1ProviderParams + endpoint: typing_extensions.NotRequired[SpeakSettingsV1EndpointParams] + """ + Optional if provider is Deepgram. Required for non-Deepgram TTS providers. + When present, must include url field and headers object. Valid schemes are https and wss with wss only supported for Eleven Labs. + """ diff --git a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_endpoint.py b/src/deepgram/requests/speak_settings_v1endpoint.py similarity index 87% rename from src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_endpoint.py rename to src/deepgram/requests/speak_settings_v1endpoint.py index f051beef..1961d66c 100644 --- a/src/deepgram/agent/v1/requests/agent_v1update_speak_speak_one_item_endpoint.py +++ b/src/deepgram/requests/speak_settings_v1endpoint.py @@ -5,7 +5,7 @@ import typing_extensions -class AgentV1UpdateSpeakSpeakOneItemEndpointParams(typing_extensions.TypedDict): +class SpeakSettingsV1EndpointParams(typing_extensions.TypedDict): """ Optional if provider is Deepgram. Required for non-Deepgram TTS providers. When present, must include url field and headers object. Valid schemes are https and wss with wss only supported for Eleven Labs. diff --git a/src/deepgram/requests/speak_settings_v1provider.py b/src/deepgram/requests/speak_settings_v1provider.py new file mode 100644 index 00000000..db3fac4e --- /dev/null +++ b/src/deepgram/requests/speak_settings_v1provider.py @@ -0,0 +1,65 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from __future__ import annotations + +import typing + +import typing_extensions +from ..types.aws_polly_speak_provider_engine import AwsPollySpeakProviderEngine +from ..types.aws_polly_speak_provider_voice import AwsPollySpeakProviderVoice +from ..types.cartesia_speak_provider_model_id import CartesiaSpeakProviderModelId +from ..types.deepgram_speak_provider_model import DeepgramSpeakProviderModel +from ..types.eleven_labs_speak_provider_model_id import ElevenLabsSpeakProviderModelId +from ..types.open_ai_speak_provider_model import OpenAiSpeakProviderModel +from ..types.open_ai_speak_provider_voice import OpenAiSpeakProviderVoice +from .aws_polly_speak_provider_credentials import AwsPollySpeakProviderCredentialsParams +from .cartesia_speak_provider_voice import CartesiaSpeakProviderVoiceParams + + +class SpeakSettingsV1Provider_DeepgramParams(typing_extensions.TypedDict): + type: typing.Literal["deepgram"] + version: typing_extensions.NotRequired[typing.Literal["v1"]] + model: DeepgramSpeakProviderModel + speed: typing_extensions.NotRequired[float] + + +class SpeakSettingsV1Provider_ElevenLabsParams(typing_extensions.TypedDict): + type: typing.Literal["eleven_labs"] + version: typing_extensions.NotRequired[typing.Literal["v1"]] + model_id: ElevenLabsSpeakProviderModelId + language: typing_extensions.NotRequired[str] + language_code: typing_extensions.NotRequired[str] + + +class SpeakSettingsV1Provider_CartesiaParams(typing_extensions.TypedDict): + type: typing.Literal["cartesia"] + version: typing_extensions.NotRequired[typing.Literal["2025-03-17"]] + model_id: CartesiaSpeakProviderModelId + voice: CartesiaSpeakProviderVoiceParams + language: typing_extensions.NotRequired[str] + volume: typing_extensions.NotRequired[float] + + +class SpeakSettingsV1Provider_OpenAiParams(typing_extensions.TypedDict): + type: typing.Literal["open_ai"] + version: typing_extensions.NotRequired[typing.Literal["v1"]] + model: OpenAiSpeakProviderModel + voice: OpenAiSpeakProviderVoice 
+ + +class SpeakSettingsV1Provider_AwsPollyParams(typing_extensions.TypedDict): + type: typing.Literal["aws_polly"] + voice: AwsPollySpeakProviderVoice + language: str + language_code: typing_extensions.NotRequired[str] + engine: AwsPollySpeakProviderEngine + credentials: AwsPollySpeakProviderCredentialsParams + + +SpeakSettingsV1ProviderParams = typing.Union[ + SpeakSettingsV1Provider_DeepgramParams, + SpeakSettingsV1Provider_ElevenLabsParams, + SpeakSettingsV1Provider_CartesiaParams, + SpeakSettingsV1Provider_OpenAiParams, + SpeakSettingsV1Provider_AwsPollyParams, +] diff --git a/src/deepgram/requests/think_settings_v1.py b/src/deepgram/requests/think_settings_v1.py index 5a04e8f7..260b488d 100644 --- a/src/deepgram/requests/think_settings_v1.py +++ b/src/deepgram/requests/think_settings_v1.py @@ -3,7 +3,22 @@ import typing import typing_extensions +from .think_settings_v1context_length import ThinkSettingsV1ContextLengthParams +from .think_settings_v1endpoint import ThinkSettingsV1EndpointParams +from .think_settings_v1functions_item import ThinkSettingsV1FunctionsItemParams +from .think_settings_v1provider import ThinkSettingsV1ProviderParams class ThinkSettingsV1Params(typing_extensions.TypedDict): - context_length: typing_extensions.NotRequired[typing.Any] + provider: ThinkSettingsV1ProviderParams + endpoint: typing_extensions.NotRequired[ThinkSettingsV1EndpointParams] + """ + Optional for non-Deepgram LLM providers. When present, must include url field and headers object + """ + + functions: typing_extensions.NotRequired[typing.Sequence[ThinkSettingsV1FunctionsItemParams]] + prompt: typing_extensions.NotRequired[str] + context_length: typing_extensions.NotRequired[ThinkSettingsV1ContextLengthParams] + """ + Specifies the number of characters retained in context between user messages, agent responses, and function calls. 
This setting is only configurable when a custom think endpoint is used + """ diff --git a/src/deepgram/agent/v1/types/agent_v1settings_type.py b/src/deepgram/requests/think_settings_v1context_length.py similarity index 50% rename from src/deepgram/agent/v1/types/agent_v1settings_type.py rename to src/deepgram/requests/think_settings_v1context_length.py index 0018c56b..e1955c18 100644 --- a/src/deepgram/agent/v1/types/agent_v1settings_type.py +++ b/src/deepgram/requests/think_settings_v1context_length.py @@ -2,4 +2,4 @@ import typing -AgentV1SettingsType = typing.Union[typing.Literal["Settings"], typing.Any] +ThinkSettingsV1ContextLengthParams = typing.Union[typing.Literal["max"], float] diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_one_item_endpoint.py b/src/deepgram/requests/think_settings_v1endpoint.py similarity index 83% rename from src/deepgram/agent/v1/requests/agent_v1settings_agent_think_one_item_endpoint.py rename to src/deepgram/requests/think_settings_v1endpoint.py index 3aa4f9ef..38e2f34a 100644 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_one_item_endpoint.py +++ b/src/deepgram/requests/think_settings_v1endpoint.py @@ -5,7 +5,7 @@ import typing_extensions -class AgentV1SettingsAgentThinkOneItemEndpointParams(typing_extensions.TypedDict): +class ThinkSettingsV1EndpointParams(typing_extensions.TypedDict): """ Optional for non-Deepgram LLM providers. 
When present, must include url field and headers object """ diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_one_item_functions_item.py b/src/deepgram/requests/think_settings_v1functions_item.py similarity index 59% rename from src/deepgram/agent/v1/requests/agent_v1settings_agent_think_one_item_functions_item.py rename to src/deepgram/requests/think_settings_v1functions_item.py index c917ae3d..2d8212c5 100644 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_one_item_functions_item.py +++ b/src/deepgram/requests/think_settings_v1functions_item.py @@ -3,12 +3,10 @@ import typing import typing_extensions -from .agent_v1settings_agent_think_one_item_functions_item_endpoint import ( - AgentV1SettingsAgentThinkOneItemFunctionsItemEndpointParams, -) +from .think_settings_v1functions_item_endpoint import ThinkSettingsV1FunctionsItemEndpointParams -class AgentV1SettingsAgentThinkOneItemFunctionsItemParams(typing_extensions.TypedDict): +class ThinkSettingsV1FunctionsItemParams(typing_extensions.TypedDict): name: typing_extensions.NotRequired[str] """ Function name @@ -24,7 +22,7 @@ class AgentV1SettingsAgentThinkOneItemFunctionsItemParams(typing_extensions.Type Function parameters """ - endpoint: typing_extensions.NotRequired[AgentV1SettingsAgentThinkOneItemFunctionsItemEndpointParams] + endpoint: typing_extensions.NotRequired[ThinkSettingsV1FunctionsItemEndpointParams] """ The Function endpoint to call. 
if not passed, function is called client-side """ diff --git a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_one_item_functions_item_endpoint.py b/src/deepgram/requests/think_settings_v1functions_item_endpoint.py similarity index 81% rename from src/deepgram/agent/v1/requests/agent_v1settings_agent_think_one_item_functions_item_endpoint.py rename to src/deepgram/requests/think_settings_v1functions_item_endpoint.py index b56b4d6a..3e3ae16e 100644 --- a/src/deepgram/agent/v1/requests/agent_v1settings_agent_think_one_item_functions_item_endpoint.py +++ b/src/deepgram/requests/think_settings_v1functions_item_endpoint.py @@ -5,7 +5,7 @@ import typing_extensions -class AgentV1SettingsAgentThinkOneItemFunctionsItemEndpointParams(typing_extensions.TypedDict): +class ThinkSettingsV1FunctionsItemEndpointParams(typing_extensions.TypedDict): """ The Function endpoint to call. if not passed, function is called client-side """ diff --git a/src/deepgram/requests/think_settings_v1provider.py b/src/deepgram/requests/think_settings_v1provider.py new file mode 100644 index 00000000..6de775b2 --- /dev/null +++ b/src/deepgram/requests/think_settings_v1provider.py @@ -0,0 +1,56 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from __future__ import annotations + +import typing + +import typing_extensions +from ..types.anthropic_think_provider_model import AnthropicThinkProviderModel +from ..types.aws_bedrock_think_provider_model import AwsBedrockThinkProviderModel +from ..types.google_think_provider_model import GoogleThinkProviderModel +from ..types.open_ai_think_provider_model import OpenAiThinkProviderModel +from .aws_bedrock_think_provider_credentials import AwsBedrockThinkProviderCredentialsParams + + +class ThinkSettingsV1Provider_OpenAiParams(typing_extensions.TypedDict): + type: typing.Literal["open_ai"] + version: typing_extensions.NotRequired[typing.Literal["v1"]] + model: OpenAiThinkProviderModel + temperature: typing_extensions.NotRequired[float] + + +class ThinkSettingsV1Provider_AwsBedrockParams(typing_extensions.TypedDict): + type: typing.Literal["aws_bedrock"] + model: AwsBedrockThinkProviderModel + temperature: typing_extensions.NotRequired[float] + credentials: typing_extensions.NotRequired[AwsBedrockThinkProviderCredentialsParams] + + +class ThinkSettingsV1Provider_AnthropicParams(typing_extensions.TypedDict): + type: typing.Literal["anthropic"] + version: typing_extensions.NotRequired[typing.Literal["v1"]] + model: AnthropicThinkProviderModel + temperature: typing_extensions.NotRequired[float] + + +class ThinkSettingsV1Provider_GoogleParams(typing_extensions.TypedDict): + type: typing.Literal["google"] + version: typing_extensions.NotRequired[typing.Literal["v1beta"]] + model: GoogleThinkProviderModel + temperature: typing_extensions.NotRequired[float] + + +class ThinkSettingsV1Provider_GroqParams(typing_extensions.TypedDict): + type: typing.Literal["groq"] + version: typing_extensions.NotRequired[typing.Literal["v1"]] + model: typing.Literal["openai/gpt-oss-20b"] + temperature: typing_extensions.NotRequired[float] + + +ThinkSettingsV1ProviderParams = typing.Union[ + ThinkSettingsV1Provider_OpenAiParams, + ThinkSettingsV1Provider_AwsBedrockParams, + 
ThinkSettingsV1Provider_AnthropicParams, + ThinkSettingsV1Provider_GoogleParams, + ThinkSettingsV1Provider_GroqParams, +] diff --git a/src/deepgram/self_hosted/v1/__init__.py b/src/deepgram/self_hosted/v1/__init__.py index c0b97e44..7d71b379 100644 --- a/src/deepgram/self_hosted/v1/__init__.py +++ b/src/deepgram/self_hosted/v1/__init__.py @@ -7,12 +7,8 @@ if typing.TYPE_CHECKING: from . import distribution_credentials - from .distribution_credentials import ( - DistributionCredentialsCreateRequestProvider, - DistributionCredentialsCreateRequestScopesItem, - ) + from .distribution_credentials import DistributionCredentialsCreateRequestScopesItem _dynamic_imports: typing.Dict[str, str] = { - "DistributionCredentialsCreateRequestProvider": ".distribution_credentials", "DistributionCredentialsCreateRequestScopesItem": ".distribution_credentials", "distribution_credentials": ".distribution_credentials", } @@ -39,8 +35,4 @@ def __dir__(): return sorted(lazy_attrs) -__all__ = [ - "DistributionCredentialsCreateRequestProvider", - "DistributionCredentialsCreateRequestScopesItem", - "distribution_credentials", -] +__all__ = ["DistributionCredentialsCreateRequestScopesItem", "distribution_credentials"] diff --git a/src/deepgram/self_hosted/v1/distribution_credentials/__init__.py b/src/deepgram/self_hosted/v1/distribution_credentials/__init__.py index cb4dba07..020a49b5 100644 --- a/src/deepgram/self_hosted/v1/distribution_credentials/__init__.py +++ b/src/deepgram/self_hosted/v1/distribution_credentials/__init__.py @@ -6,11 +6,8 @@ from importlib import import_module if typing.TYPE_CHECKING: - from .types import DistributionCredentialsCreateRequestProvider, DistributionCredentialsCreateRequestScopesItem -_dynamic_imports: typing.Dict[str, str] = { - "DistributionCredentialsCreateRequestProvider": ".types", - "DistributionCredentialsCreateRequestScopesItem": ".types", -} + from .types import DistributionCredentialsCreateRequestScopesItem +_dynamic_imports: typing.Dict[str, 
str] = {"DistributionCredentialsCreateRequestScopesItem": ".types"} def __getattr__(attr_name: str) -> typing.Any: @@ -34,4 +31,4 @@ def __dir__(): return sorted(lazy_attrs) -__all__ = ["DistributionCredentialsCreateRequestProvider", "DistributionCredentialsCreateRequestScopesItem"] +__all__ = ["DistributionCredentialsCreateRequestScopesItem"] diff --git a/src/deepgram/self_hosted/v1/distribution_credentials/client.py b/src/deepgram/self_hosted/v1/distribution_credentials/client.py index e60f4af2..63371d29 100644 --- a/src/deepgram/self_hosted/v1/distribution_credentials/client.py +++ b/src/deepgram/self_hosted/v1/distribution_credentials/client.py @@ -8,7 +8,6 @@ from ....types.get_project_distribution_credentials_v1response import GetProjectDistributionCredentialsV1Response from ....types.list_project_distribution_credentials_v1response import ListProjectDistributionCredentialsV1Response from .raw_client import AsyncRawDistributionCredentialsClient, RawDistributionCredentialsClient -from .types.distribution_credentials_create_request_provider import DistributionCredentialsCreateRequestProvider from .types.distribution_credentials_create_request_scopes_item import DistributionCredentialsCreateRequestScopesItem # this is used as the default value for optional parameters @@ -73,7 +72,7 @@ def create( typing.Sequence[DistributionCredentialsCreateRequestScopesItem], ] ] = None, - provider: typing.Optional[DistributionCredentialsCreateRequestProvider] = None, + provider: typing.Optional[typing.Literal["quay"]] = None, comment: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> CreateProjectDistributionCredentialsV1Response: @@ -88,7 +87,7 @@ def create( scopes : typing.Optional[typing.Union[DistributionCredentialsCreateRequestScopesItem, typing.Sequence[DistributionCredentialsCreateRequestScopesItem]]] List of permission scopes for the credentials - provider : typing.Optional[DistributionCredentialsCreateRequestProvider] + 
provider : typing.Optional[typing.Literal["quay"]] The provider of the distribution service comment : typing.Optional[str] @@ -111,7 +110,6 @@ def create( ) client.self_hosted.v1.distribution_credentials.create( project_id="123456-7890-1234-5678-901234", - provider="quay", ) """ _response = self._raw_client.create( @@ -268,7 +266,7 @@ async def create( typing.Sequence[DistributionCredentialsCreateRequestScopesItem], ] ] = None, - provider: typing.Optional[DistributionCredentialsCreateRequestProvider] = None, + provider: typing.Optional[typing.Literal["quay"]] = None, comment: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> CreateProjectDistributionCredentialsV1Response: @@ -283,7 +281,7 @@ async def create( scopes : typing.Optional[typing.Union[DistributionCredentialsCreateRequestScopesItem, typing.Sequence[DistributionCredentialsCreateRequestScopesItem]]] List of permission scopes for the credentials - provider : typing.Optional[DistributionCredentialsCreateRequestProvider] + provider : typing.Optional[typing.Literal["quay"]] The provider of the distribution service comment : typing.Optional[str] @@ -311,7 +309,6 @@ async def create( async def main() -> None: await client.self_hosted.v1.distribution_credentials.create( project_id="123456-7890-1234-5678-901234", - provider="quay", ) diff --git a/src/deepgram/self_hosted/v1/distribution_credentials/raw_client.py b/src/deepgram/self_hosted/v1/distribution_credentials/raw_client.py index 5861d960..f7e60ef9 100644 --- a/src/deepgram/self_hosted/v1/distribution_credentials/raw_client.py +++ b/src/deepgram/self_hosted/v1/distribution_credentials/raw_client.py @@ -13,7 +13,6 @@ from ....types.create_project_distribution_credentials_v1response import CreateProjectDistributionCredentialsV1Response from ....types.get_project_distribution_credentials_v1response import GetProjectDistributionCredentialsV1Response from ....types.list_project_distribution_credentials_v1response import 
ListProjectDistributionCredentialsV1Response -from .types.distribution_credentials_create_request_provider import DistributionCredentialsCreateRequestProvider from .types.distribution_credentials_create_request_scopes_item import DistributionCredentialsCreateRequestScopesItem # this is used as the default value for optional parameters @@ -85,7 +84,7 @@ def create( typing.Sequence[DistributionCredentialsCreateRequestScopesItem], ] ] = None, - provider: typing.Optional[DistributionCredentialsCreateRequestProvider] = None, + provider: typing.Optional[typing.Literal["quay"]] = None, comment: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> HttpResponse[CreateProjectDistributionCredentialsV1Response]: @@ -100,7 +99,7 @@ def create( scopes : typing.Optional[typing.Union[DistributionCredentialsCreateRequestScopesItem, typing.Sequence[DistributionCredentialsCreateRequestScopesItem]]] List of permission scopes for the credentials - provider : typing.Optional[DistributionCredentialsCreateRequestProvider] + provider : typing.Optional[typing.Literal["quay"]] The provider of the distribution service comment : typing.Optional[str] @@ -339,7 +338,7 @@ async def create( typing.Sequence[DistributionCredentialsCreateRequestScopesItem], ] ] = None, - provider: typing.Optional[DistributionCredentialsCreateRequestProvider] = None, + provider: typing.Optional[typing.Literal["quay"]] = None, comment: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> AsyncHttpResponse[CreateProjectDistributionCredentialsV1Response]: @@ -354,7 +353,7 @@ async def create( scopes : typing.Optional[typing.Union[DistributionCredentialsCreateRequestScopesItem, typing.Sequence[DistributionCredentialsCreateRequestScopesItem]]] List of permission scopes for the credentials - provider : typing.Optional[DistributionCredentialsCreateRequestProvider] + provider : typing.Optional[typing.Literal["quay"]] The provider of the distribution 
service comment : typing.Optional[str] diff --git a/src/deepgram/self_hosted/v1/distribution_credentials/types/__init__.py b/src/deepgram/self_hosted/v1/distribution_credentials/types/__init__.py index 6f834f97..05c1485a 100644 --- a/src/deepgram/self_hosted/v1/distribution_credentials/types/__init__.py +++ b/src/deepgram/self_hosted/v1/distribution_credentials/types/__init__.py @@ -6,11 +6,9 @@ from importlib import import_module if typing.TYPE_CHECKING: - from .distribution_credentials_create_request_provider import DistributionCredentialsCreateRequestProvider from .distribution_credentials_create_request_scopes_item import DistributionCredentialsCreateRequestScopesItem _dynamic_imports: typing.Dict[str, str] = { - "DistributionCredentialsCreateRequestProvider": ".distribution_credentials_create_request_provider", - "DistributionCredentialsCreateRequestScopesItem": ".distribution_credentials_create_request_scopes_item", + "DistributionCredentialsCreateRequestScopesItem": ".distribution_credentials_create_request_scopes_item" } @@ -35,4 +33,4 @@ def __dir__(): return sorted(lazy_attrs) -__all__ = ["DistributionCredentialsCreateRequestProvider", "DistributionCredentialsCreateRequestScopesItem"] +__all__ = ["DistributionCredentialsCreateRequestScopesItem"] diff --git a/src/deepgram/self_hosted/v1/distribution_credentials/types/distribution_credentials_create_request_provider.py b/src/deepgram/self_hosted/v1/distribution_credentials/types/distribution_credentials_create_request_provider.py deleted file mode 100644 index 20487087..00000000 --- a/src/deepgram/self_hosted/v1/distribution_credentials/types/distribution_credentials_create_request_provider.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -DistributionCredentialsCreateRequestProvider = typing.Union[typing.Literal["quay"], typing.Any] diff --git a/src/deepgram/speak/__init__.py b/src/deepgram/speak/__init__.py index 3adcdd07..73eda24c 100644 --- a/src/deepgram/speak/__init__.py +++ b/src/deepgram/speak/__init__.py @@ -25,13 +25,10 @@ SpeakV1FlushedType, SpeakV1Metadata, SpeakV1MetadataParams, - SpeakV1MetadataType, SpeakV1Text, SpeakV1TextParams, - SpeakV1TextType, SpeakV1Warning, SpeakV1WarningParams, - SpeakV1WarningType, ) _dynamic_imports: typing.Dict[str, str] = { "SpeakV1Clear": ".v1", @@ -51,13 +48,10 @@ "SpeakV1FlushedType": ".v1", "SpeakV1Metadata": ".v1", "SpeakV1MetadataParams": ".v1", - "SpeakV1MetadataType": ".v1", "SpeakV1Text": ".v1", "SpeakV1TextParams": ".v1", - "SpeakV1TextType": ".v1", "SpeakV1Warning": ".v1", "SpeakV1WarningParams": ".v1", - "SpeakV1WarningType": ".v1", "v1": ".v1", } @@ -101,12 +95,9 @@ def __dir__(): "SpeakV1FlushedType", "SpeakV1Metadata", "SpeakV1MetadataParams", - "SpeakV1MetadataType", "SpeakV1Text", "SpeakV1TextParams", - "SpeakV1TextType", "SpeakV1Warning", "SpeakV1WarningParams", - "SpeakV1WarningType", "v1", ] diff --git a/src/deepgram/speak/v1/__init__.py b/src/deepgram/speak/v1/__init__.py index 54c2fbfc..c874ef09 100644 --- a/src/deepgram/speak/v1/__init__.py +++ b/src/deepgram/speak/v1/__init__.py @@ -18,11 +18,8 @@ SpeakV1Flushed, SpeakV1FlushedType, SpeakV1Metadata, - SpeakV1MetadataType, SpeakV1Text, - SpeakV1TextType, SpeakV1Warning, - SpeakV1WarningType, ) from . 
import audio from .audio import ( @@ -63,13 +60,10 @@ "SpeakV1FlushedType": ".types", "SpeakV1Metadata": ".types", "SpeakV1MetadataParams": ".requests", - "SpeakV1MetadataType": ".types", "SpeakV1Text": ".types", "SpeakV1TextParams": ".requests", - "SpeakV1TextType": ".types", "SpeakV1Warning": ".types", "SpeakV1WarningParams": ".requests", - "SpeakV1WarningType": ".types", "audio": ".audio", } @@ -117,12 +111,9 @@ def __dir__(): "SpeakV1FlushedType", "SpeakV1Metadata", "SpeakV1MetadataParams", - "SpeakV1MetadataType", "SpeakV1Text", "SpeakV1TextParams", - "SpeakV1TextType", "SpeakV1Warning", "SpeakV1WarningParams", - "SpeakV1WarningType", "audio", ] diff --git a/src/deepgram/speak/v1/requests/speak_v1metadata.py b/src/deepgram/speak/v1/requests/speak_v1metadata.py index 9c618ab3..89fb6809 100644 --- a/src/deepgram/speak/v1/requests/speak_v1metadata.py +++ b/src/deepgram/speak/v1/requests/speak_v1metadata.py @@ -1,11 +1,12 @@ # This file was auto-generated by Fern from our API Definition. +import typing + import typing_extensions -from ..types.speak_v1metadata_type import SpeakV1MetadataType class SpeakV1MetadataParams(typing_extensions.TypedDict): - type: SpeakV1MetadataType + type: typing.Literal["Metadata"] """ Message type identifier """ diff --git a/src/deepgram/speak/v1/requests/speak_v1text.py b/src/deepgram/speak/v1/requests/speak_v1text.py index 1f66d1c9..78873194 100644 --- a/src/deepgram/speak/v1/requests/speak_v1text.py +++ b/src/deepgram/speak/v1/requests/speak_v1text.py @@ -1,11 +1,12 @@ # This file was auto-generated by Fern from our API Definition. 
+import typing + import typing_extensions -from ..types.speak_v1text_type import SpeakV1TextType class SpeakV1TextParams(typing_extensions.TypedDict): - type: SpeakV1TextType + type: typing.Literal["Speak"] """ Message type identifier """ diff --git a/src/deepgram/speak/v1/requests/speak_v1warning.py b/src/deepgram/speak/v1/requests/speak_v1warning.py index 69969e93..ca6c78f8 100644 --- a/src/deepgram/speak/v1/requests/speak_v1warning.py +++ b/src/deepgram/speak/v1/requests/speak_v1warning.py @@ -1,11 +1,12 @@ # This file was auto-generated by Fern from our API Definition. +import typing + import typing_extensions -from ..types.speak_v1warning_type import SpeakV1WarningType class SpeakV1WarningParams(typing_extensions.TypedDict): - type: SpeakV1WarningType + type: typing.Literal["Warning"] """ Message type identifier """ diff --git a/src/deepgram/speak/v1/socket_client.py b/src/deepgram/speak/v1/socket_client.py index 671e0bd2..461c8190 100644 --- a/src/deepgram/speak/v1/socket_client.py +++ b/src/deepgram/speak/v1/socket_client.py @@ -38,7 +38,7 @@ async def __aiter__(self): yield message else: try: - yield construct_type(type_=V1SocketClientResponse, object_=json.loads(message)) # type: ignore + yield construct_type(V1SocketClientResponse, json.loads(message)) # type: ignore except Exception: _logger.warning( "Skipping unknown WebSocket message; update your SDK version to support new message types." @@ -63,14 +63,14 @@ async def start_listening(self): else: json_data = json.loads(raw_message) try: - parsed = construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore + parsed = construct_type(V1SocketClientResponse, json_data) # type: ignore except Exception: _logger.warning( "Skipping unknown WebSocket message; update your SDK version to support new message types." 
) continue await self._emit_async(EventType.MESSAGE, parsed) - except Exception as exc: + except (websockets.WebSocketException, JSONDecodeError) as exc: await self._emit_async(EventType.ERROR, exc) finally: await self._emit_async(EventType.CLOSE, None) @@ -82,26 +82,26 @@ async def send_text(self, message: SpeakV1Text) -> None: """ await self._send_model(message) - async def send_flush(self, message: typing.Optional[SpeakV1Flush] = None) -> None: + async def send_flush(self, message: SpeakV1Flush) -> None: """ Send a message to the websocket connection. The message will be sent as a SpeakV1Flush. """ - await self._send_model(message or SpeakV1Flush(type="Flush")) + await self._send_model(message) - async def send_clear(self, message: typing.Optional[SpeakV1Clear] = None) -> None: + async def send_clear(self, message: SpeakV1Clear) -> None: """ Send a message to the websocket connection. The message will be sent as a SpeakV1Clear. """ - await self._send_model(message or SpeakV1Clear(type="Clear")) + await self._send_model(message) - async def send_close(self, message: typing.Optional[SpeakV1Close] = None) -> None: + async def send_close(self, message: SpeakV1Close) -> None: """ Send a message to the websocket connection. The message will be sent as a SpeakV1Close. 
""" - await self._send_model(message or SpeakV1Close(type="Close")) + await self._send_model(message) async def recv(self) -> V1SocketClientResponse: """ @@ -112,7 +112,7 @@ async def recv(self) -> V1SocketClientResponse: return data # type: ignore json_data = json.loads(data) try: - return construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore + return construct_type(V1SocketClientResponse, json_data) # type: ignore except Exception: _logger.warning("Skipping unknown WebSocket message; update your SDK version to support new message types.") return json_data # type: ignore @@ -143,7 +143,7 @@ def __iter__(self): yield message else: try: - yield construct_type(type_=V1SocketClientResponse, object_=json.loads(message)) # type: ignore + yield construct_type(V1SocketClientResponse, json.loads(message)) # type: ignore except Exception: _logger.warning( "Skipping unknown WebSocket message; update your SDK version to support new message types." @@ -168,14 +168,14 @@ def start_listening(self): else: json_data = json.loads(raw_message) try: - parsed = construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore + parsed = construct_type(V1SocketClientResponse, json_data) # type: ignore except Exception: _logger.warning( "Skipping unknown WebSocket message; update your SDK version to support new message types." ) continue self._emit(EventType.MESSAGE, parsed) - except Exception as exc: + except (websockets.WebSocketException, JSONDecodeError) as exc: self._emit(EventType.ERROR, exc) finally: self._emit(EventType.CLOSE, None) @@ -187,26 +187,26 @@ def send_text(self, message: SpeakV1Text) -> None: """ self._send_model(message) - def send_flush(self, message: typing.Optional[SpeakV1Flush] = None) -> None: + def send_flush(self, message: SpeakV1Flush) -> None: """ Send a message to the websocket connection. The message will be sent as a SpeakV1Flush. 
""" - self._send_model(message or SpeakV1Flush(type="Flush")) + self._send_model(message) - def send_clear(self, message: typing.Optional[SpeakV1Clear] = None) -> None: + def send_clear(self, message: SpeakV1Clear) -> None: """ Send a message to the websocket connection. The message will be sent as a SpeakV1Clear. """ - self._send_model(message or SpeakV1Clear(type="Clear")) + self._send_model(message) - def send_close(self, message: typing.Optional[SpeakV1Close] = None) -> None: + def send_close(self, message: SpeakV1Close) -> None: """ Send a message to the websocket connection. The message will be sent as a SpeakV1Close. """ - self._send_model(message or SpeakV1Close(type="Close")) + self._send_model(message) def recv(self) -> V1SocketClientResponse: """ @@ -217,7 +217,7 @@ def recv(self) -> V1SocketClientResponse: return data # type: ignore json_data = json.loads(data) try: - return construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore + return construct_type(V1SocketClientResponse, json_data) # type: ignore except Exception: _logger.warning("Skipping unknown WebSocket message; update your SDK version to support new message types.") return json_data # type: ignore diff --git a/src/deepgram/speak/v1/types/__init__.py b/src/deepgram/speak/v1/types/__init__.py index cc5ee645..72a25d1b 100644 --- a/src/deepgram/speak/v1/types/__init__.py +++ b/src/deepgram/speak/v1/types/__init__.py @@ -17,11 +17,8 @@ from .speak_v1flushed import SpeakV1Flushed from .speak_v1flushed_type import SpeakV1FlushedType from .speak_v1metadata import SpeakV1Metadata - from .speak_v1metadata_type import SpeakV1MetadataType from .speak_v1text import SpeakV1Text - from .speak_v1text_type import SpeakV1TextType from .speak_v1warning import SpeakV1Warning - from .speak_v1warning_type import SpeakV1WarningType _dynamic_imports: typing.Dict[str, str] = { "SpeakV1Clear": ".speak_v1clear", "SpeakV1ClearType": ".speak_v1clear_type", @@ -34,11 +31,8 @@ "SpeakV1Flushed": 
".speak_v1flushed", "SpeakV1FlushedType": ".speak_v1flushed_type", "SpeakV1Metadata": ".speak_v1metadata", - "SpeakV1MetadataType": ".speak_v1metadata_type", "SpeakV1Text": ".speak_v1text", - "SpeakV1TextType": ".speak_v1text_type", "SpeakV1Warning": ".speak_v1warning", - "SpeakV1WarningType": ".speak_v1warning_type", } @@ -75,9 +69,6 @@ def __dir__(): "SpeakV1Flushed", "SpeakV1FlushedType", "SpeakV1Metadata", - "SpeakV1MetadataType", "SpeakV1Text", - "SpeakV1TextType", "SpeakV1Warning", - "SpeakV1WarningType", ] diff --git a/src/deepgram/speak/v1/types/speak_v1metadata.py b/src/deepgram/speak/v1/types/speak_v1metadata.py index 37ad50a8..e009a764 100644 --- a/src/deepgram/speak/v1/types/speak_v1metadata.py +++ b/src/deepgram/speak/v1/types/speak_v1metadata.py @@ -5,11 +5,10 @@ import pydantic from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel -from .speak_v1metadata_type import SpeakV1MetadataType class SpeakV1Metadata(UncheckedBaseModel): - type: SpeakV1MetadataType = pydantic.Field() + type: typing.Literal["Metadata"] = pydantic.Field(default="Metadata") """ Message type identifier """ diff --git a/src/deepgram/speak/v1/types/speak_v1metadata_type.py b/src/deepgram/speak/v1/types/speak_v1metadata_type.py deleted file mode 100644 index 70b2b693..00000000 --- a/src/deepgram/speak/v1/types/speak_v1metadata_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -SpeakV1MetadataType = typing.Union[typing.Literal["Metadata"], typing.Any] diff --git a/src/deepgram/speak/v1/types/speak_v1text.py b/src/deepgram/speak/v1/types/speak_v1text.py index 89f1a445..3a566770 100644 --- a/src/deepgram/speak/v1/types/speak_v1text.py +++ b/src/deepgram/speak/v1/types/speak_v1text.py @@ -5,11 +5,10 @@ import pydantic from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel -from .speak_v1text_type import SpeakV1TextType class SpeakV1Text(UncheckedBaseModel): - type: SpeakV1TextType = pydantic.Field() + type: typing.Literal["Speak"] = pydantic.Field(default="Speak") """ Message type identifier """ diff --git a/src/deepgram/speak/v1/types/speak_v1warning.py b/src/deepgram/speak/v1/types/speak_v1warning.py index 2219f352..af374191 100644 --- a/src/deepgram/speak/v1/types/speak_v1warning.py +++ b/src/deepgram/speak/v1/types/speak_v1warning.py @@ -5,11 +5,10 @@ import pydantic from ....core.pydantic_utilities import IS_PYDANTIC_V2 from ....core.unchecked_base_model import UncheckedBaseModel -from .speak_v1warning_type import SpeakV1WarningType class SpeakV1Warning(UncheckedBaseModel): - type: SpeakV1WarningType = pydantic.Field() + type: typing.Literal["Warning"] = pydantic.Field(default="Warning") """ Message type identifier """ diff --git a/src/deepgram/speak/v1/types/speak_v1warning_type.py b/src/deepgram/speak/v1/types/speak_v1warning_type.py deleted file mode 100644 index 7d0ede31..00000000 --- a/src/deepgram/speak/v1/types/speak_v1warning_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -SpeakV1WarningType = typing.Union[typing.Literal["Warning"], typing.Any] diff --git a/src/deepgram/types/__init__.py b/src/deepgram/types/__init__.py index 291d136f..f2bc3ede 100644 --- a/src/deepgram/types/__init__.py +++ b/src/deepgram/types/__init__.py @@ -6,34 +6,37 @@ from importlib import import_module if typing.TYPE_CHECKING: + from .agent_configuration_v1 import AgentConfigurationV1 from .agent_think_models_v1response import AgentThinkModelsV1Response from .agent_think_models_v1response_models_item import AgentThinkModelsV1ResponseModelsItem from .agent_think_models_v1response_models_item_id import AgentThinkModelsV1ResponseModelsItemId - from .agent_think_models_v1response_models_item_id_provider import AgentThinkModelsV1ResponseModelsItemIdProvider from .agent_think_models_v1response_models_item_one import AgentThinkModelsV1ResponseModelsItemOne from .agent_think_models_v1response_models_item_one_id import AgentThinkModelsV1ResponseModelsItemOneId - from .agent_think_models_v1response_models_item_one_provider import AgentThinkModelsV1ResponseModelsItemOneProvider from .agent_think_models_v1response_models_item_three import AgentThinkModelsV1ResponseModelsItemThree - from .agent_think_models_v1response_models_item_three_id import AgentThinkModelsV1ResponseModelsItemThreeId - from .agent_think_models_v1response_models_item_three_provider import ( - AgentThinkModelsV1ResponseModelsItemThreeProvider, - ) from .agent_think_models_v1response_models_item_two import AgentThinkModelsV1ResponseModelsItemTwo from .agent_think_models_v1response_models_item_two_id import AgentThinkModelsV1ResponseModelsItemTwoId - from .agent_think_models_v1response_models_item_two_provider import AgentThinkModelsV1ResponseModelsItemTwoProvider from .agent_think_models_v1response_models_item_zero import AgentThinkModelsV1ResponseModelsItemZero from .agent_think_models_v1response_models_item_zero_id import AgentThinkModelsV1ResponseModelsItemZeroId - from 
.agent_think_models_v1response_models_item_zero_provider import ( - AgentThinkModelsV1ResponseModelsItemZeroProvider, - ) + from .agent_variable_v1 import AgentVariableV1 from .anthropic import Anthropic + from .anthropic_think_provider_model import AnthropicThinkProviderModel from .aws_bedrock_think_provider import AwsBedrockThinkProvider + from .aws_bedrock_think_provider_credentials import AwsBedrockThinkProviderCredentials + from .aws_bedrock_think_provider_credentials_type import AwsBedrockThinkProviderCredentialsType + from .aws_bedrock_think_provider_model import AwsBedrockThinkProviderModel from .aws_polly_speak_provider import AwsPollySpeakProvider + from .aws_polly_speak_provider_credentials import AwsPollySpeakProviderCredentials + from .aws_polly_speak_provider_credentials_type import AwsPollySpeakProviderCredentialsType + from .aws_polly_speak_provider_engine import AwsPollySpeakProviderEngine + from .aws_polly_speak_provider_voice import AwsPollySpeakProviderVoice from .billing_breakdown_v1response import BillingBreakdownV1Response from .billing_breakdown_v1response_resolution import BillingBreakdownV1ResponseResolution from .billing_breakdown_v1response_results_item import BillingBreakdownV1ResponseResultsItem from .billing_breakdown_v1response_results_item_grouping import BillingBreakdownV1ResponseResultsItemGrouping from .cartesia import Cartesia + from .cartesia_speak_provider_model_id import CartesiaSpeakProviderModelId + from .cartesia_speak_provider_voice import CartesiaSpeakProviderVoice + from .create_agent_configuration_v1response import CreateAgentConfigurationV1Response from .create_key_v1request_one import CreateKeyV1RequestOne from .create_key_v1response import CreateKeyV1Response from .create_project_distribution_credentials_v1response import CreateProjectDistributionCredentialsV1Response @@ -45,11 +48,15 @@ ) from .create_project_invite_v1response import CreateProjectInviteV1Response from .deepgram import Deepgram + from 
.deepgram_speak_provider_model import DeepgramSpeakProviderModel + from .delete_agent_configuration_v1response import DeleteAgentConfigurationV1Response + from .delete_agent_variable_v1response import DeleteAgentVariableV1Response from .delete_project_invite_v1response import DeleteProjectInviteV1Response from .delete_project_key_v1response import DeleteProjectKeyV1Response from .delete_project_member_v1response import DeleteProjectMemberV1Response from .delete_project_v1response import DeleteProjectV1Response from .eleven_labs_speak_provider import ElevenLabsSpeakProvider + from .eleven_labs_speak_provider_model_id import ElevenLabsSpeakProviderModelId from .error_response import ErrorResponse from .error_response_legacy_error import ErrorResponseLegacyError from .error_response_modern_error import ErrorResponseModernError @@ -73,9 +80,12 @@ from .get_project_request_v1response import GetProjectRequestV1Response from .get_project_v1response import GetProjectV1Response from .google import Google + from .google_think_provider_model import GoogleThinkProviderModel from .grant_v1response import GrantV1Response from .groq import Groq from .leave_project_v1response import LeaveProjectV1Response + from .list_agent_configurations_v1response import ListAgentConfigurationsV1Response + from .list_agent_variables_v1response import ListAgentVariablesV1Response from .list_billing_fields_v1response import ListBillingFieldsV1Response from .list_billing_fields_v1response_deployments_item import ListBillingFieldsV1ResponseDeploymentsItem from .list_models_v1response import ListModelsV1Response @@ -189,7 +199,10 @@ from .listen_v2sample_rate import ListenV2SampleRate from .listen_v2tag import ListenV2Tag from .open_ai_speak_provider import OpenAiSpeakProvider + from .open_ai_speak_provider_model import OpenAiSpeakProviderModel + from .open_ai_speak_provider_voice import OpenAiSpeakProviderVoice from .open_ai_think_provider import OpenAiThinkProvider + from 
.open_ai_think_provider_model import OpenAiThinkProviderModel from .project_request_response import ProjectRequestResponse from .read_v1request import ReadV1Request from .read_v1request_text import ReadV1RequestText @@ -221,12 +234,33 @@ from .shared_topics_results_topics_segments_item import SharedTopicsResultsTopicsSegmentsItem from .shared_topics_results_topics_segments_item_topics_item import SharedTopicsResultsTopicsSegmentsItemTopicsItem from .speak_settings_v1 import SpeakSettingsV1 + from .speak_settings_v1endpoint import SpeakSettingsV1Endpoint + from .speak_settings_v1provider import ( + SpeakSettingsV1Provider, + SpeakSettingsV1Provider_AwsPolly, + SpeakSettingsV1Provider_Cartesia, + SpeakSettingsV1Provider_Deepgram, + SpeakSettingsV1Provider_ElevenLabs, + SpeakSettingsV1Provider_OpenAi, + ) from .speak_v1encoding import SpeakV1Encoding from .speak_v1mip_opt_out import SpeakV1MipOptOut from .speak_v1model import SpeakV1Model from .speak_v1response import SpeakV1Response from .speak_v1sample_rate import SpeakV1SampleRate from .think_settings_v1 import ThinkSettingsV1 + from .think_settings_v1context_length import ThinkSettingsV1ContextLength + from .think_settings_v1endpoint import ThinkSettingsV1Endpoint + from .think_settings_v1functions_item import ThinkSettingsV1FunctionsItem + from .think_settings_v1functions_item_endpoint import ThinkSettingsV1FunctionsItemEndpoint + from .think_settings_v1provider import ( + ThinkSettingsV1Provider, + ThinkSettingsV1Provider_Anthropic, + ThinkSettingsV1Provider_AwsBedrock, + ThinkSettingsV1Provider_Google, + ThinkSettingsV1Provider_Groq, + ThinkSettingsV1Provider_OpenAi, + ) from .update_project_member_scopes_v1response import UpdateProjectMemberScopesV1Response from .update_project_v1response import UpdateProjectV1Response from .usage_breakdown_v1response import UsageBreakdownV1Response @@ -238,30 +272,37 @@ from .usage_v1response import UsageV1Response from .usage_v1response_resolution import 
UsageV1ResponseResolution _dynamic_imports: typing.Dict[str, str] = { + "AgentConfigurationV1": ".agent_configuration_v1", "AgentThinkModelsV1Response": ".agent_think_models_v1response", "AgentThinkModelsV1ResponseModelsItem": ".agent_think_models_v1response_models_item", "AgentThinkModelsV1ResponseModelsItemId": ".agent_think_models_v1response_models_item_id", - "AgentThinkModelsV1ResponseModelsItemIdProvider": ".agent_think_models_v1response_models_item_id_provider", "AgentThinkModelsV1ResponseModelsItemOne": ".agent_think_models_v1response_models_item_one", "AgentThinkModelsV1ResponseModelsItemOneId": ".agent_think_models_v1response_models_item_one_id", - "AgentThinkModelsV1ResponseModelsItemOneProvider": ".agent_think_models_v1response_models_item_one_provider", "AgentThinkModelsV1ResponseModelsItemThree": ".agent_think_models_v1response_models_item_three", - "AgentThinkModelsV1ResponseModelsItemThreeId": ".agent_think_models_v1response_models_item_three_id", - "AgentThinkModelsV1ResponseModelsItemThreeProvider": ".agent_think_models_v1response_models_item_three_provider", "AgentThinkModelsV1ResponseModelsItemTwo": ".agent_think_models_v1response_models_item_two", "AgentThinkModelsV1ResponseModelsItemTwoId": ".agent_think_models_v1response_models_item_two_id", - "AgentThinkModelsV1ResponseModelsItemTwoProvider": ".agent_think_models_v1response_models_item_two_provider", "AgentThinkModelsV1ResponseModelsItemZero": ".agent_think_models_v1response_models_item_zero", "AgentThinkModelsV1ResponseModelsItemZeroId": ".agent_think_models_v1response_models_item_zero_id", - "AgentThinkModelsV1ResponseModelsItemZeroProvider": ".agent_think_models_v1response_models_item_zero_provider", + "AgentVariableV1": ".agent_variable_v1", "Anthropic": ".anthropic", + "AnthropicThinkProviderModel": ".anthropic_think_provider_model", "AwsBedrockThinkProvider": ".aws_bedrock_think_provider", + "AwsBedrockThinkProviderCredentials": ".aws_bedrock_think_provider_credentials", + 
"AwsBedrockThinkProviderCredentialsType": ".aws_bedrock_think_provider_credentials_type", + "AwsBedrockThinkProviderModel": ".aws_bedrock_think_provider_model", "AwsPollySpeakProvider": ".aws_polly_speak_provider", + "AwsPollySpeakProviderCredentials": ".aws_polly_speak_provider_credentials", + "AwsPollySpeakProviderCredentialsType": ".aws_polly_speak_provider_credentials_type", + "AwsPollySpeakProviderEngine": ".aws_polly_speak_provider_engine", + "AwsPollySpeakProviderVoice": ".aws_polly_speak_provider_voice", "BillingBreakdownV1Response": ".billing_breakdown_v1response", "BillingBreakdownV1ResponseResolution": ".billing_breakdown_v1response_resolution", "BillingBreakdownV1ResponseResultsItem": ".billing_breakdown_v1response_results_item", "BillingBreakdownV1ResponseResultsItemGrouping": ".billing_breakdown_v1response_results_item_grouping", "Cartesia": ".cartesia", + "CartesiaSpeakProviderModelId": ".cartesia_speak_provider_model_id", + "CartesiaSpeakProviderVoice": ".cartesia_speak_provider_voice", + "CreateAgentConfigurationV1Response": ".create_agent_configuration_v1response", "CreateKeyV1RequestOne": ".create_key_v1request_one", "CreateKeyV1Response": ".create_key_v1response", "CreateProjectDistributionCredentialsV1Response": ".create_project_distribution_credentials_v1response", @@ -269,11 +310,15 @@ "CreateProjectDistributionCredentialsV1ResponseMember": ".create_project_distribution_credentials_v1response_member", "CreateProjectInviteV1Response": ".create_project_invite_v1response", "Deepgram": ".deepgram", + "DeepgramSpeakProviderModel": ".deepgram_speak_provider_model", + "DeleteAgentConfigurationV1Response": ".delete_agent_configuration_v1response", + "DeleteAgentVariableV1Response": ".delete_agent_variable_v1response", "DeleteProjectInviteV1Response": ".delete_project_invite_v1response", "DeleteProjectKeyV1Response": ".delete_project_key_v1response", "DeleteProjectMemberV1Response": ".delete_project_member_v1response", "DeleteProjectV1Response": 
".delete_project_v1response", "ElevenLabsSpeakProvider": ".eleven_labs_speak_provider", + "ElevenLabsSpeakProviderModelId": ".eleven_labs_speak_provider_model_id", "ErrorResponse": ".error_response", "ErrorResponseLegacyError": ".error_response_legacy_error", "ErrorResponseModernError": ".error_response_modern_error", @@ -293,9 +338,12 @@ "GetProjectRequestV1Response": ".get_project_request_v1response", "GetProjectV1Response": ".get_project_v1response", "Google": ".google", + "GoogleThinkProviderModel": ".google_think_provider_model", "GrantV1Response": ".grant_v1response", "Groq": ".groq", "LeaveProjectV1Response": ".leave_project_v1response", + "ListAgentConfigurationsV1Response": ".list_agent_configurations_v1response", + "ListAgentVariablesV1Response": ".list_agent_variables_v1response", "ListBillingFieldsV1Response": ".list_billing_fields_v1response", "ListBillingFieldsV1ResponseDeploymentsItem": ".list_billing_fields_v1response_deployments_item", "ListModelsV1Response": ".list_models_v1response", @@ -385,7 +433,10 @@ "ListenV2SampleRate": ".listen_v2sample_rate", "ListenV2Tag": ".listen_v2tag", "OpenAiSpeakProvider": ".open_ai_speak_provider", + "OpenAiSpeakProviderModel": ".open_ai_speak_provider_model", + "OpenAiSpeakProviderVoice": ".open_ai_speak_provider_voice", "OpenAiThinkProvider": ".open_ai_think_provider", + "OpenAiThinkProviderModel": ".open_ai_think_provider_model", "ProjectRequestResponse": ".project_request_response", "ReadV1Request": ".read_v1request", "ReadV1RequestText": ".read_v1request_text", @@ -415,12 +466,29 @@ "SharedTopicsResultsTopicsSegmentsItem": ".shared_topics_results_topics_segments_item", "SharedTopicsResultsTopicsSegmentsItemTopicsItem": ".shared_topics_results_topics_segments_item_topics_item", "SpeakSettingsV1": ".speak_settings_v1", + "SpeakSettingsV1Endpoint": ".speak_settings_v1endpoint", + "SpeakSettingsV1Provider": ".speak_settings_v1provider", + "SpeakSettingsV1Provider_AwsPolly": ".speak_settings_v1provider", + 
"SpeakSettingsV1Provider_Cartesia": ".speak_settings_v1provider", + "SpeakSettingsV1Provider_Deepgram": ".speak_settings_v1provider", + "SpeakSettingsV1Provider_ElevenLabs": ".speak_settings_v1provider", + "SpeakSettingsV1Provider_OpenAi": ".speak_settings_v1provider", "SpeakV1Encoding": ".speak_v1encoding", "SpeakV1MipOptOut": ".speak_v1mip_opt_out", "SpeakV1Model": ".speak_v1model", "SpeakV1Response": ".speak_v1response", "SpeakV1SampleRate": ".speak_v1sample_rate", "ThinkSettingsV1": ".think_settings_v1", + "ThinkSettingsV1ContextLength": ".think_settings_v1context_length", + "ThinkSettingsV1Endpoint": ".think_settings_v1endpoint", + "ThinkSettingsV1FunctionsItem": ".think_settings_v1functions_item", + "ThinkSettingsV1FunctionsItemEndpoint": ".think_settings_v1functions_item_endpoint", + "ThinkSettingsV1Provider": ".think_settings_v1provider", + "ThinkSettingsV1Provider_Anthropic": ".think_settings_v1provider", + "ThinkSettingsV1Provider_AwsBedrock": ".think_settings_v1provider", + "ThinkSettingsV1Provider_Google": ".think_settings_v1provider", + "ThinkSettingsV1Provider_Groq": ".think_settings_v1provider", + "ThinkSettingsV1Provider_OpenAi": ".think_settings_v1provider", "UpdateProjectMemberScopesV1Response": ".update_project_member_scopes_v1response", "UpdateProjectV1Response": ".update_project_v1response", "UsageBreakdownV1Response": ".usage_breakdown_v1response", @@ -456,30 +524,37 @@ def __dir__(): __all__ = [ + "AgentConfigurationV1", "AgentThinkModelsV1Response", "AgentThinkModelsV1ResponseModelsItem", "AgentThinkModelsV1ResponseModelsItemId", - "AgentThinkModelsV1ResponseModelsItemIdProvider", "AgentThinkModelsV1ResponseModelsItemOne", "AgentThinkModelsV1ResponseModelsItemOneId", - "AgentThinkModelsV1ResponseModelsItemOneProvider", "AgentThinkModelsV1ResponseModelsItemThree", - "AgentThinkModelsV1ResponseModelsItemThreeId", - "AgentThinkModelsV1ResponseModelsItemThreeProvider", "AgentThinkModelsV1ResponseModelsItemTwo", 
"AgentThinkModelsV1ResponseModelsItemTwoId", - "AgentThinkModelsV1ResponseModelsItemTwoProvider", "AgentThinkModelsV1ResponseModelsItemZero", "AgentThinkModelsV1ResponseModelsItemZeroId", - "AgentThinkModelsV1ResponseModelsItemZeroProvider", + "AgentVariableV1", "Anthropic", + "AnthropicThinkProviderModel", "AwsBedrockThinkProvider", + "AwsBedrockThinkProviderCredentials", + "AwsBedrockThinkProviderCredentialsType", + "AwsBedrockThinkProviderModel", "AwsPollySpeakProvider", + "AwsPollySpeakProviderCredentials", + "AwsPollySpeakProviderCredentialsType", + "AwsPollySpeakProviderEngine", + "AwsPollySpeakProviderVoice", "BillingBreakdownV1Response", "BillingBreakdownV1ResponseResolution", "BillingBreakdownV1ResponseResultsItem", "BillingBreakdownV1ResponseResultsItemGrouping", "Cartesia", + "CartesiaSpeakProviderModelId", + "CartesiaSpeakProviderVoice", + "CreateAgentConfigurationV1Response", "CreateKeyV1RequestOne", "CreateKeyV1Response", "CreateProjectDistributionCredentialsV1Response", @@ -487,11 +562,15 @@ def __dir__(): "CreateProjectDistributionCredentialsV1ResponseMember", "CreateProjectInviteV1Response", "Deepgram", + "DeepgramSpeakProviderModel", + "DeleteAgentConfigurationV1Response", + "DeleteAgentVariableV1Response", "DeleteProjectInviteV1Response", "DeleteProjectKeyV1Response", "DeleteProjectMemberV1Response", "DeleteProjectV1Response", "ElevenLabsSpeakProvider", + "ElevenLabsSpeakProviderModelId", "ErrorResponse", "ErrorResponseLegacyError", "ErrorResponseModernError", @@ -511,9 +590,12 @@ def __dir__(): "GetProjectRequestV1Response", "GetProjectV1Response", "Google", + "GoogleThinkProviderModel", "GrantV1Response", "Groq", "LeaveProjectV1Response", + "ListAgentConfigurationsV1Response", + "ListAgentVariablesV1Response", "ListBillingFieldsV1Response", "ListBillingFieldsV1ResponseDeploymentsItem", "ListModelsV1Response", @@ -603,7 +685,10 @@ def __dir__(): "ListenV2SampleRate", "ListenV2Tag", "OpenAiSpeakProvider", + "OpenAiSpeakProviderModel", + 
"OpenAiSpeakProviderVoice", "OpenAiThinkProvider", + "OpenAiThinkProviderModel", "ProjectRequestResponse", "ReadV1Request", "ReadV1RequestText", @@ -633,12 +718,29 @@ def __dir__(): "SharedTopicsResultsTopicsSegmentsItem", "SharedTopicsResultsTopicsSegmentsItemTopicsItem", "SpeakSettingsV1", + "SpeakSettingsV1Endpoint", + "SpeakSettingsV1Provider", + "SpeakSettingsV1Provider_AwsPolly", + "SpeakSettingsV1Provider_Cartesia", + "SpeakSettingsV1Provider_Deepgram", + "SpeakSettingsV1Provider_ElevenLabs", + "SpeakSettingsV1Provider_OpenAi", "SpeakV1Encoding", "SpeakV1MipOptOut", "SpeakV1Model", "SpeakV1Response", "SpeakV1SampleRate", "ThinkSettingsV1", + "ThinkSettingsV1ContextLength", + "ThinkSettingsV1Endpoint", + "ThinkSettingsV1FunctionsItem", + "ThinkSettingsV1FunctionsItemEndpoint", + "ThinkSettingsV1Provider", + "ThinkSettingsV1Provider_Anthropic", + "ThinkSettingsV1Provider_AwsBedrock", + "ThinkSettingsV1Provider_Google", + "ThinkSettingsV1Provider_Groq", + "ThinkSettingsV1Provider_OpenAi", "UpdateProjectMemberScopesV1Response", "UpdateProjectV1Response", "UsageBreakdownV1Response", diff --git a/src/deepgram/types/agent_configuration_v1.py b/src/deepgram/types/agent_configuration_v1.py new file mode 100644 index 00000000..bdce6c2d --- /dev/null +++ b/src/deepgram/types/agent_configuration_v1.py @@ -0,0 +1,48 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import datetime as dt +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class AgentConfigurationV1(UncheckedBaseModel): + """ + A reusable agent configuration + """ + + agent_id: str = pydantic.Field() + """ + The unique identifier of the agent configuration + """ + + config: typing.Dict[str, typing.Any] = pydantic.Field() + """ + The agent configuration object + """ + + metadata: typing.Optional[typing.Dict[str, str]] = pydantic.Field(default=None) + """ + A map of arbitrary key-value pairs for labeling or organizing the agent configuration + """ + + created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + Timestamp when the configuration was created + """ + + updated_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + Timestamp when the configuration was last updated + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/deepgram/types/agent_think_models_v1response_models_item_id.py b/src/deepgram/types/agent_think_models_v1response_models_item_id.py index 786ef2be..e203c954 100644 --- a/src/deepgram/types/agent_think_models_v1response_models_item_id.py +++ b/src/deepgram/types/agent_think_models_v1response_models_item_id.py @@ -5,7 +5,6 @@ import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2 from ..core.unchecked_base_model import UncheckedBaseModel -from .agent_think_models_v1response_models_item_id_provider import AgentThinkModelsV1ResponseModelsItemIdProvider class AgentThinkModelsV1ResponseModelsItemId(UncheckedBaseModel): @@ -23,7 +22,7 @@ class AgentThinkModelsV1ResponseModelsItemId(UncheckedBaseModel): The display name of the model """ - provider: 
AgentThinkModelsV1ResponseModelsItemIdProvider = pydantic.Field() + provider: typing.Literal["aws_bedrock"] = pydantic.Field(default="aws_bedrock") """ The provider of the model """ diff --git a/src/deepgram/types/agent_think_models_v1response_models_item_id_provider.py b/src/deepgram/types/agent_think_models_v1response_models_item_id_provider.py deleted file mode 100644 index 51e56a22..00000000 --- a/src/deepgram/types/agent_think_models_v1response_models_item_id_provider.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentThinkModelsV1ResponseModelsItemIdProvider = typing.Union[typing.Literal["aws_bedrock"], typing.Any] diff --git a/src/deepgram/types/agent_think_models_v1response_models_item_one.py b/src/deepgram/types/agent_think_models_v1response_models_item_one.py index 0aac30e8..63981a6d 100644 --- a/src/deepgram/types/agent_think_models_v1response_models_item_one.py +++ b/src/deepgram/types/agent_think_models_v1response_models_item_one.py @@ -6,7 +6,6 @@ from ..core.pydantic_utilities import IS_PYDANTIC_V2 from ..core.unchecked_base_model import UncheckedBaseModel from .agent_think_models_v1response_models_item_one_id import AgentThinkModelsV1ResponseModelsItemOneId -from .agent_think_models_v1response_models_item_one_provider import AgentThinkModelsV1ResponseModelsItemOneProvider class AgentThinkModelsV1ResponseModelsItemOne(UncheckedBaseModel): @@ -24,7 +23,7 @@ class AgentThinkModelsV1ResponseModelsItemOne(UncheckedBaseModel): The display name of the model """ - provider: AgentThinkModelsV1ResponseModelsItemOneProvider = pydantic.Field() + provider: typing.Literal["anthropic"] = pydantic.Field(default="anthropic") """ The provider of the model """ diff --git a/src/deepgram/types/agent_think_models_v1response_models_item_one_provider.py b/src/deepgram/types/agent_think_models_v1response_models_item_one_provider.py deleted file mode 100644 index f00753d1..00000000 --- 
a/src/deepgram/types/agent_think_models_v1response_models_item_one_provider.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentThinkModelsV1ResponseModelsItemOneProvider = typing.Union[typing.Literal["anthropic"], typing.Any] diff --git a/src/deepgram/types/agent_think_models_v1response_models_item_three.py b/src/deepgram/types/agent_think_models_v1response_models_item_three.py index 391eca35..2e8118f9 100644 --- a/src/deepgram/types/agent_think_models_v1response_models_item_three.py +++ b/src/deepgram/types/agent_think_models_v1response_models_item_three.py @@ -5,8 +5,6 @@ import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2 from ..core.unchecked_base_model import UncheckedBaseModel -from .agent_think_models_v1response_models_item_three_id import AgentThinkModelsV1ResponseModelsItemThreeId -from .agent_think_models_v1response_models_item_three_provider import AgentThinkModelsV1ResponseModelsItemThreeProvider class AgentThinkModelsV1ResponseModelsItemThree(UncheckedBaseModel): @@ -14,7 +12,7 @@ class AgentThinkModelsV1ResponseModelsItemThree(UncheckedBaseModel): Groq models """ - id: AgentThinkModelsV1ResponseModelsItemThreeId = pydantic.Field() + id: typing.Literal["openai/gpt-oss-20b"] = pydantic.Field(default="openai/gpt-oss-20b") """ The unique identifier of the Groq model """ @@ -24,7 +22,7 @@ class AgentThinkModelsV1ResponseModelsItemThree(UncheckedBaseModel): The display name of the model """ - provider: AgentThinkModelsV1ResponseModelsItemThreeProvider = pydantic.Field() + provider: typing.Literal["groq"] = pydantic.Field(default="groq") """ The provider of the model """ diff --git a/src/deepgram/types/agent_think_models_v1response_models_item_three_id.py b/src/deepgram/types/agent_think_models_v1response_models_item_three_id.py deleted file mode 100644 index c318c675..00000000 --- a/src/deepgram/types/agent_think_models_v1response_models_item_three_id.py +++ /dev/null 
@@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentThinkModelsV1ResponseModelsItemThreeId = typing.Union[typing.Literal["openai/gpt-oss-20b"], typing.Any] diff --git a/src/deepgram/types/agent_think_models_v1response_models_item_three_provider.py b/src/deepgram/types/agent_think_models_v1response_models_item_three_provider.py deleted file mode 100644 index 0bf80206..00000000 --- a/src/deepgram/types/agent_think_models_v1response_models_item_three_provider.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentThinkModelsV1ResponseModelsItemThreeProvider = typing.Union[typing.Literal["groq"], typing.Any] diff --git a/src/deepgram/types/agent_think_models_v1response_models_item_two.py b/src/deepgram/types/agent_think_models_v1response_models_item_two.py index c38a46dc..97e3f604 100644 --- a/src/deepgram/types/agent_think_models_v1response_models_item_two.py +++ b/src/deepgram/types/agent_think_models_v1response_models_item_two.py @@ -6,7 +6,6 @@ from ..core.pydantic_utilities import IS_PYDANTIC_V2 from ..core.unchecked_base_model import UncheckedBaseModel from .agent_think_models_v1response_models_item_two_id import AgentThinkModelsV1ResponseModelsItemTwoId -from .agent_think_models_v1response_models_item_two_provider import AgentThinkModelsV1ResponseModelsItemTwoProvider class AgentThinkModelsV1ResponseModelsItemTwo(UncheckedBaseModel): @@ -24,7 +23,7 @@ class AgentThinkModelsV1ResponseModelsItemTwo(UncheckedBaseModel): The display name of the model """ - provider: AgentThinkModelsV1ResponseModelsItemTwoProvider = pydantic.Field() + provider: typing.Literal["google"] = pydantic.Field(default="google") """ The provider of the model """ diff --git a/src/deepgram/types/agent_think_models_v1response_models_item_two_provider.py b/src/deepgram/types/agent_think_models_v1response_models_item_two_provider.py deleted file mode 100644 index 
4c660732..00000000 --- a/src/deepgram/types/agent_think_models_v1response_models_item_two_provider.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AgentThinkModelsV1ResponseModelsItemTwoProvider = typing.Union[typing.Literal["google"], typing.Any] diff --git a/src/deepgram/types/agent_think_models_v1response_models_item_zero.py b/src/deepgram/types/agent_think_models_v1response_models_item_zero.py index 543f6a93..28850db2 100644 --- a/src/deepgram/types/agent_think_models_v1response_models_item_zero.py +++ b/src/deepgram/types/agent_think_models_v1response_models_item_zero.py @@ -6,7 +6,6 @@ from ..core.pydantic_utilities import IS_PYDANTIC_V2 from ..core.unchecked_base_model import UncheckedBaseModel from .agent_think_models_v1response_models_item_zero_id import AgentThinkModelsV1ResponseModelsItemZeroId -from .agent_think_models_v1response_models_item_zero_provider import AgentThinkModelsV1ResponseModelsItemZeroProvider class AgentThinkModelsV1ResponseModelsItemZero(UncheckedBaseModel): @@ -24,7 +23,7 @@ class AgentThinkModelsV1ResponseModelsItemZero(UncheckedBaseModel): The display name of the model """ - provider: AgentThinkModelsV1ResponseModelsItemZeroProvider = pydantic.Field() + provider: typing.Literal["open_ai"] = pydantic.Field(default="open_ai") """ The provider of the model """ diff --git a/src/deepgram/types/agent_think_models_v1response_models_item_zero_provider.py b/src/deepgram/types/agent_think_models_v1response_models_item_zero_provider.py deleted file mode 100644 index 0b4e2034..00000000 --- a/src/deepgram/types/agent_think_models_v1response_models_item_zero_provider.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AgentThinkModelsV1ResponseModelsItemZeroProvider = typing.Union[typing.Literal["open_ai"], typing.Any] diff --git a/src/deepgram/types/agent_variable_v1.py b/src/deepgram/types/agent_variable_v1.py new file mode 100644 index 00000000..135a0ab3 --- /dev/null +++ b/src/deepgram/types/agent_variable_v1.py @@ -0,0 +1,44 @@ +# This file was auto-generated by Fern from our API Definition. + +import datetime as dt +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class AgentVariableV1(UncheckedBaseModel): + """ + A template variable for agent configurations + """ + + variable_id: str = pydantic.Field() + """ + The unique identifier of the variable + """ + + key: str = pydantic.Field() + """ + The variable name, following the DG_ format + """ + + value: typing.Any + created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + Timestamp when the variable was created + """ + + updated_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + Timestamp when the variable was last updated + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/deepgram/types/anthropic.py b/src/deepgram/types/anthropic.py index 659b8cf8..c972254e 100644 --- a/src/deepgram/types/anthropic.py +++ b/src/deepgram/types/anthropic.py @@ -2,4 +2,34 @@ import typing -Anthropic = typing.Any +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .anthropic_think_provider_model import AnthropicThinkProviderModel + + +class Anthropic(UncheckedBaseModel): + type: typing.Literal["anthropic"] = "anthropic" + version: typing.Optional[typing.Literal["v1"]] = 
pydantic.Field(default=None) + """ + The REST API version for the Anthropic Messages API + """ + + model: AnthropicThinkProviderModel = pydantic.Field() + """ + Anthropic model to use + """ + + temperature: typing.Optional[float] = pydantic.Field(default=None) + """ + Anthropic temperature (0-1) + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/deepgram/types/anthropic_think_provider_model.py b/src/deepgram/types/anthropic_think_provider_model.py new file mode 100644 index 00000000..7c78238f --- /dev/null +++ b/src/deepgram/types/anthropic_think_provider_model.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AnthropicThinkProviderModel = typing.Union[ + typing.Literal["claude-3-5-haiku-latest", "claude-sonnet-4-20250514"], typing.Any +] diff --git a/src/deepgram/types/aws_bedrock_think_provider.py b/src/deepgram/types/aws_bedrock_think_provider.py index 4cd834f4..a601a278 100644 --- a/src/deepgram/types/aws_bedrock_think_provider.py +++ b/src/deepgram/types/aws_bedrock_think_provider.py @@ -2,4 +2,35 @@ import typing -AwsBedrockThinkProvider = typing.Any +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .aws_bedrock_think_provider_credentials import AwsBedrockThinkProviderCredentials +from .aws_bedrock_think_provider_model import AwsBedrockThinkProviderModel + + +class AwsBedrockThinkProvider(UncheckedBaseModel): + type: typing.Literal["aws_bedrock"] = "aws_bedrock" + model: AwsBedrockThinkProviderModel = pydantic.Field() + """ + AWS Bedrock model to use + """ + + temperature: typing.Optional[float] = pydantic.Field(default=None) + """ + AWS Bedrock temperature (0-2) + """ + + credentials: 
typing.Optional[AwsBedrockThinkProviderCredentials] = pydantic.Field(default=None) + """ + AWS credentials type (STS short-lived or IAM long-lived) + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/deepgram/types/aws_bedrock_think_provider_credentials.py b/src/deepgram/types/aws_bedrock_think_provider_credentials.py new file mode 100644 index 00000000..c014d379 --- /dev/null +++ b/src/deepgram/types/aws_bedrock_think_provider_credentials.py @@ -0,0 +1,48 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .aws_bedrock_think_provider_credentials_type import AwsBedrockThinkProviderCredentialsType + + +class AwsBedrockThinkProviderCredentials(UncheckedBaseModel): + """ + AWS credentials type (STS short-lived or IAM long-lived) + """ + + type: typing.Optional[AwsBedrockThinkProviderCredentialsType] = pydantic.Field(default=None) + """ + AWS credentials type (STS short-lived or IAM long-lived) + """ + + region: typing.Optional[str] = pydantic.Field(default=None) + """ + AWS region + """ + + access_key_id: typing.Optional[str] = pydantic.Field(default=None) + """ + AWS access key + """ + + secret_access_key: typing.Optional[str] = pydantic.Field(default=None) + """ + AWS secret access key + """ + + session_token: typing.Optional[str] = pydantic.Field(default=None) + """ + AWS session token (required for STS only) + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git 
a/src/deepgram/types/aws_bedrock_think_provider_credentials_type.py b/src/deepgram/types/aws_bedrock_think_provider_credentials_type.py new file mode 100644 index 00000000..df30845b --- /dev/null +++ b/src/deepgram/types/aws_bedrock_think_provider_credentials_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AwsBedrockThinkProviderCredentialsType = typing.Union[typing.Literal["sts", "iam"], typing.Any] diff --git a/src/deepgram/types/aws_bedrock_think_provider_model.py b/src/deepgram/types/aws_bedrock_think_provider_model.py new file mode 100644 index 00000000..1b816d93 --- /dev/null +++ b/src/deepgram/types/aws_bedrock_think_provider_model.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AwsBedrockThinkProviderModel = typing.Union[ + typing.Literal["anthropic/claude-3-5-sonnet-20240620-v1:0", "anthropic/claude-3-5-haiku-20240307-v1:0"], typing.Any +] diff --git a/src/deepgram/types/aws_polly_speak_provider.py b/src/deepgram/types/aws_polly_speak_provider.py index 4c9020dc..9dfa46bd 100644 --- a/src/deepgram/types/aws_polly_speak_provider.py +++ b/src/deepgram/types/aws_polly_speak_provider.py @@ -2,4 +2,39 @@ import typing -AwsPollySpeakProvider = typing.Any +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .aws_polly_speak_provider_credentials import AwsPollySpeakProviderCredentials +from .aws_polly_speak_provider_engine import AwsPollySpeakProviderEngine +from .aws_polly_speak_provider_voice import AwsPollySpeakProviderVoice + + +class AwsPollySpeakProvider(UncheckedBaseModel): + type: typing.Literal["aws_polly"] = "aws_polly" + voice: AwsPollySpeakProviderVoice = pydantic.Field() + """ + AWS Polly voice name + """ + + language: str = pydantic.Field() + """ + Language code to use, e.g. 'en-US'. 
Corresponds to the `language_code` parameter in the AWS Polly API + """ + + language_code: typing.Optional[str] = pydantic.Field(default=None) + """ + Use the `language` field instead. + """ + + engine: AwsPollySpeakProviderEngine + credentials: AwsPollySpeakProviderCredentials + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_aws_polly_credentials.py b/src/deepgram/types/aws_polly_speak_provider_credentials.py similarity index 57% rename from src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_aws_polly_credentials.py rename to src/deepgram/types/aws_polly_speak_provider_credentials.py index daf92957..eca5defc 100644 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_aws_polly_credentials.py +++ b/src/deepgram/types/aws_polly_speak_provider_credentials.py @@ -3,15 +3,13 @@ import typing import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1update_speak_speak_one_item_provider_aws_polly_credentials_type import ( - AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentialsType, -) +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .aws_polly_speak_provider_credentials_type import AwsPollySpeakProviderCredentialsType -class AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentials(UncheckedBaseModel): - type: AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyCredentialsType +class AwsPollySpeakProviderCredentials(UncheckedBaseModel): + type: AwsPollySpeakProviderCredentialsType region: str access_key_id: str secret_access_key: str diff --git 
a/src/deepgram/types/aws_polly_speak_provider_credentials_type.py b/src/deepgram/types/aws_polly_speak_provider_credentials_type.py new file mode 100644 index 00000000..0f1785cf --- /dev/null +++ b/src/deepgram/types/aws_polly_speak_provider_credentials_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AwsPollySpeakProviderCredentialsType = typing.Union[typing.Literal["sts", "iam"], typing.Any] diff --git a/src/deepgram/types/aws_polly_speak_provider_engine.py b/src/deepgram/types/aws_polly_speak_provider_engine.py new file mode 100644 index 00000000..4f42ea6d --- /dev/null +++ b/src/deepgram/types/aws_polly_speak_provider_engine.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AwsPollySpeakProviderEngine = typing.Union[typing.Literal["generative", "long-form", "standard", "neural"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_aws_polly_voice.py b/src/deepgram/types/aws_polly_speak_provider_voice.py similarity index 73% rename from src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_aws_polly_voice.py rename to src/deepgram/types/aws_polly_speak_provider_voice.py index 3f077a0e..c186c9b0 100644 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_aws_polly_voice.py +++ b/src/deepgram/types/aws_polly_speak_provider_voice.py @@ -2,6 +2,6 @@ import typing -AgentV1UpdateSpeakSpeakOneItemProviderAwsPollyVoice = typing.Union[ +AwsPollySpeakProviderVoice = typing.Union[ typing.Literal["Matthew", "Joanna", "Amy", "Emma", "Brian", "Arthur", "Aria", "Ayanda"], typing.Any ] diff --git a/src/deepgram/types/cartesia.py b/src/deepgram/types/cartesia.py index 483f6949..5ec0b4d4 100644 --- a/src/deepgram/types/cartesia.py +++ b/src/deepgram/types/cartesia.py @@ -2,4 +2,41 @@ import typing -Cartesia = typing.Any +import pydantic +from ..core.pydantic_utilities import 
IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .cartesia_speak_provider_model_id import CartesiaSpeakProviderModelId +from .cartesia_speak_provider_voice import CartesiaSpeakProviderVoice + + +class Cartesia(UncheckedBaseModel): + type: typing.Literal["cartesia"] = "cartesia" + version: typing.Optional[typing.Literal["2025-03-17"]] = pydantic.Field(default=None) + """ + The API version header for the Cartesia text-to-speech API + """ + + model_id: CartesiaSpeakProviderModelId = pydantic.Field() + """ + Cartesia model ID + """ + + voice: CartesiaSpeakProviderVoice + language: typing.Optional[str] = pydantic.Field(default=None) + """ + Cartesia language code + """ + + volume: typing.Optional[float] = pydantic.Field(default=None) + """ + Volume level for Cartesia TTS output. Valid range: 0.5 to 2.0. See [Cartesia documentation](https://docs.cartesia.ai/build-with-cartesia/sonic-3/volume-speed-emotion#volume-speed-and-emotion). + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/deepgram/types/cartesia_speak_provider_model_id.py b/src/deepgram/types/cartesia_speak_provider_model_id.py new file mode 100644 index 00000000..f8a9d7e9 --- /dev/null +++ b/src/deepgram/types/cartesia_speak_provider_model_id.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +CartesiaSpeakProviderModelId = typing.Union[typing.Literal["sonic-2", "sonic-multilingual"], typing.Any] diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_cartesia_voice.py b/src/deepgram/types/cartesia_speak_provider_voice.py similarity index 73% rename from src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_cartesia_voice.py rename to src/deepgram/types/cartesia_speak_provider_voice.py index 78756f0b..cfc897e7 100644 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_cartesia_voice.py +++ b/src/deepgram/types/cartesia_speak_provider_voice.py @@ -3,11 +3,11 @@ import typing import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel -class AgentV1UpdateSpeakSpeakOneItemProviderCartesiaVoice(UncheckedBaseModel): +class CartesiaSpeakProviderVoice(UncheckedBaseModel): mode: str = pydantic.Field() """ Cartesia voice mode diff --git a/src/deepgram/types/create_agent_configuration_v1response.py b/src/deepgram/types/create_agent_configuration_v1response.py new file mode 100644 index 00000000..1629717a --- /dev/null +++ b/src/deepgram/types/create_agent_configuration_v1response.py @@ -0,0 +1,33 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class CreateAgentConfigurationV1Response(UncheckedBaseModel): + agent_id: str = pydantic.Field() + """ + The unique identifier of the newly created agent configuration + """ + + config: typing.Dict[str, typing.Any] = pydantic.Field() + """ + The parsed agent configuration object + """ + + metadata: typing.Optional[typing.Dict[str, str]] = pydantic.Field(default=None) + """ + Metadata associated with the agent configuration + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/deepgram/types/deepgram.py b/src/deepgram/types/deepgram.py index b18e81bd..88579177 100644 --- a/src/deepgram/types/deepgram.py +++ b/src/deepgram/types/deepgram.py @@ -2,4 +2,34 @@ import typing -Deepgram = typing.Any +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .deepgram_speak_provider_model import DeepgramSpeakProviderModel + + +class Deepgram(UncheckedBaseModel): + type: typing.Literal["deepgram"] = "deepgram" + version: typing.Optional[typing.Literal["v1"]] = pydantic.Field(default=None) + """ + The REST API version for the Deepgram text-to-speech API + """ + + model: DeepgramSpeakProviderModel = pydantic.Field() + """ + Deepgram TTS model + """ + + speed: typing.Optional[float] = pydantic.Field(default=None) + """ + Speaking rate multiplier that adjusts the pace of generated speech while preserving natural prosody and voice quality. Not yet supported in all languages. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_deepgram_model.py b/src/deepgram/types/deepgram_speak_provider_model.py similarity index 96% rename from src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_deepgram_model.py rename to src/deepgram/types/deepgram_speak_provider_model.py index 7600a539..72bcae5c 100644 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_deepgram_model.py +++ b/src/deepgram/types/deepgram_speak_provider_model.py @@ -2,7 +2,7 @@ import typing -AgentV1UpdateSpeakSpeakOneItemProviderDeepgramModel = typing.Union[ +DeepgramSpeakProviderModel = typing.Union[ typing.Literal[ "aura-asteria-en", "aura-luna-en", diff --git a/src/deepgram/speak/v1/types/speak_v1text_type.py b/src/deepgram/types/delete_agent_configuration_v1response.py similarity index 54% rename from src/deepgram/speak/v1/types/speak_v1text_type.py rename to src/deepgram/types/delete_agent_configuration_v1response.py index f6b0345f..7adb31a8 100644 --- a/src/deepgram/speak/v1/types/speak_v1text_type.py +++ b/src/deepgram/types/delete_agent_configuration_v1response.py @@ -2,4 +2,4 @@ import typing -SpeakV1TextType = typing.Union[typing.Literal["Speak"], typing.Any] +DeleteAgentConfigurationV1Response = typing.Dict[str, typing.Any] diff --git a/src/deepgram/agent/v1/types/max.py b/src/deepgram/types/delete_agent_variable_v1response.py similarity index 56% rename from src/deepgram/agent/v1/types/max.py rename to src/deepgram/types/delete_agent_variable_v1response.py index 71bcad70..d4c1a33c 100644 --- a/src/deepgram/agent/v1/types/max.py +++ b/src/deepgram/types/delete_agent_variable_v1response.py @@ -2,4 +2,4 @@ import typing -Max = 
typing.Union[typing.Literal["max"], typing.Any] +DeleteAgentVariableV1Response = typing.Dict[str, typing.Any] diff --git a/src/deepgram/types/eleven_labs_speak_provider.py b/src/deepgram/types/eleven_labs_speak_provider.py index 606028cc..070d7423 100644 --- a/src/deepgram/types/eleven_labs_speak_provider.py +++ b/src/deepgram/types/eleven_labs_speak_provider.py @@ -2,4 +2,39 @@ import typing -ElevenLabsSpeakProvider = typing.Any +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .eleven_labs_speak_provider_model_id import ElevenLabsSpeakProviderModelId + + +class ElevenLabsSpeakProvider(UncheckedBaseModel): + type: typing.Literal["eleven_labs"] = "eleven_labs" + version: typing.Optional[typing.Literal["v1"]] = pydantic.Field(default=None) + """ + The REST API version for the ElevenLabs text-to-speech API + """ + + model_id: ElevenLabsSpeakProviderModelId = pydantic.Field() + """ + Eleven Labs model ID + """ + + language: typing.Optional[str] = pydantic.Field(default=None) + """ + Optional language to use, e.g. 'en-US'. Corresponds to the `language_code` parameter in the ElevenLabs API + """ + + language_code: typing.Optional[str] = pydantic.Field(default=None) + """ + Use the `language` field instead. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_eleven_labs_model_id.py b/src/deepgram/types/eleven_labs_speak_provider_model_id.py similarity index 71% rename from src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_eleven_labs_model_id.py rename to src/deepgram/types/eleven_labs_speak_provider_model_id.py index 33b709fe..ce0427e2 100644 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_provider_eleven_labs_model_id.py +++ b/src/deepgram/types/eleven_labs_speak_provider_model_id.py @@ -2,6 +2,6 @@ import typing -AgentV1UpdateSpeakSpeakOneItemProviderElevenLabsModelId = typing.Union[ +ElevenLabsSpeakProviderModelId = typing.Union[ typing.Literal["eleven_turbo_v2_5", "eleven_monolingual_v1", "eleven_multilingual_v2"], typing.Any ] diff --git a/src/deepgram/types/google.py b/src/deepgram/types/google.py index 463900ac..56e86997 100644 --- a/src/deepgram/types/google.py +++ b/src/deepgram/types/google.py @@ -2,4 +2,34 @@ import typing -Google = typing.Any +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .google_think_provider_model import GoogleThinkProviderModel + + +class Google(UncheckedBaseModel): + type: typing.Literal["google"] = "google" + version: typing.Optional[typing.Literal["v1beta"]] = pydantic.Field(default=None) + """ + The REST API version for the Google generative language API + """ + + model: GoogleThinkProviderModel = pydantic.Field() + """ + Google model to use + """ + + temperature: typing.Optional[float] = pydantic.Field(default=None) + """ + Google temperature (0-2) + """ + + if IS_PYDANTIC_V2: + model_config: 
typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/deepgram/types/google_think_provider_model.py b/src/deepgram/types/google_think_provider_model.py new file mode 100644 index 00000000..a6061bac --- /dev/null +++ b/src/deepgram/types/google_think_provider_model.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +GoogleThinkProviderModel = typing.Union[ + typing.Literal["gemini-2.0-flash", "gemini-2.0-flash-lite", "gemini-2.5-flash"], typing.Any +] diff --git a/src/deepgram/types/groq.py b/src/deepgram/types/groq.py index 0df2d3c2..b04ae473 100644 --- a/src/deepgram/types/groq.py +++ b/src/deepgram/types/groq.py @@ -2,4 +2,33 @@ import typing -Groq = typing.Any +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel + + +class Groq(UncheckedBaseModel): + type: typing.Literal["groq"] = "groq" + version: typing.Optional[typing.Literal["v1"]] = pydantic.Field(default=None) + """ + The REST API version for the Groq's chat completions API (mostly OpenAI-compatible) + """ + + model: typing.Literal["openai/gpt-oss-20b"] = pydantic.Field(default="openai/gpt-oss-20b") + """ + Groq model to use + """ + + temperature: typing.Optional[float] = pydantic.Field(default=None) + """ + Groq temperature (0-2) + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/deepgram/types/list_agent_configurations_v1response.py b/src/deepgram/types/list_agent_configurations_v1response.py new file mode 100644 index 00000000..1e0cc256 --- /dev/null +++ 
b/src/deepgram/types/list_agent_configurations_v1response.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_configuration_v1 import AgentConfigurationV1 + + +class ListAgentConfigurationsV1Response(UncheckedBaseModel): + agents: typing.Optional[typing.List[AgentConfigurationV1]] = pydantic.Field(default=None) + """ + A list of agent configurations for the project + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/deepgram/types/list_agent_variables_v1response.py b/src/deepgram/types/list_agent_variables_v1response.py new file mode 100644 index 00000000..390a9e6d --- /dev/null +++ b/src/deepgram/types/list_agent_variables_v1response.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_variable_v1 import AgentVariableV1 + + +class ListAgentVariablesV1Response(UncheckedBaseModel): + variables: typing.Optional[typing.List[AgentVariableV1]] = pydantic.Field(default=None) + """ + A list of agent variables for the project + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/deepgram/types/listen_v1response_results_channels_item_alternatives_item_paragraphs_paragraphs_item.py b/src/deepgram/types/listen_v1response_results_channels_item_alternatives_item_paragraphs_paragraphs_item.py index eec3866b..a622d4ae 100644 --- a/src/deepgram/types/listen_v1response_results_channels_item_alternatives_item_paragraphs_paragraphs_item.py +++ b/src/deepgram/types/listen_v1response_results_channels_item_alternatives_item_paragraphs_paragraphs_item.py @@ -14,8 +14,8 @@ class ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParagraphsIte sentences: typing.Optional[ typing.List[ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParagraphsItemSentencesItem] ] = None - speaker: typing.Optional[int] = None - num_words: typing.Optional[int] = None + speaker: typing.Optional[float] = None + num_words: typing.Optional[float] = None start: typing.Optional[float] = None end: typing.Optional[float] = None diff --git a/src/deepgram/types/listen_v1response_results_utterances_item.py b/src/deepgram/types/listen_v1response_results_utterances_item.py index 0947d9f5..ed8a8ddd 100644 --- a/src/deepgram/types/listen_v1response_results_utterances_item.py +++ b/src/deepgram/types/listen_v1response_results_utterances_item.py @@ -12,10 +12,10 @@ class 
ListenV1ResponseResultsUtterancesItem(UncheckedBaseModel): start: typing.Optional[float] = None end: typing.Optional[float] = None confidence: typing.Optional[float] = None - channel: typing.Optional[int] = None + channel: typing.Optional[float] = None transcript: typing.Optional[str] = None words: typing.Optional[typing.List[ListenV1ResponseResultsUtterancesItemWordsItem]] = None - speaker: typing.Optional[int] = None + speaker: typing.Optional[float] = None id: typing.Optional[str] = None if IS_PYDANTIC_V2: diff --git a/src/deepgram/types/listen_v1response_results_utterances_item_words_item.py b/src/deepgram/types/listen_v1response_results_utterances_item_words_item.py index 6cd1313a..716f2e2a 100644 --- a/src/deepgram/types/listen_v1response_results_utterances_item_words_item.py +++ b/src/deepgram/types/listen_v1response_results_utterances_item_words_item.py @@ -12,7 +12,7 @@ class ListenV1ResponseResultsUtterancesItemWordsItem(UncheckedBaseModel): start: typing.Optional[float] = None end: typing.Optional[float] = None confidence: typing.Optional[float] = None - speaker: typing.Optional[int] = None + speaker: typing.Optional[float] = None speaker_confidence: typing.Optional[float] = None punctuated_word: typing.Optional[str] = None diff --git a/src/deepgram/types/listen_v2model.py b/src/deepgram/types/listen_v2model.py index ad86649f..2a62c3b2 100644 --- a/src/deepgram/types/listen_v2model.py +++ b/src/deepgram/types/listen_v2model.py @@ -2,4 +2,4 @@ import typing -ListenV2Model = typing.Union[typing.Literal["flux-general-en"], typing.Any] +ListenV2Model = typing.Union[typing.Literal["flux-general-en", "flux-general-multi"], typing.Any] diff --git a/src/deepgram/types/open_ai_speak_provider.py b/src/deepgram/types/open_ai_speak_provider.py index e21ee8f2..204c2434 100644 --- a/src/deepgram/types/open_ai_speak_provider.py +++ b/src/deepgram/types/open_ai_speak_provider.py @@ -2,4 +2,35 @@ import typing -OpenAiSpeakProvider = typing.Any +import pydantic +from 
..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .open_ai_speak_provider_model import OpenAiSpeakProviderModel +from .open_ai_speak_provider_voice import OpenAiSpeakProviderVoice + + +class OpenAiSpeakProvider(UncheckedBaseModel): + type: typing.Literal["open_ai"] = "open_ai" + version: typing.Optional[typing.Literal["v1"]] = pydantic.Field(default=None) + """ + The REST API version for the OpenAI text-to-speech API + """ + + model: OpenAiSpeakProviderModel = pydantic.Field() + """ + OpenAI TTS model + """ + + voice: OpenAiSpeakProviderVoice = pydantic.Field() + """ + OpenAI voice + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/deepgram/types/open_ai_speak_provider_model.py b/src/deepgram/types/open_ai_speak_provider_model.py new file mode 100644 index 00000000..ed04c40f --- /dev/null +++ b/src/deepgram/types/open_ai_speak_provider_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +OpenAiSpeakProviderModel = typing.Union[typing.Literal["tts-1", "tts-1-hd"], typing.Any] diff --git a/src/deepgram/types/open_ai_speak_provider_voice.py b/src/deepgram/types/open_ai_speak_provider_voice.py new file mode 100644 index 00000000..3d7608a5 --- /dev/null +++ b/src/deepgram/types/open_ai_speak_provider_voice.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +OpenAiSpeakProviderVoice = typing.Union[typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any] diff --git a/src/deepgram/types/open_ai_think_provider.py b/src/deepgram/types/open_ai_think_provider.py index fed929a1..fd3e2601 100644 --- a/src/deepgram/types/open_ai_think_provider.py +++ b/src/deepgram/types/open_ai_think_provider.py @@ -2,4 +2,34 @@ import typing -OpenAiThinkProvider = typing.Any +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .open_ai_think_provider_model import OpenAiThinkProviderModel + + +class OpenAiThinkProvider(UncheckedBaseModel): + type: typing.Literal["open_ai"] = "open_ai" + version: typing.Optional[typing.Literal["v1"]] = pydantic.Field(default=None) + """ + The REST API version for the OpenAI chat completions API + """ + + model: OpenAiThinkProviderModel = pydantic.Field() + """ + OpenAI model to use + """ + + temperature: typing.Optional[float] = pydantic.Field(default=None) + """ + OpenAI temperature (0-2) + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/deepgram/types/open_ai_think_provider_model.py b/src/deepgram/types/open_ai_think_provider_model.py new file mode 100644 index 00000000..80b24e48 --- /dev/null +++ b/src/deepgram/types/open_ai_think_provider_model.py @@ -0,0 +1,10 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +OpenAiThinkProviderModel = typing.Union[ + typing.Literal[ + "gpt-5", "gpt-5-mini", "gpt-5-nano", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", "gpt-4o", "gpt-4o-mini" + ], + typing.Any, +] diff --git a/src/deepgram/types/speak_settings_v1.py b/src/deepgram/types/speak_settings_v1.py index 20ab1f5a..e4c41a60 100644 --- a/src/deepgram/types/speak_settings_v1.py +++ b/src/deepgram/types/speak_settings_v1.py @@ -2,4 +2,26 @@ import typing -SpeakSettingsV1 = typing.Any +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .speak_settings_v1endpoint import SpeakSettingsV1Endpoint +from .speak_settings_v1provider import SpeakSettingsV1Provider + + +class SpeakSettingsV1(UncheckedBaseModel): + provider: SpeakSettingsV1Provider + endpoint: typing.Optional[SpeakSettingsV1Endpoint] = pydantic.Field(default=None) + """ + Optional if provider is Deepgram. Required for non-Deepgram TTS providers. + When present, must include url field and headers object. Valid schemes are https and wss with wss only supported for Eleven Labs. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_endpoint.py b/src/deepgram/types/speak_settings_v1endpoint.py similarity index 82% rename from src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_endpoint.py rename to src/deepgram/types/speak_settings_v1endpoint.py index 45694cc7..ac806e29 100644 --- a/src/deepgram/agent/v1/types/agent_v1update_speak_speak_one_item_endpoint.py +++ b/src/deepgram/types/speak_settings_v1endpoint.py @@ -3,11 +3,11 @@ import typing import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel -class AgentV1UpdateSpeakSpeakOneItemEndpoint(UncheckedBaseModel): +class SpeakSettingsV1Endpoint(UncheckedBaseModel): """ Optional if provider is Deepgram. Required for non-Deepgram TTS providers. When present, must include url field and headers object. Valid schemes are https and wss with wss only supported for Eleven Labs. diff --git a/src/deepgram/types/speak_settings_v1provider.py b/src/deepgram/types/speak_settings_v1provider.py new file mode 100644 index 00000000..409122ed --- /dev/null +++ b/src/deepgram/types/speak_settings_v1provider.py @@ -0,0 +1,116 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from __future__ import annotations + +import typing + +import pydantic +import typing_extensions +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel, UnionMetadata +from .aws_polly_speak_provider_credentials import AwsPollySpeakProviderCredentials +from .aws_polly_speak_provider_engine import AwsPollySpeakProviderEngine +from .aws_polly_speak_provider_voice import AwsPollySpeakProviderVoice +from .cartesia_speak_provider_model_id import CartesiaSpeakProviderModelId +from .cartesia_speak_provider_voice import CartesiaSpeakProviderVoice +from .deepgram_speak_provider_model import DeepgramSpeakProviderModel +from .eleven_labs_speak_provider_model_id import ElevenLabsSpeakProviderModelId +from .open_ai_speak_provider_model import OpenAiSpeakProviderModel +from .open_ai_speak_provider_voice import OpenAiSpeakProviderVoice + + +class SpeakSettingsV1Provider_Deepgram(UncheckedBaseModel): + type: typing.Literal["deepgram"] = "deepgram" + version: typing.Optional[typing.Literal["v1"]] = None + model: DeepgramSpeakProviderModel + speed: typing.Optional[float] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +class SpeakSettingsV1Provider_ElevenLabs(UncheckedBaseModel): + type: typing.Literal["eleven_labs"] = "eleven_labs" + version: typing.Optional[typing.Literal["v1"]] = None + model_id: ElevenLabsSpeakProviderModelId + language: typing.Optional[str] = None + language_code: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +class 
SpeakSettingsV1Provider_Cartesia(UncheckedBaseModel): + type: typing.Literal["cartesia"] = "cartesia" + version: typing.Optional[typing.Literal["2025-03-17"]] = None + model_id: CartesiaSpeakProviderModelId + voice: CartesiaSpeakProviderVoice + language: typing.Optional[str] = None + volume: typing.Optional[float] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +class SpeakSettingsV1Provider_OpenAi(UncheckedBaseModel): + type: typing.Literal["open_ai"] = "open_ai" + version: typing.Optional[typing.Literal["v1"]] = None + model: OpenAiSpeakProviderModel + voice: OpenAiSpeakProviderVoice + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +class SpeakSettingsV1Provider_AwsPolly(UncheckedBaseModel): + type: typing.Literal["aws_polly"] = "aws_polly" + voice: AwsPollySpeakProviderVoice + language: str + language_code: typing.Optional[str] = None + engine: AwsPollySpeakProviderEngine + credentials: AwsPollySpeakProviderCredentials + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +SpeakSettingsV1Provider = typing_extensions.Annotated[ + typing.Union[ + SpeakSettingsV1Provider_Deepgram, + SpeakSettingsV1Provider_ElevenLabs, + SpeakSettingsV1Provider_Cartesia, + SpeakSettingsV1Provider_OpenAi, + SpeakSettingsV1Provider_AwsPolly, + ], + UnionMetadata(discriminant="type"), +] diff --git a/src/deepgram/types/think_settings_v1.py b/src/deepgram/types/think_settings_v1.py 
index 1de56d5b..f74c63d1 100644 --- a/src/deepgram/types/think_settings_v1.py +++ b/src/deepgram/types/think_settings_v1.py @@ -5,10 +5,25 @@ import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2 from ..core.unchecked_base_model import UncheckedBaseModel +from .think_settings_v1context_length import ThinkSettingsV1ContextLength +from .think_settings_v1endpoint import ThinkSettingsV1Endpoint +from .think_settings_v1functions_item import ThinkSettingsV1FunctionsItem +from .think_settings_v1provider import ThinkSettingsV1Provider class ThinkSettingsV1(UncheckedBaseModel): - context_length: typing.Optional[typing.Any] = None + provider: ThinkSettingsV1Provider + endpoint: typing.Optional[ThinkSettingsV1Endpoint] = pydantic.Field(default=None) + """ + Optional for non-Deepgram LLM providers. When present, must include url field and headers object + """ + + functions: typing.Optional[typing.List[ThinkSettingsV1FunctionsItem]] = None + prompt: typing.Optional[str] = None + context_length: typing.Optional[ThinkSettingsV1ContextLength] = pydantic.Field(default=None) + """ + Specifies the number of characters retained in context between user messages, agent responses, and function calls. 
This setting is only configurable when a custom think endpoint is used + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/deepgram/agent/v1/types/agent_v1warning_type.py b/src/deepgram/types/think_settings_v1context_length.py similarity index 51% rename from src/deepgram/agent/v1/types/agent_v1warning_type.py rename to src/deepgram/types/think_settings_v1context_length.py index af9abcdb..dc9a6555 100644 --- a/src/deepgram/agent/v1/types/agent_v1warning_type.py +++ b/src/deepgram/types/think_settings_v1context_length.py @@ -2,4 +2,4 @@ import typing -AgentV1WarningType = typing.Union[typing.Literal["Warning"], typing.Any] +ThinkSettingsV1ContextLength = typing.Union[typing.Literal["max"], float] diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_one_item_endpoint.py b/src/deepgram/types/think_settings_v1endpoint.py similarity index 80% rename from src/deepgram/agent/v1/types/agent_v1settings_agent_think_one_item_endpoint.py rename to src/deepgram/types/think_settings_v1endpoint.py index 09e0a99d..a98942ca 100644 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_one_item_endpoint.py +++ b/src/deepgram/types/think_settings_v1endpoint.py @@ -3,11 +3,11 @@ import typing import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel -class AgentV1SettingsAgentThinkOneItemEndpoint(UncheckedBaseModel): +class ThinkSettingsV1Endpoint(UncheckedBaseModel): """ Optional for non-Deepgram LLM providers. 
When present, must include url field and headers object """ diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_one_item_functions_item.py b/src/deepgram/types/think_settings_v1functions_item.py similarity index 65% rename from src/deepgram/agent/v1/types/agent_v1settings_agent_think_one_item_functions_item.py rename to src/deepgram/types/think_settings_v1functions_item.py index 0bdf2a29..6a142ae5 100644 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_one_item_functions_item.py +++ b/src/deepgram/types/think_settings_v1functions_item.py @@ -3,14 +3,12 @@ import typing import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel -from .agent_v1settings_agent_think_one_item_functions_item_endpoint import ( - AgentV1SettingsAgentThinkOneItemFunctionsItemEndpoint, -) +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel +from .think_settings_v1functions_item_endpoint import ThinkSettingsV1FunctionsItemEndpoint -class AgentV1SettingsAgentThinkOneItemFunctionsItem(UncheckedBaseModel): +class ThinkSettingsV1FunctionsItem(UncheckedBaseModel): name: typing.Optional[str] = pydantic.Field(default=None) """ Function name @@ -26,7 +24,7 @@ class AgentV1SettingsAgentThinkOneItemFunctionsItem(UncheckedBaseModel): Function parameters """ - endpoint: typing.Optional[AgentV1SettingsAgentThinkOneItemFunctionsItemEndpoint] = pydantic.Field(default=None) + endpoint: typing.Optional[ThinkSettingsV1FunctionsItemEndpoint] = pydantic.Field(default=None) """ The Function endpoint to call. 
if not passed, function is called client-side """ diff --git a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_one_item_functions_item_endpoint.py b/src/deepgram/types/think_settings_v1functions_item_endpoint.py similarity index 78% rename from src/deepgram/agent/v1/types/agent_v1settings_agent_think_one_item_functions_item_endpoint.py rename to src/deepgram/types/think_settings_v1functions_item_endpoint.py index 74886529..b3e0148e 100644 --- a/src/deepgram/agent/v1/types/agent_v1settings_agent_think_one_item_functions_item_endpoint.py +++ b/src/deepgram/types/think_settings_v1functions_item_endpoint.py @@ -3,11 +3,11 @@ import typing import pydantic -from ....core.pydantic_utilities import IS_PYDANTIC_V2 -from ....core.unchecked_base_model import UncheckedBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel -class AgentV1SettingsAgentThinkOneItemFunctionsItemEndpoint(UncheckedBaseModel): +class ThinkSettingsV1FunctionsItemEndpoint(UncheckedBaseModel): """ The Function endpoint to call. if not passed, function is called client-side """ diff --git a/src/deepgram/types/think_settings_v1provider.py b/src/deepgram/types/think_settings_v1provider.py new file mode 100644 index 00000000..799ec07d --- /dev/null +++ b/src/deepgram/types/think_settings_v1provider.py @@ -0,0 +1,107 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from __future__ import annotations + +import typing + +import pydantic +import typing_extensions +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.unchecked_base_model import UncheckedBaseModel, UnionMetadata +from .anthropic_think_provider_model import AnthropicThinkProviderModel +from .aws_bedrock_think_provider_credentials import AwsBedrockThinkProviderCredentials +from .aws_bedrock_think_provider_model import AwsBedrockThinkProviderModel +from .google_think_provider_model import GoogleThinkProviderModel +from .open_ai_think_provider_model import OpenAiThinkProviderModel + + +class ThinkSettingsV1Provider_OpenAi(UncheckedBaseModel): + type: typing.Literal["open_ai"] = "open_ai" + version: typing.Optional[typing.Literal["v1"]] = None + model: OpenAiThinkProviderModel + temperature: typing.Optional[float] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +class ThinkSettingsV1Provider_AwsBedrock(UncheckedBaseModel): + type: typing.Literal["aws_bedrock"] = "aws_bedrock" + model: AwsBedrockThinkProviderModel + temperature: typing.Optional[float] = None + credentials: typing.Optional[AwsBedrockThinkProviderCredentials] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +class ThinkSettingsV1Provider_Anthropic(UncheckedBaseModel): + type: typing.Literal["anthropic"] = "anthropic" + version: typing.Optional[typing.Literal["v1"]] = None + model: AnthropicThinkProviderModel + temperature: typing.Optional[float] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", 
frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +class ThinkSettingsV1Provider_Google(UncheckedBaseModel): + type: typing.Literal["google"] = "google" + version: typing.Optional[typing.Literal["v1beta"]] = None + model: GoogleThinkProviderModel + temperature: typing.Optional[float] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +class ThinkSettingsV1Provider_Groq(UncheckedBaseModel): + type: typing.Literal["groq"] = "groq" + version: typing.Optional[typing.Literal["v1"]] = None + model: typing.Literal["openai/gpt-oss-20b"] = "openai/gpt-oss-20b" + temperature: typing.Optional[float] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +ThinkSettingsV1Provider = typing_extensions.Annotated[ + typing.Union[ + ThinkSettingsV1Provider_OpenAi, + ThinkSettingsV1Provider_AwsBedrock, + ThinkSettingsV1Provider_Anthropic, + ThinkSettingsV1Provider_Google, + ThinkSettingsV1Provider_Groq, + ], + UnionMetadata(discriminant="type"), +] diff --git a/src/deepgram/voice_agent/__init__.py b/src/deepgram/voice_agent/__init__.py new file mode 100644 index 00000000..a16b8d4b --- /dev/null +++ b/src/deepgram/voice_agent/__init__.py @@ -0,0 +1,34 @@ +# This file was auto-generated by Fern from our API Definition. + +# isort: skip_file + +import typing +from importlib import import_module + +if typing.TYPE_CHECKING: + from . 
import configurations, variables +_dynamic_imports: typing.Dict[str, str] = {"configurations": ".configurations", "variables": ".variables"} + + +def __getattr__(attr_name: str) -> typing.Any: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError(f"No {attr_name} found in _dynamic_imports for module name -> {__name__}") + try: + module = import_module(module_name, __package__) + if module_name == f".{attr_name}": + return module + else: + return getattr(module, attr_name) + except ImportError as e: + raise ImportError(f"Failed to import {attr_name} from {module_name}: {e}") from e + except AttributeError as e: + raise AttributeError(f"Failed to get {attr_name} from {module_name}: {e}") from e + + +def __dir__(): + lazy_attrs = list(_dynamic_imports.keys()) + return sorted(lazy_attrs) + + +__all__ = ["configurations", "variables"] diff --git a/src/deepgram/voice_agent/client.py b/src/deepgram/voice_agent/client.py new file mode 100644 index 00000000..2a1b4f59 --- /dev/null +++ b/src/deepgram/voice_agent/client.py @@ -0,0 +1,82 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from __future__ import annotations + +import typing + +from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from .raw_client import AsyncRawVoiceAgentClient, RawVoiceAgentClient + +if typing.TYPE_CHECKING: + from .configurations.client import AsyncConfigurationsClient, ConfigurationsClient + from .variables.client import AsyncVariablesClient, VariablesClient + + +class VoiceAgentClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._raw_client = RawVoiceAgentClient(client_wrapper=client_wrapper) + self._client_wrapper = client_wrapper + self._configurations: typing.Optional[ConfigurationsClient] = None + self._variables: typing.Optional[VariablesClient] = None + + @property + def with_raw_response(self) -> RawVoiceAgentClient: + """ + Retrieves a raw implementation of this client that returns raw responses. + + Returns + ------- + RawVoiceAgentClient + """ + return self._raw_client + + @property + def configurations(self): + if self._configurations is None: + from .configurations.client import ConfigurationsClient # noqa: E402 + + self._configurations = ConfigurationsClient(client_wrapper=self._client_wrapper) + return self._configurations + + @property + def variables(self): + if self._variables is None: + from .variables.client import VariablesClient # noqa: E402 + + self._variables = VariablesClient(client_wrapper=self._client_wrapper) + return self._variables + + +class AsyncVoiceAgentClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._raw_client = AsyncRawVoiceAgentClient(client_wrapper=client_wrapper) + self._client_wrapper = client_wrapper + self._configurations: typing.Optional[AsyncConfigurationsClient] = None + self._variables: typing.Optional[AsyncVariablesClient] = None + + @property + def with_raw_response(self) -> AsyncRawVoiceAgentClient: + """ + Retrieves a raw implementation of this client that returns raw responses. 
+ + Returns + ------- + AsyncRawVoiceAgentClient + """ + return self._raw_client + + @property + def configurations(self): + if self._configurations is None: + from .configurations.client import AsyncConfigurationsClient # noqa: E402 + + self._configurations = AsyncConfigurationsClient(client_wrapper=self._client_wrapper) + return self._configurations + + @property + def variables(self): + if self._variables is None: + from .variables.client import AsyncVariablesClient # noqa: E402 + + self._variables = AsyncVariablesClient(client_wrapper=self._client_wrapper) + return self._variables diff --git a/src/deepgram/voice_agent/configurations/__init__.py b/src/deepgram/voice_agent/configurations/__init__.py new file mode 100644 index 00000000..5cde0202 --- /dev/null +++ b/src/deepgram/voice_agent/configurations/__init__.py @@ -0,0 +1,4 @@ +# This file was auto-generated by Fern from our API Definition. + +# isort: skip_file + diff --git a/src/deepgram/voice_agent/configurations/client.py b/src/deepgram/voice_agent/configurations/client.py new file mode 100644 index 00000000..54f344b9 --- /dev/null +++ b/src/deepgram/voice_agent/configurations/client.py @@ -0,0 +1,496 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from ...core.request_options import RequestOptions +from ...types.agent_configuration_v1 import AgentConfigurationV1 +from ...types.create_agent_configuration_v1response import CreateAgentConfigurationV1Response +from ...types.delete_agent_configuration_v1response import DeleteAgentConfigurationV1Response +from ...types.list_agent_configurations_v1response import ListAgentConfigurationsV1Response +from .raw_client import AsyncRawConfigurationsClient, RawConfigurationsClient + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) 
+ + +class ConfigurationsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._raw_client = RawConfigurationsClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> RawConfigurationsClient: + """ + Retrieves a raw implementation of this client that returns raw responses. + + Returns + ------- + RawConfigurationsClient + """ + return self._raw_client + + def list( + self, project_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> ListAgentConfigurationsV1Response: + """ + Returns all agent configurations for the specified project. Configurations are returned in their uninterpolated form—template variable placeholders appear as-is rather than with their substituted values. + + Parameters + ---------- + project_id : str + The unique identifier of the project + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ListAgentConfigurationsV1Response + A list of agent configurations + + Examples + -------- + from deepgram import DeepgramClient + + client = DeepgramClient( + api_key="YOUR_API_KEY", + ) + client.voice_agent.configurations.list( + project_id="123456-7890-1234-5678-901234", + ) + """ + _response = self._raw_client.list(project_id, request_options=request_options) + return _response.data + + def create( + self, + project_id: str, + *, + config: str, + metadata: typing.Optional[typing.Dict[str, str]] = OMIT, + api_version: typing.Optional[int] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> CreateAgentConfigurationV1Response: + """ + Creates a new reusable agent configuration. The `config` field must be a valid JSON string representing the `agent` block of a Settings message. The returned `agent_id` can be passed in place of the full `agent` object in future Settings messages. 
+ + Parameters + ---------- + project_id : str + The unique identifier of the project + + config : str + A valid JSON string representing the agent block of a Settings message + + metadata : typing.Optional[typing.Dict[str, str]] + A map of arbitrary key-value pairs for labeling or organizing the agent configuration + + api_version : typing.Optional[int] + API version. Defaults to 1 + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + CreateAgentConfigurationV1Response + Agent configuration created successfully + + Examples + -------- + from deepgram import DeepgramClient + + client = DeepgramClient( + api_key="YOUR_API_KEY", + ) + client.voice_agent.configurations.create( + project_id="123456-7890-1234-5678-901234", + config="config", + ) + """ + _response = self._raw_client.create( + project_id, config=config, metadata=metadata, api_version=api_version, request_options=request_options + ) + return _response.data + + def get( + self, project_id: str, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AgentConfigurationV1: + """ + Returns the specified agent configuration in its uninterpolated form + + Parameters + ---------- + project_id : str + The unique identifier of the project + + agent_id : str + The unique identifier of the agent configuration + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AgentConfigurationV1 + An agent configuration + + Examples + -------- + from deepgram import DeepgramClient + + client = DeepgramClient( + api_key="YOUR_API_KEY", + ) + client.voice_agent.configurations.get( + project_id="123456-7890-1234-5678-901234", + agent_id="a1b2c3d4-e5f6-7890-abcd-ef1234567890", + ) + """ + _response = self._raw_client.get(project_id, agent_id, request_options=request_options) + return _response.data + + def update( + self, + project_id: str, + agent_id: str, + *, + metadata: typing.Dict[str, str], + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentConfigurationV1: + """ + Updates the metadata associated with an agent configuration. The config itself is immutable—to change the configuration, delete the existing agent and create a new one. + + Parameters + ---------- + project_id : str + The unique identifier of the project + + agent_id : str + The unique identifier of the agent configuration + + metadata : typing.Dict[str, str] + A map of string key-value pairs to associate with this agent configuration + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentConfigurationV1 + Agent configuration updated + + Examples + -------- + from deepgram import DeepgramClient + + client = DeepgramClient( + api_key="YOUR_API_KEY", + ) + client.voice_agent.configurations.update( + project_id="123456-7890-1234-5678-901234", + agent_id="a1b2c3d4-e5f6-7890-abcd-ef1234567890", + metadata={"key": "value"}, + ) + """ + _response = self._raw_client.update(project_id, agent_id, metadata=metadata, request_options=request_options) + return _response.data + + def delete( + self, project_id: str, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> DeleteAgentConfigurationV1Response: + """ + Deletes the specified agent configuration. 
Deleting an agent configuration can cause a production outage if your service references this agent UUID. Migrate all active sessions to a new configuration before deleting. + + Parameters + ---------- + project_id : str + The unique identifier of the project + + agent_id : str + The unique identifier of the agent configuration + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + DeleteAgentConfigurationV1Response + Agent configuration deleted + + Examples + -------- + from deepgram import DeepgramClient + + client = DeepgramClient( + api_key="YOUR_API_KEY", + ) + client.voice_agent.configurations.delete( + project_id="123456-7890-1234-5678-901234", + agent_id="a1b2c3d4-e5f6-7890-abcd-ef1234567890", + ) + """ + _response = self._raw_client.delete(project_id, agent_id, request_options=request_options) + return _response.data + + +class AsyncConfigurationsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._raw_client = AsyncRawConfigurationsClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> AsyncRawConfigurationsClient: + """ + Retrieves a raw implementation of this client that returns raw responses. + + Returns + ------- + AsyncRawConfigurationsClient + """ + return self._raw_client + + async def list( + self, project_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> ListAgentConfigurationsV1Response: + """ + Returns all agent configurations for the specified project. Configurations are returned in their uninterpolated form—template variable placeholders appear as-is rather than with their substituted values. + + Parameters + ---------- + project_id : str + The unique identifier of the project + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + ListAgentConfigurationsV1Response + A list of agent configurations + + Examples + -------- + import asyncio + + from deepgram import AsyncDeepgramClient + + client = AsyncDeepgramClient( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.voice_agent.configurations.list( + project_id="123456-7890-1234-5678-901234", + ) + + + asyncio.run(main()) + """ + _response = await self._raw_client.list(project_id, request_options=request_options) + return _response.data + + async def create( + self, + project_id: str, + *, + config: str, + metadata: typing.Optional[typing.Dict[str, str]] = OMIT, + api_version: typing.Optional[int] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> CreateAgentConfigurationV1Response: + """ + Creates a new reusable agent configuration. The `config` field must be a valid JSON string representing the `agent` block of a Settings message. The returned `agent_id` can be passed in place of the full `agent` object in future Settings messages. + + Parameters + ---------- + project_id : str + The unique identifier of the project + + config : str + A valid JSON string representing the agent block of a Settings message + + metadata : typing.Optional[typing.Dict[str, str]] + A map of arbitrary key-value pairs for labeling or organizing the agent configuration + + api_version : typing.Optional[int] + API version. Defaults to 1 + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + CreateAgentConfigurationV1Response + Agent configuration created successfully + + Examples + -------- + import asyncio + + from deepgram import AsyncDeepgramClient + + client = AsyncDeepgramClient( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.voice_agent.configurations.create( + project_id="123456-7890-1234-5678-901234", + config="config", + ) + + + asyncio.run(main()) + """ + _response = await self._raw_client.create( + project_id, config=config, metadata=metadata, api_version=api_version, request_options=request_options + ) + return _response.data + + async def get( + self, project_id: str, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AgentConfigurationV1: + """ + Returns the specified agent configuration in its uninterpolated form + + Parameters + ---------- + project_id : str + The unique identifier of the project + + agent_id : str + The unique identifier of the agent configuration + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentConfigurationV1 + An agent configuration + + Examples + -------- + import asyncio + + from deepgram import AsyncDeepgramClient + + client = AsyncDeepgramClient( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.voice_agent.configurations.get( + project_id="123456-7890-1234-5678-901234", + agent_id="a1b2c3d4-e5f6-7890-abcd-ef1234567890", + ) + + + asyncio.run(main()) + """ + _response = await self._raw_client.get(project_id, agent_id, request_options=request_options) + return _response.data + + async def update( + self, + project_id: str, + agent_id: str, + *, + metadata: typing.Dict[str, str], + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentConfigurationV1: + """ + Updates the metadata associated with an agent configuration. 
The config itself is immutable—to change the configuration, delete the existing agent and create a new one. + + Parameters + ---------- + project_id : str + The unique identifier of the project + + agent_id : str + The unique identifier of the agent configuration + + metadata : typing.Dict[str, str] + A map of string key-value pairs to associate with this agent configuration + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentConfigurationV1 + Agent configuration updated + + Examples + -------- + import asyncio + + from deepgram import AsyncDeepgramClient + + client = AsyncDeepgramClient( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.voice_agent.configurations.update( + project_id="123456-7890-1234-5678-901234", + agent_id="a1b2c3d4-e5f6-7890-abcd-ef1234567890", + metadata={"key": "value"}, + ) + + + asyncio.run(main()) + """ + _response = await self._raw_client.update( + project_id, agent_id, metadata=metadata, request_options=request_options + ) + return _response.data + + async def delete( + self, project_id: str, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> DeleteAgentConfigurationV1Response: + """ + Deletes the specified agent configuration. Deleting an agent configuration can cause a production outage if your service references this agent UUID. Migrate all active sessions to a new configuration before deleting. + + Parameters + ---------- + project_id : str + The unique identifier of the project + + agent_id : str + The unique identifier of the agent configuration + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + DeleteAgentConfigurationV1Response + Agent configuration deleted + + Examples + -------- + import asyncio + + from deepgram import AsyncDeepgramClient + + client = AsyncDeepgramClient( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.voice_agent.configurations.delete( + project_id="123456-7890-1234-5678-901234", + agent_id="a1b2c3d4-e5f6-7890-abcd-ef1234567890", + ) + + + asyncio.run(main()) + """ + _response = await self._raw_client.delete(project_id, agent_id, request_options=request_options) + return _response.data diff --git a/src/deepgram/voice_agent/configurations/raw_client.py b/src/deepgram/voice_agent/configurations/raw_client.py new file mode 100644 index 00000000..6d2695a2 --- /dev/null +++ b/src/deepgram/voice_agent/configurations/raw_client.py @@ -0,0 +1,635 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from json.decoder import JSONDecodeError + +from ...core.api_error import ApiError +from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from ...core.http_response import AsyncHttpResponse, HttpResponse +from ...core.jsonable_encoder import jsonable_encoder +from ...core.request_options import RequestOptions +from ...core.unchecked_base_model import construct_type +from ...errors.bad_request_error import BadRequestError +from ...types.agent_configuration_v1 import AgentConfigurationV1 +from ...types.create_agent_configuration_v1response import CreateAgentConfigurationV1Response +from ...types.delete_agent_configuration_v1response import DeleteAgentConfigurationV1Response +from ...types.list_agent_configurations_v1response import ListAgentConfigurationsV1Response + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) 
+ + +class RawConfigurationsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def list( + self, project_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[ListAgentConfigurationsV1Response]: + """ + Returns all agent configurations for the specified project. Configurations are returned in their uninterpolated form—template variable placeholders appear as-is rather than with their substituted values. + + Parameters + ---------- + project_id : str + The unique identifier of the project + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[ListAgentConfigurationsV1Response] + A list of agent configurations + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/projects/{jsonable_encoder(project_id)}/agents", + base_url=self._client_wrapper.get_environment().base, + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ListAgentConfigurationsV1Response, + construct_type( + type_=ListAgentConfigurationsV1Response, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 400: + raise BadRequestError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def create( + self, + project_id: str, + *, + config: str, + metadata: typing.Optional[typing.Dict[str, str]] = OMIT, + api_version: typing.Optional[int] = OMIT, + request_options: 
typing.Optional[RequestOptions] = None, + ) -> HttpResponse[CreateAgentConfigurationV1Response]: + """ + Creates a new reusable agent configuration. The `config` field must be a valid JSON string representing the `agent` block of a Settings message. The returned `agent_id` can be passed in place of the full `agent` object in future Settings messages. + + Parameters + ---------- + project_id : str + The unique identifier of the project + + config : str + A valid JSON string representing the agent block of a Settings message + + metadata : typing.Optional[typing.Dict[str, str]] + A map of arbitrary key-value pairs for labeling or organizing the agent configuration + + api_version : typing.Optional[int] + API version. Defaults to 1 + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[CreateAgentConfigurationV1Response] + Agent configuration created successfully + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/projects/{jsonable_encoder(project_id)}/agents", + base_url=self._client_wrapper.get_environment().base, + method="POST", + json={ + "config": config, + "metadata": metadata, + "api_version": api_version, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + CreateAgentConfigurationV1Response, + construct_type( + type_=CreateAgentConfigurationV1Response, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 400: + raise BadRequestError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) 
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def get( + self, project_id: str, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[AgentConfigurationV1]: + """ + Returns the specified agent configuration in its uninterpolated form + + Parameters + ---------- + project_id : str + The unique identifier of the project + + agent_id : str + The unique identifier of the agent configuration + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[AgentConfigurationV1] + An agent configuration + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/projects/{jsonable_encoder(project_id)}/agents/{jsonable_encoder(agent_id)}", + base_url=self._client_wrapper.get_environment().base, + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentConfigurationV1, + construct_type( + type_=AgentConfigurationV1, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 400: + raise BadRequestError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def update( + self, + project_id: str, + agent_id: str, + *, + metadata: typing.Dict[str, str], + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[AgentConfigurationV1]: + """ + Updates the metadata associated with an agent configuration. 
The config itself is immutable—to change the configuration, delete the existing agent and create a new one. + + Parameters + ---------- + project_id : str + The unique identifier of the project + + agent_id : str + The unique identifier of the agent configuration + + metadata : typing.Dict[str, str] + A map of string key-value pairs to associate with this agent configuration + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[AgentConfigurationV1] + Agent configuration updated + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/projects/{jsonable_encoder(project_id)}/agents/{jsonable_encoder(agent_id)}", + base_url=self._client_wrapper.get_environment().base, + method="PUT", + json={ + "metadata": metadata, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentConfigurationV1, + construct_type( + type_=AgentConfigurationV1, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 400: + raise BadRequestError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def delete( + self, project_id: str, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[DeleteAgentConfigurationV1Response]: + """ + Deletes the specified agent configuration. Deleting an agent configuration can cause a production outage if your service references this agent UUID. 
Migrate all active sessions to a new configuration before deleting. + + Parameters + ---------- + project_id : str + The unique identifier of the project + + agent_id : str + The unique identifier of the agent configuration + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[DeleteAgentConfigurationV1Response] + Agent configuration deleted + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/projects/{jsonable_encoder(project_id)}/agents/{jsonable_encoder(agent_id)}", + base_url=self._client_wrapper.get_environment().base, + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DeleteAgentConfigurationV1Response, + construct_type( + type_=DeleteAgentConfigurationV1Response, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 400: + raise BadRequestError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + +class AsyncRawConfigurationsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def list( + self, project_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[ListAgentConfigurationsV1Response]: + """ + Returns all agent configurations for the specified project. Configurations are returned in their uninterpolated form—template variable placeholders appear as-is rather than with their substituted values. 
+ + Parameters + ---------- + project_id : str + The unique identifier of the project + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[ListAgentConfigurationsV1Response] + A list of agent configurations + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/projects/{jsonable_encoder(project_id)}/agents", + base_url=self._client_wrapper.get_environment().base, + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ListAgentConfigurationsV1Response, + construct_type( + type_=ListAgentConfigurationsV1Response, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 400: + raise BadRequestError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def create( + self, + project_id: str, + *, + config: str, + metadata: typing.Optional[typing.Dict[str, str]] = OMIT, + api_version: typing.Optional[int] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[CreateAgentConfigurationV1Response]: + """ + Creates a new reusable agent configuration. The `config` field must be a valid JSON string representing the `agent` block of a Settings message. The returned `agent_id` can be passed in place of the full `agent` object in future Settings messages. 
+ + Parameters + ---------- + project_id : str + The unique identifier of the project + + config : str + A valid JSON string representing the agent block of a Settings message + + metadata : typing.Optional[typing.Dict[str, str]] + A map of arbitrary key-value pairs for labeling or organizing the agent configuration + + api_version : typing.Optional[int] + API version. Defaults to 1 + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[CreateAgentConfigurationV1Response] + Agent configuration created successfully + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/projects/{jsonable_encoder(project_id)}/agents", + base_url=self._client_wrapper.get_environment().base, + method="POST", + json={ + "config": config, + "metadata": metadata, + "api_version": api_version, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + CreateAgentConfigurationV1Response, + construct_type( + type_=CreateAgentConfigurationV1Response, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 400: + raise BadRequestError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def get( + self, project_id: str, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[AgentConfigurationV1]: + """ + Returns the specified agent configuration in its 
uninterpolated form + + Parameters + ---------- + project_id : str + The unique identifier of the project + + agent_id : str + The unique identifier of the agent configuration + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[AgentConfigurationV1] + An agent configuration + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/projects/{jsonable_encoder(project_id)}/agents/{jsonable_encoder(agent_id)}", + base_url=self._client_wrapper.get_environment().base, + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentConfigurationV1, + construct_type( + type_=AgentConfigurationV1, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 400: + raise BadRequestError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def update( + self, + project_id: str, + agent_id: str, + *, + metadata: typing.Dict[str, str], + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[AgentConfigurationV1]: + """ + Updates the metadata associated with an agent configuration. The config itself is immutable—to change the configuration, delete the existing agent and create a new one. 
+ + Parameters + ---------- + project_id : str + The unique identifier of the project + + agent_id : str + The unique identifier of the agent configuration + + metadata : typing.Dict[str, str] + A map of string key-value pairs to associate with this agent configuration + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[AgentConfigurationV1] + Agent configuration updated + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/projects/{jsonable_encoder(project_id)}/agents/{jsonable_encoder(agent_id)}", + base_url=self._client_wrapper.get_environment().base, + method="PUT", + json={ + "metadata": metadata, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentConfigurationV1, + construct_type( + type_=AgentConfigurationV1, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 400: + raise BadRequestError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def delete( + self, project_id: str, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[DeleteAgentConfigurationV1Response]: + """ + Deletes the specified agent configuration. Deleting an agent configuration can cause a production outage if your service references this agent UUID. Migrate all active sessions to a new configuration before deleting. 
+ + Parameters + ---------- + project_id : str + The unique identifier of the project + + agent_id : str + The unique identifier of the agent configuration + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[DeleteAgentConfigurationV1Response] + Agent configuration deleted + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/projects/{jsonable_encoder(project_id)}/agents/{jsonable_encoder(agent_id)}", + base_url=self._client_wrapper.get_environment().base, + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DeleteAgentConfigurationV1Response, + construct_type( + type_=DeleteAgentConfigurationV1Response, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 400: + raise BadRequestError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) diff --git a/src/deepgram/voice_agent/raw_client.py b/src/deepgram/voice_agent/raw_client.py new file mode 100644 index 00000000..72d220a7 --- /dev/null +++ b/src/deepgram/voice_agent/raw_client.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper + + +class RawVoiceAgentClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + +class AsyncRawVoiceAgentClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper diff --git a/src/deepgram/voice_agent/variables/__init__.py b/src/deepgram/voice_agent/variables/__init__.py new file mode 100644 index 00000000..5cde0202 --- /dev/null +++ b/src/deepgram/voice_agent/variables/__init__.py @@ -0,0 +1,4 @@ +# This file was auto-generated by Fern from our API Definition. + +# isort: skip_file + diff --git a/src/deepgram/voice_agent/variables/client.py b/src/deepgram/voice_agent/variables/client.py new file mode 100644 index 00000000..663c7cb0 --- /dev/null +++ b/src/deepgram/voice_agent/variables/client.py @@ -0,0 +1,491 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from ...core.request_options import RequestOptions +from ...types.agent_variable_v1 import AgentVariableV1 +from ...types.delete_agent_variable_v1response import DeleteAgentVariableV1Response +from ...types.list_agent_variables_v1response import ListAgentVariablesV1Response +from .raw_client import AsyncRawVariablesClient, RawVariablesClient + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class VariablesClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._raw_client = RawVariablesClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> RawVariablesClient: + """ + Retrieves a raw implementation of this client that returns raw responses. 
+ + Returns + ------- + RawVariablesClient + """ + return self._raw_client + + def list( + self, project_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> ListAgentVariablesV1Response: + """ + Returns all template variables for the specified project + + Parameters + ---------- + project_id : str + The unique identifier of the project + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ListAgentVariablesV1Response + A list of agent variables + + Examples + -------- + from deepgram import DeepgramClient + + client = DeepgramClient( + api_key="YOUR_API_KEY", + ) + client.voice_agent.variables.list( + project_id="123456-7890-1234-5678-901234", + ) + """ + _response = self._raw_client.list(project_id, request_options=request_options) + return _response.data + + def create( + self, + project_id: str, + *, + key: str, + value: typing.Any, + api_version: typing.Optional[int] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentVariableV1: + """ + Creates a new template variable. Variables follow the `DG_` naming format and can substitute any JSON value in an agent configuration. + + Parameters + ---------- + project_id : str + The unique identifier of the project + + key : str + The variable name, following the DG_ format + + value : typing.Any + + api_version : typing.Optional[int] + API version. Defaults to 1 + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AgentVariableV1 + Agent variable created successfully + + Examples + -------- + from deepgram import DeepgramClient + + client = DeepgramClient( + api_key="YOUR_API_KEY", + ) + client.voice_agent.variables.create( + project_id="project_id", + key="key", + value={"key": "value"}, + ) + """ + _response = self._raw_client.create( + project_id, key=key, value=value, api_version=api_version, request_options=request_options + ) + return _response.data + + def get( + self, project_id: str, variable_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AgentVariableV1: + """ + Returns the specified template variable + + Parameters + ---------- + project_id : str + The unique identifier of the project + + variable_id : str + The unique identifier of the agent variable + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentVariableV1 + An agent variable + + Examples + -------- + from deepgram import DeepgramClient + + client = DeepgramClient( + api_key="YOUR_API_KEY", + ) + client.voice_agent.variables.get( + project_id="123456-7890-1234-5678-901234", + variable_id="v1a2b3c4-d5e6-7890-abcd-ef1234567890", + ) + """ + _response = self._raw_client.get(project_id, variable_id, request_options=request_options) + return _response.data + + def delete( + self, project_id: str, variable_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> DeleteAgentVariableV1Response: + """ + Deletes the specified template variable + + Parameters + ---------- + project_id : str + The unique identifier of the project + + variable_id : str + The unique identifier of the agent variable + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + DeleteAgentVariableV1Response + Agent variable deleted + + Examples + -------- + from deepgram import DeepgramClient + + client = DeepgramClient( + api_key="YOUR_API_KEY", + ) + client.voice_agent.variables.delete( + project_id="123456-7890-1234-5678-901234", + variable_id="v1a2b3c4-d5e6-7890-abcd-ef1234567890", + ) + """ + _response = self._raw_client.delete(project_id, variable_id, request_options=request_options) + return _response.data + + def update( + self, + project_id: str, + variable_id: str, + *, + value: typing.Any, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentVariableV1: + """ + Updates the value of an existing template variable + + Parameters + ---------- + project_id : str + The unique identifier of the project + + variable_id : str + The unique identifier of the agent variable + + value : typing.Any + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentVariableV1 + Agent variable updated + + Examples + -------- + from deepgram import DeepgramClient + + client = DeepgramClient( + api_key="YOUR_API_KEY", + ) + client.voice_agent.variables.update( + project_id="project_id", + variable_id="variable_id", + value={"key": "value"}, + ) + """ + _response = self._raw_client.update(project_id, variable_id, value=value, request_options=request_options) + return _response.data + + +class AsyncVariablesClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._raw_client = AsyncRawVariablesClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> AsyncRawVariablesClient: + """ + Retrieves a raw implementation of this client that returns raw responses. 
+ + Returns + ------- + AsyncRawVariablesClient + """ + return self._raw_client + + async def list( + self, project_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> ListAgentVariablesV1Response: + """ + Returns all template variables for the specified project + + Parameters + ---------- + project_id : str + The unique identifier of the project + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ListAgentVariablesV1Response + A list of agent variables + + Examples + -------- + import asyncio + + from deepgram import AsyncDeepgramClient + + client = AsyncDeepgramClient( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.voice_agent.variables.list( + project_id="123456-7890-1234-5678-901234", + ) + + + asyncio.run(main()) + """ + _response = await self._raw_client.list(project_id, request_options=request_options) + return _response.data + + async def create( + self, + project_id: str, + *, + key: str, + value: typing.Any, + api_version: typing.Optional[int] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentVariableV1: + """ + Creates a new template variable. Variables follow the `DG_` naming format and can substitute any JSON value in an agent configuration. + + Parameters + ---------- + project_id : str + The unique identifier of the project + + key : str + The variable name, following the DG_ format + + value : typing.Any + + api_version : typing.Optional[int] + API version. Defaults to 1 + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AgentVariableV1 + Agent variable created successfully + + Examples + -------- + import asyncio + + from deepgram import AsyncDeepgramClient + + client = AsyncDeepgramClient( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.voice_agent.variables.create( + project_id="project_id", + key="key", + value={"key": "value"}, + ) + + + asyncio.run(main()) + """ + _response = await self._raw_client.create( + project_id, key=key, value=value, api_version=api_version, request_options=request_options + ) + return _response.data + + async def get( + self, project_id: str, variable_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AgentVariableV1: + """ + Returns the specified template variable + + Parameters + ---------- + project_id : str + The unique identifier of the project + + variable_id : str + The unique identifier of the agent variable + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AgentVariableV1 + An agent variable + + Examples + -------- + import asyncio + + from deepgram import AsyncDeepgramClient + + client = AsyncDeepgramClient( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.voice_agent.variables.get( + project_id="123456-7890-1234-5678-901234", + variable_id="v1a2b3c4-d5e6-7890-abcd-ef1234567890", + ) + + + asyncio.run(main()) + """ + _response = await self._raw_client.get(project_id, variable_id, request_options=request_options) + return _response.data + + async def delete( + self, project_id: str, variable_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> DeleteAgentVariableV1Response: + """ + Deletes the specified template variable + + Parameters + ---------- + project_id : str + The unique identifier of the project + + variable_id : str + The unique identifier of the agent variable + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + DeleteAgentVariableV1Response + Agent variable deleted + + Examples + -------- + import asyncio + + from deepgram import AsyncDeepgramClient + + client = AsyncDeepgramClient( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.voice_agent.variables.delete( + project_id="123456-7890-1234-5678-901234", + variable_id="v1a2b3c4-d5e6-7890-abcd-ef1234567890", + ) + + + asyncio.run(main()) + """ + _response = await self._raw_client.delete(project_id, variable_id, request_options=request_options) + return _response.data + + async def update( + self, + project_id: str, + variable_id: str, + *, + value: typing.Any, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentVariableV1: + """ + Updates the value of an existing template variable + + Parameters + ---------- + project_id : str + The unique identifier of the project + + variable_id : str + The unique identifier of the agent variable + + value : typing.Any + + request_options : 
typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentVariableV1 + Agent variable updated + + Examples + -------- + import asyncio + + from deepgram import AsyncDeepgramClient + + client = AsyncDeepgramClient( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.voice_agent.variables.update( + project_id="project_id", + variable_id="variable_id", + value={"key": "value"}, + ) + + + asyncio.run(main()) + """ + _response = await self._raw_client.update(project_id, variable_id, value=value, request_options=request_options) + return _response.data diff --git a/src/deepgram/voice_agent/variables/raw_client.py b/src/deepgram/voice_agent/variables/raw_client.py new file mode 100644 index 00000000..22294111 --- /dev/null +++ b/src/deepgram/voice_agent/variables/raw_client.py @@ -0,0 +1,630 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from json.decoder import JSONDecodeError + +from ...core.api_error import ApiError +from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from ...core.http_response import AsyncHttpResponse, HttpResponse +from ...core.jsonable_encoder import jsonable_encoder +from ...core.request_options import RequestOptions +from ...core.unchecked_base_model import construct_type +from ...errors.bad_request_error import BadRequestError +from ...types.agent_variable_v1 import AgentVariableV1 +from ...types.delete_agent_variable_v1response import DeleteAgentVariableV1Response +from ...types.list_agent_variables_v1response import ListAgentVariablesV1Response + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) 
+ + +class RawVariablesClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def list( + self, project_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[ListAgentVariablesV1Response]: + """ + Returns all template variables for the specified project + + Parameters + ---------- + project_id : str + The unique identifier of the project + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[ListAgentVariablesV1Response] + A list of agent variables + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/projects/{jsonable_encoder(project_id)}/agent-variables", + base_url=self._client_wrapper.get_environment().base, + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ListAgentVariablesV1Response, + construct_type( + type_=ListAgentVariablesV1Response, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 400: + raise BadRequestError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def create( + self, + project_id: str, + *, + key: str, + value: typing.Any, + api_version: typing.Optional[int] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[AgentVariableV1]: + """ + Creates a new template variable. Variables follow the `DG_` naming format and can substitute any JSON value in an agent configuration. 
+ + Parameters + ---------- + project_id : str + The unique identifier of the project + + key : str + The variable name, following the DG_ format + + value : typing.Any + + api_version : typing.Optional[int] + API version. Defaults to 1 + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[AgentVariableV1] + Agent variable created successfully + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/projects/{jsonable_encoder(project_id)}/agent-variables", + base_url=self._client_wrapper.get_environment().base, + method="POST", + json={ + "key": key, + "value": value, + "api_version": api_version, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentVariableV1, + construct_type( + type_=AgentVariableV1, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 400: + raise BadRequestError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def get( + self, project_id: str, variable_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[AgentVariableV1]: + """ + Returns the specified template variable + + Parameters + ---------- + project_id : str + The unique identifier of the project + + variable_id : str + The unique identifier of the agent variable + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[AgentVariableV1] + An agent variable + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/projects/{jsonable_encoder(project_id)}/agent-variables/{jsonable_encoder(variable_id)}", + base_url=self._client_wrapper.get_environment().base, + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentVariableV1, + construct_type( + type_=AgentVariableV1, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 400: + raise BadRequestError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def delete( + self, project_id: str, variable_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[DeleteAgentVariableV1Response]: + """ + Deletes the specified template variable + + Parameters + ---------- + project_id : str + The unique identifier of the project + + variable_id : str + The unique identifier of the agent variable + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[DeleteAgentVariableV1Response] + Agent variable deleted + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/projects/{jsonable_encoder(project_id)}/agent-variables/{jsonable_encoder(variable_id)}", + base_url=self._client_wrapper.get_environment().base, + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DeleteAgentVariableV1Response, + construct_type( + type_=DeleteAgentVariableV1Response, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 400: + raise BadRequestError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + def update( + self, + project_id: str, + variable_id: str, + *, + value: typing.Any, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[AgentVariableV1]: + """ + Updates the value of an existing template variable + + Parameters + ---------- + project_id : str + The unique identifier of the project + + variable_id : str + The unique identifier of the agent variable + + value : typing.Any + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[AgentVariableV1] + Agent variable updated + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/projects/{jsonable_encoder(project_id)}/agent-variables/{jsonable_encoder(variable_id)}", + base_url=self._client_wrapper.get_environment().base, + method="PATCH", + json={ + "value": value, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentVariableV1, + construct_type( + type_=AgentVariableV1, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 400: + raise BadRequestError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + +class AsyncRawVariablesClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def list( + self, project_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[ListAgentVariablesV1Response]: + """ + Returns all template variables for the specified project + + Parameters + ---------- + project_id : str + The unique identifier of the project + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[ListAgentVariablesV1Response] + A list of agent variables + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/projects/{jsonable_encoder(project_id)}/agent-variables", + base_url=self._client_wrapper.get_environment().base, + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ListAgentVariablesV1Response, + construct_type( + type_=ListAgentVariablesV1Response, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 400: + raise BadRequestError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def create( + self, + project_id: str, + *, + key: str, + value: typing.Any, + api_version: typing.Optional[int] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[AgentVariableV1]: + """ + Creates a new template variable. Variables follow the `DG_` naming format and can substitute any JSON value in an agent configuration. + + Parameters + ---------- + project_id : str + The unique identifier of the project + + key : str + The variable name, following the DG_ format + + value : typing.Any + + api_version : typing.Optional[int] + API version. Defaults to 1 + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[AgentVariableV1] + Agent variable created successfully + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/projects/{jsonable_encoder(project_id)}/agent-variables", + base_url=self._client_wrapper.get_environment().base, + method="POST", + json={ + "key": key, + "value": value, + "api_version": api_version, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentVariableV1, + construct_type( + type_=AgentVariableV1, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 400: + raise BadRequestError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def get( + self, project_id: str, variable_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[AgentVariableV1]: + """ + Returns the specified template variable + + Parameters + ---------- + project_id : str + The unique identifier of the project + + variable_id : str + The unique identifier of the agent variable + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[AgentVariableV1] + An agent variable + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/projects/{jsonable_encoder(project_id)}/agent-variables/{jsonable_encoder(variable_id)}", + base_url=self._client_wrapper.get_environment().base, + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentVariableV1, + construct_type( + type_=AgentVariableV1, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 400: + raise BadRequestError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def delete( + self, project_id: str, variable_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[DeleteAgentVariableV1Response]: + """ + Deletes the specified template variable + + Parameters + ---------- + project_id : str + The unique identifier of the project + + variable_id : str + The unique identifier of the agent variable + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[DeleteAgentVariableV1Response] + Agent variable deleted + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/projects/{jsonable_encoder(project_id)}/agent-variables/{jsonable_encoder(variable_id)}", + base_url=self._client_wrapper.get_environment().base, + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DeleteAgentVariableV1Response, + construct_type( + type_=DeleteAgentVariableV1Response, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 400: + raise BadRequestError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) + + async def update( + self, + project_id: str, + variable_id: str, + *, + value: typing.Any, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[AgentVariableV1]: + """ + Updates the value of an existing template variable + + Parameters + ---------- + project_id : str + The unique identifier of the project + + variable_id : str + The unique identifier of the agent variable + + value : typing.Any + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[AgentVariableV1] + Agent variable updated + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/projects/{jsonable_encoder(project_id)}/agent-variables/{jsonable_encoder(variable_id)}", + base_url=self._client_wrapper.get_environment().base, + method="PATCH", + json={ + "value": value, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentVariableV1, + construct_type( + type_=AgentVariableV1, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 400: + raise BadRequestError( + headers=dict(_response.headers), + body=typing.cast( + typing.Any, + construct_type( + type_=typing.Any, # type: ignore + object_=_response.json(), + ), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text) + raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json) diff --git a/tests/custom/test_websocket_control_messages.py b/tests/custom/test_websocket_control_messages.py deleted file mode 100644 index 68ff778a..00000000 --- a/tests/custom/test_websocket_control_messages.py +++ /dev/null @@ -1,186 +0,0 @@ -"""Tests that control send_ methods work without requiring a message argument. - -Regression test for the breaking change where optional message params were lost -during a Fern regen, causing TypeError for callers using no-arg control calls. 
-""" - -import json -from unittest.mock import AsyncMock, MagicMock - -import pytest - -from deepgram.agent.v1.socket_client import AsyncV1SocketClient as AsyncAgentV1SocketClient -from deepgram.agent.v1.socket_client import V1SocketClient as AgentV1SocketClient -from deepgram.listen.v1.socket_client import AsyncV1SocketClient as AsyncListenV1SocketClient -from deepgram.listen.v1.socket_client import V1SocketClient as ListenV1SocketClient -from deepgram.listen.v2.socket_client import AsyncV2SocketClient as AsyncListenV2SocketClient -from deepgram.listen.v2.socket_client import V2SocketClient as ListenV2SocketClient -from deepgram.speak.v1.socket_client import AsyncV1SocketClient as AsyncSpeakV1SocketClient -from deepgram.speak.v1.socket_client import V1SocketClient as SpeakV1SocketClient - - -# --------------------------------------------------------------------------- -# Helpers -# --------------------------------------------------------------------------- - -def _make_async_ws(): - ws = AsyncMock() - ws.send = AsyncMock() - return ws - - -def _make_sync_ws(): - ws = MagicMock() - ws.send = MagicMock() - return ws - - -def _sent_json(ws): - """Return the parsed JSON from the first send() call.""" - call_args = ws.send.call_args - data = call_args[0][0] - return json.loads(data) - - -# --------------------------------------------------------------------------- -# speak/v1 — async -# --------------------------------------------------------------------------- - -class TestAsyncSpeakV1ControlMessages: - async def test_send_flush_no_args(self): - ws = _make_async_ws() - client = AsyncSpeakV1SocketClient(websocket=ws) - await client.send_flush() - assert _sent_json(ws)["type"] == "Flush" - - async def test_send_clear_no_args(self): - ws = _make_async_ws() - client = AsyncSpeakV1SocketClient(websocket=ws) - await client.send_clear() - assert _sent_json(ws)["type"] == "Clear" - - async def test_send_close_no_args(self): - ws = _make_async_ws() - client = 
AsyncSpeakV1SocketClient(websocket=ws) - await client.send_close() - assert _sent_json(ws)["type"] == "Close" - - -# --------------------------------------------------------------------------- -# speak/v1 — sync -# --------------------------------------------------------------------------- - -class TestSyncSpeakV1ControlMessages: - def test_send_flush_no_args(self): - ws = _make_sync_ws() - client = SpeakV1SocketClient(websocket=ws) - client.send_flush() - assert _sent_json(ws)["type"] == "Flush" - - def test_send_clear_no_args(self): - ws = _make_sync_ws() - client = SpeakV1SocketClient(websocket=ws) - client.send_clear() - assert _sent_json(ws)["type"] == "Clear" - - def test_send_close_no_args(self): - ws = _make_sync_ws() - client = SpeakV1SocketClient(websocket=ws) - client.send_close() - assert _sent_json(ws)["type"] == "Close" - - -# --------------------------------------------------------------------------- -# listen/v1 — async -# --------------------------------------------------------------------------- - -class TestAsyncListenV1ControlMessages: - async def test_send_finalize_no_args(self): - ws = _make_async_ws() - client = AsyncListenV1SocketClient(websocket=ws) - await client.send_finalize() - assert _sent_json(ws)["type"] == "Finalize" - - async def test_send_close_stream_no_args(self): - ws = _make_async_ws() - client = AsyncListenV1SocketClient(websocket=ws) - await client.send_close_stream() - assert _sent_json(ws)["type"] == "CloseStream" - - async def test_send_keep_alive_no_args(self): - ws = _make_async_ws() - client = AsyncListenV1SocketClient(websocket=ws) - await client.send_keep_alive() - assert _sent_json(ws)["type"] == "KeepAlive" - - -# --------------------------------------------------------------------------- -# listen/v1 — sync -# --------------------------------------------------------------------------- - -class TestSyncListenV1ControlMessages: - def test_send_finalize_no_args(self): - ws = _make_sync_ws() - client = 
ListenV1SocketClient(websocket=ws) - client.send_finalize() - assert _sent_json(ws)["type"] == "Finalize" - - def test_send_close_stream_no_args(self): - ws = _make_sync_ws() - client = ListenV1SocketClient(websocket=ws) - client.send_close_stream() - assert _sent_json(ws)["type"] == "CloseStream" - - def test_send_keep_alive_no_args(self): - ws = _make_sync_ws() - client = ListenV1SocketClient(websocket=ws) - client.send_keep_alive() - assert _sent_json(ws)["type"] == "KeepAlive" - - -# --------------------------------------------------------------------------- -# listen/v2 — async -# --------------------------------------------------------------------------- - -class TestAsyncListenV2ControlMessages: - async def test_send_close_stream_no_args(self): - ws = _make_async_ws() - client = AsyncListenV2SocketClient(websocket=ws) - await client.send_close_stream() - assert _sent_json(ws)["type"] == "CloseStream" - - -# --------------------------------------------------------------------------- -# listen/v2 — sync -# --------------------------------------------------------------------------- - -class TestSyncListenV2ControlMessages: - def test_send_close_stream_no_args(self): - ws = _make_sync_ws() - client = ListenV2SocketClient(websocket=ws) - client.send_close_stream() - assert _sent_json(ws)["type"] == "CloseStream" - - -# --------------------------------------------------------------------------- -# agent/v1 — async -# --------------------------------------------------------------------------- - -class TestAsyncAgentV1ControlMessages: - async def test_send_keep_alive_no_args(self): - ws = _make_async_ws() - client = AsyncAgentV1SocketClient(websocket=ws) - await client.send_keep_alive() - assert _sent_json(ws)["type"] == "KeepAlive" - - -# --------------------------------------------------------------------------- -# agent/v1 — sync -# --------------------------------------------------------------------------- - -class TestSyncAgentV1ControlMessages: - def 
test_send_keep_alive_no_args(self): - ws = _make_sync_ws() - client = AgentV1SocketClient(websocket=ws) - client.send_keep_alive() - assert _sent_json(ws)["type"] == "KeepAlive" diff --git a/tests/wire/test_voiceAgent_configurations.py b/tests/wire/test_voiceAgent_configurations.py new file mode 100644 index 00000000..5ddb40a5 --- /dev/null +++ b/tests/wire/test_voiceAgent_configurations.py @@ -0,0 +1,66 @@ +from .conftest import get_client, verify_request_count + + +def test_voiceAgent_configurations_list_() -> None: + """Test list endpoint with WireMock""" + test_id = "voice_agent.configurations.list_.0" + client = get_client(test_id) + client.voice_agent.configurations.list( + project_id="123456-7890-1234-5678-901234", + ) + verify_request_count(test_id, "GET", "/v1/projects/123456-7890-1234-5678-901234/agents", None, 1) + + +def test_voiceAgent_configurations_create() -> None: + """Test create endpoint with WireMock""" + test_id = "voice_agent.configurations.create.0" + client = get_client(test_id) + client.voice_agent.configurations.create( + project_id="123456-7890-1234-5678-901234", + config="config", + ) + verify_request_count(test_id, "POST", "/v1/projects/123456-7890-1234-5678-901234/agents", None, 1) + + +def test_voiceAgent_configurations_get() -> None: + """Test get endpoint with WireMock""" + test_id = "voice_agent.configurations.get.0" + client = get_client(test_id) + client.voice_agent.configurations.get( + project_id="123456-7890-1234-5678-901234", + agent_id="a1b2c3d4-e5f6-7890-abcd-ef1234567890", + ) + verify_request_count( + test_id, "GET", "/v1/projects/123456-7890-1234-5678-901234/agents/a1b2c3d4-e5f6-7890-abcd-ef1234567890", None, 1 + ) + + +def test_voiceAgent_configurations_update() -> None: + """Test update endpoint with WireMock""" + test_id = "voice_agent.configurations.update.0" + client = get_client(test_id) + client.voice_agent.configurations.update( + project_id="123456-7890-1234-5678-901234", + 
agent_id="a1b2c3d4-e5f6-7890-abcd-ef1234567890", + metadata={"key": "value"}, + ) + verify_request_count( + test_id, "PUT", "/v1/projects/123456-7890-1234-5678-901234/agents/a1b2c3d4-e5f6-7890-abcd-ef1234567890", None, 1 + ) + + +def test_voiceAgent_configurations_delete() -> None: + """Test delete endpoint with WireMock""" + test_id = "voice_agent.configurations.delete.0" + client = get_client(test_id) + client.voice_agent.configurations.delete( + project_id="123456-7890-1234-5678-901234", + agent_id="a1b2c3d4-e5f6-7890-abcd-ef1234567890", + ) + verify_request_count( + test_id, + "DELETE", + "/v1/projects/123456-7890-1234-5678-901234/agents/a1b2c3d4-e5f6-7890-abcd-ef1234567890", + None, + 1, + ) diff --git a/tests/wire/test_voiceAgent_variables.py b/tests/wire/test_voiceAgent_variables.py new file mode 100644 index 00000000..a325c663 --- /dev/null +++ b/tests/wire/test_voiceAgent_variables.py @@ -0,0 +1,69 @@ +from .conftest import get_client, verify_request_count + + +def test_voiceAgent_variables_list_() -> None: + """Test list endpoint with WireMock""" + test_id = "voice_agent.variables.list_.0" + client = get_client(test_id) + client.voice_agent.variables.list( + project_id="123456-7890-1234-5678-901234", + ) + verify_request_count(test_id, "GET", "/v1/projects/123456-7890-1234-5678-901234/agent-variables", None, 1) + + +def test_voiceAgent_variables_create() -> None: + """Test create endpoint with WireMock""" + test_id = "voice_agent.variables.create.0" + client = get_client(test_id) + client.voice_agent.variables.create( + project_id="project_id", + key="key", + value={"key": "value"}, + ) + verify_request_count(test_id, "POST", "/v1/projects/project_id/agent-variables", None, 1) + + +def test_voiceAgent_variables_get() -> None: + """Test get endpoint with WireMock""" + test_id = "voice_agent.variables.get.0" + client = get_client(test_id) + client.voice_agent.variables.get( + project_id="123456-7890-1234-5678-901234", + 
variable_id="v1a2b3c4-d5e6-7890-abcd-ef1234567890", + ) + verify_request_count( + test_id, + "GET", + "/v1/projects/123456-7890-1234-5678-901234/agent-variables/v1a2b3c4-d5e6-7890-abcd-ef1234567890", + None, + 1, + ) + + +def test_voiceAgent_variables_delete() -> None: + """Test delete endpoint with WireMock""" + test_id = "voice_agent.variables.delete.0" + client = get_client(test_id) + client.voice_agent.variables.delete( + project_id="123456-7890-1234-5678-901234", + variable_id="v1a2b3c4-d5e6-7890-abcd-ef1234567890", + ) + verify_request_count( + test_id, + "DELETE", + "/v1/projects/123456-7890-1234-5678-901234/agent-variables/v1a2b3c4-d5e6-7890-abcd-ef1234567890", + None, + 1, + ) + + +def test_voiceAgent_variables_update() -> None: + """Test update endpoint with WireMock""" + test_id = "voice_agent.variables.update.0" + client = get_client(test_id) + client.voice_agent.variables.update( + project_id="project_id", + variable_id="variable_id", + value={"key": "value"}, + ) + verify_request_count(test_id, "PATCH", "/v1/projects/project_id/agent-variables/variable_id", None, 1) diff --git a/wiremock/wiremock-mappings.json b/wiremock/wiremock-mappings.json index 4248699c..630c6a96 100644 --- a/wiremock/wiremock-mappings.json +++ b/wiremock/wiremock-mappings.json @@ -1 +1 @@ -{"mappings":[{"id":"533b5d52-ab21-4763-aaae-87cf52f49aa5","name":"List Agent Think Models - default","request":{"urlPathTemplate":"/v1/agent/settings/think/models","method":"GET"},"response":{"status":200,"body":"{\n \"models\": [\n {\n \"id\": \"gpt-5\",\n \"name\": \"name\",\n \"provider\": \"open_ai\"\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"533b5d52-ab21-4763-aaae-87cf52f49aa5","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}},"postServeActions":[]},{"id":"20e1029e-8bb9-4092-a809-b943e60822ef","name":"Token-based Authentication - 
default","request":{"urlPathTemplate":"/v1/auth/grant","method":"POST"},"response":{"status":200,"body":"{\n \"access_token\": \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIn0.dozjgNryP4J3jVmNHl0w5N_XgL0n3I9PlFUP0THsR8U\",\n \"expires_in\": 30\n}","headers":{"Content-Type":"application/json"}},"uuid":"20e1029e-8bb9-4092-a809-b943e60822ef","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"49d8d51a-7f01-4598-804f-b6f54cdc22da","name":"Transcribe and analyze pre-recorded audio and video - default","request":{"urlPathTemplate":"/v1/listen","method":"POST","queryParameters":{"callback":{"equalTo":"callback"},"callback_method":{"equalTo":"POST"},"extra":{"equalTo":"extra"},"sentiment":{"equalTo":"true"},"summarize":{"equalTo":"v2"},"tag":{"equalTo":"tag"},"topics":{"equalTo":"true"},"custom_topic":{"equalTo":"custom_topic"},"custom_topic_mode":{"equalTo":"extended"},"intents":{"equalTo":"true"},"custom_intent":{"equalTo":"custom_intent"},"custom_intent_mode":{"equalTo":"extended"},"detect_entities":{"equalTo":"true"},"detect_language":{"equalTo":"true"},"diarize":{"equalTo":"true"},"dictation":{"equalTo":"true"},"encoding":{"equalTo":"linear16"},"filler_words":{"equalTo":"true"},"keywords":{"equalTo":"keywords"},"language":{"equalTo":"language"},"measurements":{"equalTo":"true"},"model":{"equalTo":"nova-3"},"multichannel":{"equalTo":"true"},"numerals":{"equalTo":"true"},"paragraphs":{"equalTo":"true"},"profanity_filter":{"equalTo":"true"},"punctuate":{"equalTo":"true"},"redact":{"equalTo":"redact"},"replace":{"equalTo":"replace"},"search":{"equalTo":"search"},"smart_format":{"equalTo":"true"},"utterances":{"equalTo":"true"},"utt_split":{"equalTo":"1.1"},"version":{"equalTo":"latest"},"mip_opt_out":{"equalTo":"true"}}},"response":{"status":200,"body":"{\n \"metadata\": {\n \"request_id\": \"a847f427-4ad5-4d67-9b95-db801e58251c\",\n \"sha256\": 
\"154e291ecfa8be6ab8343560bcc109008fa7853eb5372533e8efdefc9b504c33\",\n \"created\": \"2024-05-12T18:57:13Z\",\n \"duration\": 25.933313,\n \"channels\": 1,\n \"models\": [\n \"30089e05-99d1-4376-b32e-c263170674af\"\n ],\n \"model_info\": {\n \"30089e05-99d1-4376-b32e-c263170674af\": {\n \"name\": \"2-general-nova\",\n \"version\": \"2024-01-09.29447\",\n \"arch\": \"nova-2\"\n }\n },\n \"summary_info\": {\n \"model_uuid\": \"67875a7f-c9c4-48a0-aa55-5bdb8a91c34a\",\n \"input_tokens\": 95,\n \"output_tokens\": 63\n },\n \"sentiment_info\": {\n \"model_uuid\": \"80ab3179-d113-4254-bd6b-4a2f96498695\",\n \"input_tokens\": 105,\n \"output_tokens\": 105\n },\n \"topics_info\": {\n \"model_uuid\": \"80ab3179-d113-4254-bd6b-4a2f96498695\",\n \"input_tokens\": 105,\n \"output_tokens\": 7\n },\n \"intents_info\": {\n \"model_uuid\": \"80ab3179-d113-4254-bd6b-4a2f96498695\",\n \"input_tokens\": 105,\n \"output_tokens\": 4\n },\n \"tags\": [\n \"test\"\n ]\n },\n \"results\": {\n \"channels\": [\n {}\n ],\n \"utterances\": [\n {}\n ],\n \"summary\": {\n \"result\": \"success\",\n \"short\": \"Speaker 0 discusses the significance of the first all-female spacewalk with an all-female team, stating that it is a tribute to the skilled and qualified women who were denied opportunities in the past.\"\n },\n \"topics\": {\n \"results\": {\n \"topics\": {\n \"segments\": [\n {\n \"text\": \"And, um, I think if it signifies anything, it is, uh, to honor the the women who came before us who, um, were skilled and qualified, um, and didn't get the the same opportunities that we have today.\",\n \"start_word\": 32,\n \"end_word\": 69,\n \"topics\": [\n {\n \"topic\": \"Spacewalk\",\n \"confidence_score\": 0.91581345\n }\n ]\n }\n ]\n }\n }\n },\n \"intents\": {\n \"results\": {\n \"intents\": {\n \"segments\": [\n {\n \"text\": \"If you found this valuable, you can subscribe to the show on spotify or your favorite podcast app.\",\n \"start_word\": 354,\n \"end_word\": 414,\n \"intents\": 
[\n {\n \"intent\": \"Encourage podcasting\",\n \"confidence_score\": 0.0038975573\n }\n ]\n }\n ]\n }\n }\n },\n \"sentiments\": {\n \"segments\": [\n {\n \"text\": \"Yeah. As as much as, um, it's worth celebrating, uh, the first, uh, spacewalk, um, with an all-female team, I think many of us are looking forward to it just being normal. And, um, I think if it signifies anything, it is, uh, to honor the the women who came before us who, um, were skilled and qualified, um, and didn't get the the same opportunities that we have today.\",\n \"start_word\": 0,\n \"end_word\": 69,\n \"sentiment\": \"positive\",\n \"sentiment_score\": 0.5810546875\n }\n ],\n \"average\": {\n \"sentiment\": \"positive\",\n \"sentiment_score\": 0.5810185185185185\n }\n }\n }\n}","headers":{"Content-Type":"application/json"}},"uuid":"49d8d51a-7f01-4598-804f-b6f54cdc22da","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"0a0be61b-f024-4120-9c54-23bca3e07c93","name":"List Models - default","request":{"urlPathTemplate":"/v1/models","method":"GET","queryParameters":{"include_outdated":{"equalTo":"true"}}},"response":{"status":200,"body":"{\n \"stt\": [\n {\n \"name\": \"nova-3\",\n \"canonical_name\": \"nova-3\",\n \"architecture\": \"base\",\n \"languages\": [\n \"en\",\n \"en-us\"\n ],\n \"version\": \"2021-11-10.1\",\n \"uuid\": \"6b28e919-8427-4f32-9847-492e2efd7daf\",\n \"batch\": true,\n \"streaming\": true,\n \"formatted_output\": true\n }\n ],\n \"tts\": [\n {\n \"name\": \"zeus\",\n \"canonical_name\": \"aura-2-zeus-en\",\n \"architecture\": \"aura-2\",\n \"languages\": [\n \"en\",\n \"en-US\"\n ],\n \"version\": \"2025-04-07.0\",\n \"uuid\": \"2baf189d-91ac-481d-b6d1-750888667b31\",\n \"metadata\": {\n \"accent\": \"American\",\n \"age\": \"Adult\",\n \"color\": \"#C58DFF\",\n \"image\": \"https://static.deepgram.com/examples/avatars/zeus.jpg\",\n \"sample\": 
\"https://static.deepgram.com/examples/Aura-2-zeus.wav\",\n \"tags\": [\n \"masculine\",\n \"deep\",\n \"trustworthy\",\n \"smooth\"\n ],\n \"use_cases\": [\n \"IVR\"\n ]\n }\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"0a0be61b-f024-4120-9c54-23bca3e07c93","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}},"postServeActions":[]},{"id":"0f94d3ea-43b6-4a1a-bce4-ab05b85440ae","name":"Get a specific Model - default","request":{"urlPathTemplate":"/v1/models/{model_id}","method":"GET","pathParameters":{"model_id":{"equalTo":"af6e9977-99f6-4d8f-b6f5-dfdf6fb6e291"}}},"response":{"status":200,"body":"{\n \"name\": \"general\",\n \"canonical_name\": \"enhanced-general\",\n \"architecture\": \"polaris\",\n \"languages\": [\n \"en\",\n \"en-us\"\n ],\n \"version\": \"2022-05-18.1\",\n \"uuid\": \"c7226e9e-ae1c-4057-ae2a-a71a6b0dc588\",\n \"batch\": true,\n \"streaming\": true,\n \"formatted_output\": false\n}","headers":{"Content-Type":"application/json"}},"uuid":"0f94d3ea-43b6-4a1a-bce4-ab05b85440ae","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"d08cac56-bc4d-4756-8fd1-d508914334d5","name":"List Projects - default","request":{"urlPathTemplate":"/v1/projects","method":"GET"},"response":{"status":200,"body":"{\n \"projects\": [\n {\n \"project_id\": \"project_id\",\n \"name\": \"name\"\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"d08cac56-bc4d-4756-8fd1-d508914334d5","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}},"postServeActions":[]},{"id":"6f0163a8-530c-4e25-bbe0-9ca86b9525dc","name":"Get a Project - 
default","request":{"urlPathTemplate":"/v1/projects/{project_id}","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"limit":{"equalTo":"1.1"},"page":{"equalTo":"1.1"}}},"response":{"status":200,"body":"{\n \"project_id\": \"project_id\",\n \"mip_opt_out\": true,\n \"name\": \"name\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"6f0163a8-530c-4e25-bbe0-9ca86b9525dc","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"68918577-6401-4439-8533-356257ff7bcf","name":"Delete a Project - default","request":{"urlPathTemplate":"/v1/projects/{project_id}","method":"DELETE","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"message\": \"message\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"68918577-6401-4439-8533-356257ff7bcf","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"659fc38b-3934-4e43-93bf-d331f547449e","name":"Update a Project - default","request":{"urlPathTemplate":"/v1/projects/{project_id}","method":"PATCH","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"message\": \"Successfully updated project info.\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"659fc38b-3934-4e43-93bf-d331f547449e","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"43ae6622-ad2f-4c81-9bc9-a8bbe17ef9d8","name":"Leave a Project - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/leave","method":"DELETE","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"message\": 
\"message\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"43ae6622-ad2f-4c81-9bc9-a8bbe17ef9d8","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"032d9b1c-3b87-40fb-bfab-8c5be92a5d71","name":"List Project Keys - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/keys","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"status":{"equalTo":"active"}}},"response":{"status":200,"body":"{\n \"api_keys\": [\n {\n \"member\": {\n \"member_id\": \"1000-2000-3000-4000\",\n \"email\": \"john@test.com\"\n },\n \"api_key\": {\n \"api_key_id\": \"1234567890abcdef1234567890abcdef\",\n \"comment\": \"A comment\",\n \"scopes\": [\n \"admin\"\n ],\n \"created\": \"2021-01-01T00:00:00Z\"\n }\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"032d9b1c-3b87-40fb-bfab-8c5be92a5d71","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"0167c735-0b6f-4715-8df8-32300d4dae72","name":"Create a Project Key - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/keys","method":"POST","pathParameters":{"project_id":{"equalTo":"project_id"}}},"response":{"status":200,"body":"{\n \"api_key_id\": \"api_key_id\",\n \"key\": \"key\",\n \"comment\": \"comment\",\n \"scopes\": [\n \"scopes\",\n \"scopes\"\n ],\n \"tags\": [\n \"tags\",\n \"tags\"\n ],\n \"expiration_date\": \"2024-01-15T09:30:00Z\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"0167c735-0b6f-4715-8df8-32300d4dae72","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"c9812dd3-f87e-4798-aec3-af0933330dd5","name":"Get a Project Key - 
default","request":{"urlPathTemplate":"/v1/projects/{project_id}/keys/{key_id}","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"key_id":{"equalTo":"123456789012345678901234"}}},"response":{"status":200,"body":"{\n \"item\": {\n \"member\": {\n \"member_id\": \"1000-2000-3000-4000\",\n \"email\": \"john@test.com\",\n \"first_name\": \"John\",\n \"last_name\": \"Doe\",\n \"api_key\": {\n \"api_key_id\": \"1000-2000-3000-4000\",\n \"comment\": \"A comment\",\n \"scopes\": [\n \"admin\"\n ],\n \"tags\": [\n \"prod\",\n \"west-region\"\n ],\n \"expiration_date\": \"2021-01-01T00:00:00Z\",\n \"created\": \"2021-01-01T00:00:00Z\"\n }\n }\n }\n}","headers":{"Content-Type":"application/json"}},"uuid":"c9812dd3-f87e-4798-aec3-af0933330dd5","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"3d2fbc7c-7bac-436f-a6ac-abe1b2c2caac","name":"Delete a Project Key - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/keys/{key_id}","method":"DELETE","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"key_id":{"equalTo":"123456789012345678901234"}}},"response":{"status":200,"body":"{\n \"message\": \"message\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"3d2fbc7c-7bac-436f-a6ac-abe1b2c2caac","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"91e103d5-72f7-463d-840d-310069e33de9","name":"List Project Members - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/members","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"members\": [\n {\n \"member_id\": \"member_id\",\n \"email\": \"email\"\n }\n 
]\n}","headers":{"Content-Type":"application/json"}},"uuid":"91e103d5-72f7-463d-840d-310069e33de9","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"515c6f7e-09c3-43ea-ad6c-65bc11d20f46","name":"Delete a Project Member - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/members/{member_id}","method":"DELETE","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"member_id":{"equalTo":"123456789012345678901234"}}},"response":{"status":200,"body":"{\n \"message\": \"message\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"515c6f7e-09c3-43ea-ad6c-65bc11d20f46","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"a920ad0e-2796-4361-ac16-ac83fb75e32a","name":"List Project Models - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/models","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"include_outdated":{"equalTo":"true"}}},"response":{"status":200,"body":"{\n \"stt\": [\n {\n \"name\": \"nova-3\",\n \"canonical_name\": \"nova-3\",\n \"architecture\": \"base\",\n \"languages\": [\n \"en\",\n \"en-us\"\n ],\n \"version\": \"2021-11-10.1\",\n \"uuid\": \"6b28e919-8427-4f32-9847-492e2efd7daf\",\n \"batch\": true,\n \"streaming\": true,\n \"formatted_output\": true\n }\n ],\n \"tts\": [\n {\n \"name\": \"zeus\",\n \"canonical_name\": \"aura-2-zeus-en\",\n \"architecture\": \"aura-2\",\n \"languages\": [\n \"en\",\n \"en-US\"\n ],\n \"version\": \"2025-04-07.0\",\n \"uuid\": \"2baf189d-91ac-481d-b6d1-750888667b31\",\n \"metadata\": {\n \"accent\": \"American\",\n \"age\": \"Adult\",\n \"color\": \"#C58DFF\",\n \"image\": \"https://static.deepgram.com/examples/avatars/zeus.jpg\",\n \"sample\": \"https://static.deepgram.com/examples/Aura-2-zeus.wav\",\n \"tags\": [\n \"masculine\",\n \"deep\",\n \"trustworthy\",\n 
\"smooth\"\n ],\n \"use_cases\": [\n \"IVR\"\n ]\n }\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"a920ad0e-2796-4361-ac16-ac83fb75e32a","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"9f8c6bf2-ebee-4956-b39f-0291b9d64b6e","name":"Get a Project Model - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/models/{model_id}","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"model_id":{"equalTo":"af6e9977-99f6-4d8f-b6f5-dfdf6fb6e291"}}},"response":{"status":200,"body":"{\n \"name\": \"general\",\n \"canonical_name\": \"enhanced-general\",\n \"architecture\": \"polaris\",\n \"languages\": [\n \"en\",\n \"en-us\"\n ],\n \"version\": \"2022-05-18.1\",\n \"uuid\": \"c7226e9e-ae1c-4057-ae2a-a71a6b0dc588\",\n \"batch\": true,\n \"streaming\": true,\n \"formatted_output\": false\n}","headers":{"Content-Type":"application/json"}},"uuid":"9f8c6bf2-ebee-4956-b39f-0291b9d64b6e","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"d6a14959-05fa-4aec-9f0c-ba2a817c66e5","name":"List Project Requests - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/requests","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"start":{"equalTo":"2024-01-15T09:30:00Z"},"end":{"equalTo":"2024-01-15T09:30:00Z"},"limit":{"equalTo":"1.1"},"page":{"equalTo":"1.1"},"accessor":{"equalTo":"12345678-1234-1234-1234-123456789012"},"request_id":{"equalTo":"12345678-1234-1234-1234-123456789012"},"deployment":{"equalTo":"hosted"},"endpoint":{"equalTo":"listen"},"method":{"equalTo":"sync"},"status":{"equalTo":"succeeded"}}},"response":{"status":200,"body":"{\n \"page\": 1.1,\n \"limit\": 1.1,\n \"requests\": [\n {\n \"request_id\": \"request_id\",\n \"project_uuid\": \"project_uuid\",\n \"created\": \"2024-01-15T09:30:00Z\",\n 
\"path\": \"path\",\n \"api_key_id\": \"api_key_id\",\n \"response\": {\n \"key\": \"value\"\n },\n \"code\": 1.1,\n \"deployment\": \"deployment\",\n \"callback\": \"callback\"\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"d6a14959-05fa-4aec-9f0c-ba2a817c66e5","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"733e39aa-d3ef-4ea7-8062-af080c6288c4","name":"Get a Project Request - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/requests/{request_id}","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"request_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"request\": {\n \"request_id\": \"request_id\",\n \"project_uuid\": \"project_uuid\",\n \"created\": \"2024-01-15T09:30:00Z\",\n \"path\": \"path\",\n \"api_key_id\": \"api_key_id\",\n \"response\": {\n \"key\": \"value\"\n },\n \"code\": 1.1,\n \"deployment\": \"deployment\",\n \"callback\": \"callback\"\n }\n}","headers":{"Content-Type":"application/json"}},"uuid":"733e39aa-d3ef-4ea7-8062-af080c6288c4","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"6309dd55-c993-4ce1-b0b2-01a41c9f08d6","name":"Get Project Usage - 
default","request":{"urlPathTemplate":"/v1/projects/{project_id}/usage","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"start":{"equalTo":"start"},"end":{"equalTo":"end"},"accessor":{"equalTo":"12345678-1234-1234-1234-123456789012"},"alternatives":{"equalTo":"true"},"callback_method":{"equalTo":"true"},"callback":{"equalTo":"true"},"channels":{"equalTo":"true"},"custom_intent_mode":{"equalTo":"true"},"custom_intent":{"equalTo":"true"},"custom_topic_mode":{"equalTo":"true"},"custom_topic":{"equalTo":"true"},"deployment":{"equalTo":"hosted"},"detect_entities":{"equalTo":"true"},"detect_language":{"equalTo":"true"},"diarize":{"equalTo":"true"},"dictation":{"equalTo":"true"},"encoding":{"equalTo":"true"},"endpoint":{"equalTo":"listen"},"extra":{"equalTo":"true"},"filler_words":{"equalTo":"true"},"intents":{"equalTo":"true"},"keyterm":{"equalTo":"true"},"keywords":{"equalTo":"true"},"language":{"equalTo":"true"},"measurements":{"equalTo":"true"},"method":{"equalTo":"sync"},"model":{"equalTo":"6f548761-c9c0-429a-9315-11a1d28499c8"},"multichannel":{"equalTo":"true"},"numerals":{"equalTo":"true"},"paragraphs":{"equalTo":"true"},"profanity_filter":{"equalTo":"true"},"punctuate":{"equalTo":"true"},"redact":{"equalTo":"true"},"replace":{"equalTo":"true"},"sample_rate":{"equalTo":"true"},"search":{"equalTo":"true"},"sentiment":{"equalTo":"true"},"smart_format":{"equalTo":"true"},"summarize":{"equalTo":"true"},"tag":{"equalTo":"tag1"},"topics":{"equalTo":"true"},"utt_split":{"equalTo":"true"},"utterances":{"equalTo":"true"},"version":{"equalTo":"true"}}},"response":{"status":200,"body":"{\n \"start\": \"2024-10-16\",\n \"end\": \"2024-10-23\",\n \"resolution\": {\n \"units\": \"day\",\n \"amount\": 1\n 
}\n}","headers":{"Content-Type":"application/json"}},"uuid":"6309dd55-c993-4ce1-b0b2-01a41c9f08d6","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"b132121b-4efe-42ad-a268-8acac35c189b","name":"Get Project Balances - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/balances","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"balances\": [\n {\n \"balance_id\": \"balance_id\",\n \"amount\": 1.1,\n \"units\": \"units\",\n \"purchase_order_id\": \"purchase_order_id\"\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"b132121b-4efe-42ad-a268-8acac35c189b","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"4019c244-52d3-4d57-902c-af837631650a","name":"Get a Project Balance - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/balances/{balance_id}","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"balance_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"balance_id\": \"balance_id\",\n \"amount\": 1.1,\n \"units\": \"units\",\n \"purchase_order_id\": \"purchase_order_id\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"4019c244-52d3-4d57-902c-af837631650a","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"555b6751-587f-400c-bf5e-400e108ad6b4","name":"Get Project Billing Breakdown - 
default","request":{"urlPathTemplate":"/v1/projects/{project_id}/billing/breakdown","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"start":{"equalTo":"start"},"end":{"equalTo":"end"},"accessor":{"equalTo":"12345678-1234-1234-1234-123456789012"},"deployment":{"equalTo":"hosted"},"tag":{"equalTo":"tag1"},"line_item":{"equalTo":"streaming::nova-3"}}},"response":{"status":200,"body":"{\n \"start\": \"2025-01-16\",\n \"end\": \"2025-01-23\",\n \"resolution\": {\n \"units\": \"day\",\n \"amount\": 1\n },\n \"results\": [\n {\n \"dollars\": 0.25,\n \"grouping\": {\n \"start\": \"2025-01-16\",\n \"end\": \"2025-01-16\",\n \"accessor\": \"123456789012345678901234\",\n \"deployment\": \"hosted\",\n \"line_item\": \"streaming::nova-3\",\n \"tags\": [\n \"tag1\",\n \"tag2\"\n ]\n }\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"555b6751-587f-400c-bf5e-400e108ad6b4","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"a61ae38c-e41f-4726-a55c-88f2135897be","name":"List Project Billing Fields - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/billing/fields","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"start":{"equalTo":"start"},"end":{"equalTo":"end"}}},"response":{"status":200,"body":"{\n \"accessors\": [\n \"12345678-1234-1234-1234-123456789012\",\n \"87654321-4321-4321-4321-210987654321\"\n ],\n \"deployments\": [\n \"hosted\",\n \"self-hosted\"\n ],\n \"tags\": [\n \"dev\",\n \"production\"\n ],\n \"line_items\": {\n \"streaming::nova-3\": \"Nova - 3 (Stream)\",\n \"sync::aura-2\": \"Aura -2 (Sync)\"\n 
}\n}","headers":{"Content-Type":"application/json"}},"uuid":"a61ae38c-e41f-4726-a55c-88f2135897be","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"85b4373c-ba39-41b1-84e8-ae1ee6b180ca","name":"List Project Purchases - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/purchases","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"limit":{"equalTo":"1.1"}}},"response":{"status":200,"body":"{\n \"orders\": [\n {\n \"order_id\": \"025e19ba-b6d9-4a04-9f99-4fe715aca5f1\",\n \"expiration\": \"2026-03-04T00:00:00Z\",\n \"created\": \"2023-02-21T21:13:40Z\",\n \"amount\": 150,\n \"units\": \"usd\",\n \"order_type\": \"promotional\"\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"85b4373c-ba39-41b1-84e8-ae1ee6b180ca","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"2dd14c67-ed4e-4d97-9636-0a712899deb8","name":"List Project Invites - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/invites","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"invites\": [\n {\n \"email\": \"email\",\n \"scope\": \"scope\"\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"2dd14c67-ed4e-4d97-9636-0a712899deb8","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"7c109496-adfe-4e85-b007-a6f799ee95cb","name":"Create a Project Invite - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/invites","method":"POST","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"message\": 
\"message\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"7c109496-adfe-4e85-b007-a6f799ee95cb","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"d6d268d0-d91e-4a65-80e0-339621173db9","name":"Delete a Project Invite - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/invites/{email}","method":"DELETE","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"email":{"equalTo":"john.doe@example.com"}}},"response":{"status":200,"body":"{\n \"message\": \"message\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"d6d268d0-d91e-4a65-80e0-339621173db9","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"1b965d71-c930-4a0b-90f3-2289f80f3634","name":"List Project Member Scopes - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/members/{member_id}/scopes","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"member_id":{"equalTo":"123456789012345678901234"}}},"response":{"status":200,"body":"{\n \"scopes\": [\n \"scopes\"\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"1b965d71-c930-4a0b-90f3-2289f80f3634","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"eb2f5de2-b887-47be-abd5-7cb702aca55d","name":"Update Project Member Scopes - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/members/{member_id}/scopes","method":"PUT","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"member_id":{"equalTo":"123456789012345678901234"}}},"response":{"status":200,"body":"{\n \"message\": 
\"message\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"eb2f5de2-b887-47be-abd5-7cb702aca55d","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"9bdf51a4-1e10-41b8-8de2-2df650562db3","name":"Get Project Usage Breakdown - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/usage/breakdown","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"start":{"equalTo":"start"},"end":{"equalTo":"end"},"grouping":{"equalTo":"accessor"},"accessor":{"equalTo":"12345678-1234-1234-1234-123456789012"},"alternatives":{"equalTo":"true"},"callback_method":{"equalTo":"true"},"callback":{"equalTo":"true"},"channels":{"equalTo":"true"},"custom_intent_mode":{"equalTo":"true"},"custom_intent":{"equalTo":"true"},"custom_topic_mode":{"equalTo":"true"},"custom_topic":{"equalTo":"true"},"deployment":{"equalTo":"hosted"},"detect_entities":{"equalTo":"true"},"detect_language":{"equalTo":"true"},"diarize":{"equalTo":"true"},"dictation":{"equalTo":"true"},"encoding":{"equalTo":"true"},"endpoint":{"equalTo":"listen"},"extra":{"equalTo":"true"},"filler_words":{"equalTo":"true"},"intents":{"equalTo":"true"},"keyterm":{"equalTo":"true"},"keywords":{"equalTo":"true"},"language":{"equalTo":"true"},"measurements":{"equalTo":"true"},"method":{"equalTo":"sync"},"model":{"equalTo":"6f548761-c9c0-429a-9315-11a1d28499c8"},"multichannel":{"equalTo":"true"},"numerals":{"equalTo":"true"},"paragraphs":{"equalTo":"true"},"profanity_filter":{"equalTo":"true"},"punctuate":{"equalTo":"true"},"redact":{"equalTo":"true"},"replace":{"equalTo":"true"},"sample_rate":{"equalTo":"true"},"search":{"equalTo":"true"},"sentiment":{"equalTo":"true"},"smart_format":{"equalTo":"true"},"summarize":{"equalTo":"true"},"tag":{"equalTo":"tag1"},"topics":{"equalTo":"true"},"utt_split":{"equalTo":"true"},"utterances":{"equalTo":"true"},"version":{"equalTo":"true"}}},"response":{"statu
s":200,"body":"{\n \"start\": \"2025-01-16\",\n \"end\": \"2025-01-23\",\n \"resolution\": {\n \"units\": \"day\",\n \"amount\": 1\n },\n \"results\": [\n {\n \"hours\": 1619.7242069444444,\n \"total_hours\": 1621.7395791666668,\n \"agent_hours\": 41.33564388888889,\n \"tokens_in\": 0,\n \"tokens_out\": 0,\n \"tts_characters\": 9158866,\n \"requests\": 373381,\n \"grouping\": {\n \"start\": \"2025-01-16\",\n \"end\": \"2025-01-16\",\n \"accessor\": \"123456789012345678901234\",\n \"endpoint\": \"listen\",\n \"feature_set\": \"punctuate\",\n \"models\": [\n \"Nova-2\"\n ],\n \"method\": \"async\",\n \"tags\": \"tag1\",\n \"deployment\": \"self-hosted\"\n }\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"9bdf51a4-1e10-41b8-8de2-2df650562db3","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"bc9fcd54-076e-48dc-a2dd-d71a8bf8bd4e","name":"List Project Usage Fields - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/usage/fields","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"start":{"equalTo":"start"},"end":{"equalTo":"end"}}},"response":{"status":200,"body":"{\n \"tags\": [\n \"tag=dev\",\n \"tag=production\"\n ],\n \"models\": [\n {\n \"name\": \"2-medical-nova\",\n \"language\": \"en-MY\",\n \"version\": \"2024-05-31.13574\",\n \"model_id\": \"1234567890-12345-67890\"\n }\n ],\n \"processing_methods\": [\n \"sync\",\n \"streaming\"\n ],\n \"features\": [\n \"alternatives\",\n \"detect_entities\",\n \"detect_language\"\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"bc9fcd54-076e-48dc-a2dd-d71a8bf8bd4e","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"b5ac3651-d3b7-4cd7-b0b8-e1a917a16f3b","name":"Analyze text content - 
default","request":{"urlPathTemplate":"/v1/read","method":"POST","queryParameters":{"callback":{"equalTo":"callback"},"callback_method":{"equalTo":"POST"},"sentiment":{"equalTo":"true"},"summarize":{"equalTo":"v2"},"tag":{"equalTo":"tag"},"topics":{"equalTo":"true"},"custom_topic":{"equalTo":"custom_topic"},"custom_topic_mode":{"equalTo":"extended"},"intents":{"equalTo":"true"},"custom_intent":{"equalTo":"custom_intent"},"custom_intent_mode":{"equalTo":"extended"},"language":{"equalTo":"language"}}},"response":{"status":200,"body":"{\n \"metadata\": {\n \"metadata\": {\n \"request_id\": \"d04af392-db11-4c1d-83e1-20e34f0b8999\",\n \"created\": \"2024-11-18T23:47:44Z\",\n \"language\": \"en\"\n }\n },\n \"results\": {\n \"summary\": {\n \"results\": {\n \"summary\": {\n \"text\": \"The summary of the text submitted.\"\n }\n }\n },\n \"topics\": {\n \"results\": {\n \"topics\": {\n \"segments\": [\n {\n \"text\": \"And, um, I think if it signifies anything, it is, uh, to honor the the women who came before us who, um, were skilled and qualified, um, and didn't get the the same opportunities that we have today.\",\n \"start_word\": 32,\n \"end_word\": 69,\n \"topics\": [\n {\n \"topic\": \"Spacewalk\",\n \"confidence_score\": 0.91581345\n }\n ]\n }\n ]\n }\n }\n },\n \"intents\": {\n \"results\": {\n \"intents\": {\n \"segments\": [\n {\n \"text\": \"If you found this valuable, you can subscribe to the show on spotify or your favorite podcast app.\",\n \"start_word\": 354,\n \"end_word\": 414,\n \"intents\": [\n {\n \"intent\": \"Encourage podcasting\",\n \"confidence_score\": 0.0038975573\n }\n ]\n }\n ]\n }\n }\n },\n \"sentiments\": {\n \"segments\": [\n {\n \"text\": \"Yeah. As as much as, um, it's worth celebrating, uh, the first, uh, spacewalk, um, with an all-female team, I think many of us are looking forward to it just being normal. 
And, um, I think if it signifies anything, it is, uh, to honor the the women who came before us who, um, were skilled and qualified, um, and didn't get the the same opportunities that we have today.\",\n \"start_word\": 0,\n \"end_word\": 69,\n \"sentiment\": \"positive\",\n \"sentiment_score\": 0.5810546875\n }\n ],\n \"average\": {\n \"sentiment\": \"positive\",\n \"sentiment_score\": 0.5810185185185185\n }\n }\n }\n}","headers":{"Content-Type":"application/json"}},"uuid":"b5ac3651-d3b7-4cd7-b0b8-e1a917a16f3b","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"4110cb96-50e2-4fe6-b8ae-5d69120cee89","name":"List Project Self-Hosted Distribution Credentials - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/self-hosted/distribution/credentials","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"distribution_credentials\": [\n {\n \"member\": {\n \"member_id\": \"3376abcd-8e5e-49d3-92d4-876d3a4f0363\",\n \"email\": \"email@example.com\"\n },\n \"distribution_credentials\": {\n \"distribution_credentials_id\": \"8b36cfd0-472f-4a21-833f-2d6343c3a2f3\",\n \"provider\": \"quay\",\n \"comment\": \"My Self-Hosted Distribution Credentials\",\n \"scopes\": [\n \"self-hosted:product:api\",\n \"self-hosted:product:engine\"\n ],\n \"created\": \"2023-06-28T15:36:59Z\"\n }\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"4110cb96-50e2-4fe6-b8ae-5d69120cee89","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"9c12eea9-6ba6-4d70-bb14-a2742cebc114","name":"Create a Project Self-Hosted Distribution Credential - 
default","request":{"urlPathTemplate":"/v1/projects/{project_id}/self-hosted/distribution/credentials","method":"POST","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"provider":{"equalTo":"quay"}}},"response":{"status":200,"body":"{\n \"member\": {\n \"member_id\": \"c7b9b131-73f3-11d9-8665-0b00d2e44b83\",\n \"email\": \"email@example.com\"\n },\n \"distribution_credentials\": {\n \"distribution_credentials_id\": \"82c32c10-53b2-4d23-993f-864b3d44502a\",\n \"provider\": \"quay\",\n \"comment\": \"My Self-Hosted Distribution Credentials\",\n \"scopes\": [\n \"self-hosted:product:api\",\n \"self-hosted:product:engine\"\n ],\n \"created\": \"2023-06-28T15:36:59Z\"\n }\n}","headers":{"Content-Type":"application/json"}},"uuid":"9c12eea9-6ba6-4d70-bb14-a2742cebc114","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"a47cd13f-2314-4190-b2c7-20436ccffbd2","name":"Get a Project Self-Hosted Distribution Credential - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/self-hosted/distribution/credentials/{distribution_credentials_id}","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"distribution_credentials_id":{"equalTo":"8b36cfd0-472f-4a21-833f-2d6343c3a2f3"}}},"response":{"status":200,"body":"{\n \"member\": {\n \"member_id\": \"c7b9b131-73f3-11d9-8665-0b00d2e44b83\",\n \"email\": \"email@example.com\"\n },\n \"distribution_credentials\": {\n \"distribution_credentials_id\": \"82c32c10-53b2-4d23-993f-864b3d44502a\",\n \"provider\": \"quay\",\n \"comment\": \"My Self-Hosted Distribution Credentials\",\n \"scopes\": [\n \"self-hosted:product:api\",\n \"self-hosted:product:engine\"\n ],\n \"created\": \"2023-06-28T15:36:59Z\"\n 
}\n}","headers":{"Content-Type":"application/json"}},"uuid":"a47cd13f-2314-4190-b2c7-20436ccffbd2","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"8bd46091-0e57-4b3d-9485-a86e6f1eaf17","name":"Delete a Project Self-Hosted Distribution Credential - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/self-hosted/distribution/credentials/{distribution_credentials_id}","method":"DELETE","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"distribution_credentials_id":{"equalTo":"8b36cfd0-472f-4a21-833f-2d6343c3a2f3"}}},"response":{"status":200,"body":"{\n \"member\": {\n \"member_id\": \"c7b9b131-73f3-11d9-8665-0b00d2e44b83\",\n \"email\": \"email@example.com\"\n },\n \"distribution_credentials\": {\n \"distribution_credentials_id\": \"82c32c10-53b2-4d23-993f-864b3d44502a\",\n \"provider\": \"quay\",\n \"comment\": \"My Self-Hosted Distribution Credentials\",\n \"scopes\": [\n \"self-hosted:product:api\",\n \"self-hosted:product:engine\"\n ],\n \"created\": \"2023-06-28T15:36:59Z\"\n }\n}","headers":{"Content-Type":"application/json"}},"uuid":"8bd46091-0e57-4b3d-9485-a86e6f1eaf17","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"b06ec977-02ed-41e2-8fff-2bc45cd2166b","name":"Text to Speech transformation - default","request":{"urlPathTemplate":"/v1/speak","method":"POST"},"response":{"status":200,"body":"{\n \"key\": \"value\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"b06ec977-02ed-41e2-8fff-2bc45cd2166b","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}}],"meta":{"total":39}} \ No newline at end of file +{"mappings":[{"id":"533b5d52-ab21-4763-aaae-87cf52f49aa5","name":"List Agent Think Models - 
default","request":{"urlPathTemplate":"/v1/agent/settings/think/models","method":"GET"},"response":{"status":200,"body":"{\n \"models\": [\n {\n \"id\": \"gpt-5\",\n \"name\": \"name\",\n \"provider\": \"open_ai\"\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"533b5d52-ab21-4763-aaae-87cf52f49aa5","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}},"postServeActions":[]},{"id":"20e1029e-8bb9-4092-a809-b943e60822ef","name":"Token-based Authentication - default","request":{"urlPathTemplate":"/v1/auth/grant","method":"POST"},"response":{"status":200,"body":"{\n \"access_token\": \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIn0.dozjgNryP4J3jVmNHl0w5N_XgL0n3I9PlFUP0THsR8U\",\n \"expires_in\": 30\n}","headers":{"Content-Type":"application/json"}},"uuid":"20e1029e-8bb9-4092-a809-b943e60822ef","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"49d8d51a-7f01-4598-804f-b6f54cdc22da","name":"Transcribe and analyze pre-recorded audio and video - 
default","request":{"urlPathTemplate":"/v1/listen","method":"POST","queryParameters":{"callback":{"equalTo":"callback"},"callback_method":{"equalTo":"POST"},"extra":{"equalTo":"extra"},"sentiment":{"equalTo":"true"},"summarize":{"equalTo":"v2"},"tag":{"equalTo":"tag"},"topics":{"equalTo":"true"},"custom_topic":{"equalTo":"custom_topic"},"custom_topic_mode":{"equalTo":"extended"},"intents":{"equalTo":"true"},"custom_intent":{"equalTo":"custom_intent"},"custom_intent_mode":{"equalTo":"extended"},"detect_entities":{"equalTo":"true"},"detect_language":{"equalTo":"true"},"diarize":{"equalTo":"true"},"dictation":{"equalTo":"true"},"encoding":{"equalTo":"linear16"},"filler_words":{"equalTo":"true"},"keywords":{"equalTo":"keywords"},"language":{"equalTo":"language"},"measurements":{"equalTo":"true"},"model":{"equalTo":"nova-3"},"multichannel":{"equalTo":"true"},"numerals":{"equalTo":"true"},"paragraphs":{"equalTo":"true"},"profanity_filter":{"equalTo":"true"},"punctuate":{"equalTo":"true"},"redact":{"equalTo":"redact"},"replace":{"equalTo":"replace"},"search":{"equalTo":"search"},"smart_format":{"equalTo":"true"},"utterances":{"equalTo":"true"},"utt_split":{"equalTo":"1.1"},"version":{"equalTo":"latest"},"mip_opt_out":{"equalTo":"true"}}},"response":{"status":200,"body":"{\n \"metadata\": {\n \"request_id\": \"a847f427-4ad5-4d67-9b95-db801e58251c\",\n \"sha256\": \"154e291ecfa8be6ab8343560bcc109008fa7853eb5372533e8efdefc9b504c33\",\n \"created\": \"2024-05-12T18:57:13Z\",\n \"duration\": 25.933313,\n \"channels\": 1,\n \"models\": [\n \"30089e05-99d1-4376-b32e-c263170674af\"\n ],\n \"model_info\": {\n \"30089e05-99d1-4376-b32e-c263170674af\": {\n \"name\": \"2-general-nova\",\n \"version\": \"2024-01-09.29447\",\n \"arch\": \"nova-2\"\n }\n },\n \"summary_info\": {\n \"model_uuid\": \"67875a7f-c9c4-48a0-aa55-5bdb8a91c34a\",\n \"input_tokens\": 95,\n \"output_tokens\": 63\n },\n \"sentiment_info\": {\n \"model_uuid\": \"80ab3179-d113-4254-bd6b-4a2f96498695\",\n 
\"input_tokens\": 105,\n \"output_tokens\": 105\n },\n \"topics_info\": {\n \"model_uuid\": \"80ab3179-d113-4254-bd6b-4a2f96498695\",\n \"input_tokens\": 105,\n \"output_tokens\": 7\n },\n \"intents_info\": {\n \"model_uuid\": \"80ab3179-d113-4254-bd6b-4a2f96498695\",\n \"input_tokens\": 105,\n \"output_tokens\": 4\n },\n \"tags\": [\n \"test\"\n ]\n },\n \"results\": {\n \"channels\": [\n {}\n ],\n \"utterances\": [\n {}\n ],\n \"summary\": {\n \"result\": \"success\",\n \"short\": \"Speaker 0 discusses the significance of the first all-female spacewalk with an all-female team, stating that it is a tribute to the skilled and qualified women who were denied opportunities in the past.\"\n },\n \"topics\": {\n \"results\": {\n \"topics\": {\n \"segments\": [\n {\n \"text\": \"And, um, I think if it signifies anything, it is, uh, to honor the the women who came before us who, um, were skilled and qualified, um, and didn't get the the same opportunities that we have today.\",\n \"start_word\": 32,\n \"end_word\": 69,\n \"topics\": [\n {\n \"topic\": \"Spacewalk\",\n \"confidence_score\": 0.91581345\n }\n ]\n }\n ]\n }\n }\n },\n \"intents\": {\n \"results\": {\n \"intents\": {\n \"segments\": [\n {\n \"text\": \"If you found this valuable, you can subscribe to the show on spotify or your favorite podcast app.\",\n \"start_word\": 354,\n \"end_word\": 414,\n \"intents\": [\n {\n \"intent\": \"Encourage podcasting\",\n \"confidence_score\": 0.0038975573\n }\n ]\n }\n ]\n }\n }\n },\n \"sentiments\": {\n \"segments\": [\n {\n \"text\": \"Yeah. As as much as, um, it's worth celebrating, uh, the first, uh, spacewalk, um, with an all-female team, I think many of us are looking forward to it just being normal. 
And, um, I think if it signifies anything, it is, uh, to honor the the women who came before us who, um, were skilled and qualified, um, and didn't get the the same opportunities that we have today.\",\n \"start_word\": 0,\n \"end_word\": 69,\n \"sentiment\": \"positive\",\n \"sentiment_score\": 0.5810546875\n }\n ],\n \"average\": {\n \"sentiment\": \"positive\",\n \"sentiment_score\": 0.5810185185185185\n }\n }\n }\n}","headers":{"Content-Type":"application/json"}},"uuid":"49d8d51a-7f01-4598-804f-b6f54cdc22da","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"0a0be61b-f024-4120-9c54-23bca3e07c93","name":"List Models - default","request":{"urlPathTemplate":"/v1/models","method":"GET","queryParameters":{"include_outdated":{"equalTo":"true"}}},"response":{"status":200,"body":"{\n \"stt\": [\n {\n \"name\": \"nova-3\",\n \"canonical_name\": \"nova-3\",\n \"architecture\": \"base\",\n \"languages\": [\n \"en\",\n \"en-us\"\n ],\n \"version\": \"2021-11-10.1\",\n \"uuid\": \"6b28e919-8427-4f32-9847-492e2efd7daf\",\n \"batch\": true,\n \"streaming\": true,\n \"formatted_output\": true\n }\n ],\n \"tts\": [\n {\n \"name\": \"zeus\",\n \"canonical_name\": \"aura-2-zeus-en\",\n \"architecture\": \"aura-2\",\n \"languages\": [\n \"en\",\n \"en-US\"\n ],\n \"version\": \"2025-04-07.0\",\n \"uuid\": \"2baf189d-91ac-481d-b6d1-750888667b31\",\n \"metadata\": {\n \"accent\": \"American\",\n \"age\": \"Adult\",\n \"color\": \"#C58DFF\",\n \"image\": \"https://static.deepgram.com/examples/avatars/zeus.jpg\",\n \"sample\": \"https://static.deepgram.com/examples/Aura-2-zeus.wav\",\n \"tags\": [\n \"masculine\",\n \"deep\",\n \"trustworthy\",\n \"smooth\"\n ],\n \"use_cases\": [\n \"IVR\"\n ]\n }\n }\n 
]\n}","headers":{"Content-Type":"application/json"}},"uuid":"0a0be61b-f024-4120-9c54-23bca3e07c93","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}},"postServeActions":[]},{"id":"0f94d3ea-43b6-4a1a-bce4-ab05b85440ae","name":"Get a specific Model - default","request":{"urlPathTemplate":"/v1/models/{model_id}","method":"GET","pathParameters":{"model_id":{"equalTo":"af6e9977-99f6-4d8f-b6f5-dfdf6fb6e291"}}},"response":{"status":200,"body":"{\n \"name\": \"general\",\n \"canonical_name\": \"enhanced-general\",\n \"architecture\": \"polaris\",\n \"languages\": [\n \"en\",\n \"en-us\"\n ],\n \"version\": \"2022-05-18.1\",\n \"uuid\": \"c7226e9e-ae1c-4057-ae2a-a71a6b0dc588\",\n \"batch\": true,\n \"streaming\": true,\n \"formatted_output\": false\n}","headers":{"Content-Type":"application/json"}},"uuid":"0f94d3ea-43b6-4a1a-bce4-ab05b85440ae","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"d08cac56-bc4d-4756-8fd1-d508914334d5","name":"List Projects - default","request":{"urlPathTemplate":"/v1/projects","method":"GET"},"response":{"status":200,"body":"{\n \"projects\": [\n {\n \"project_id\": \"project_id\",\n \"name\": \"name\"\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"d08cac56-bc4d-4756-8fd1-d508914334d5","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}},"postServeActions":[]},{"id":"6f0163a8-530c-4e25-bbe0-9ca86b9525dc","name":"Get a Project - default","request":{"urlPathTemplate":"/v1/projects/{project_id}","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"limit":{"equalTo":"1.1"},"page":{"equalTo":"1.1"}}},"response":{"status":200,"body":"{\n \"project_id\": \"project_id\",\n \"mip_opt_out\": true,\n \"name\": 
\"name\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"6f0163a8-530c-4e25-bbe0-9ca86b9525dc","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"68918577-6401-4439-8533-356257ff7bcf","name":"Delete a Project - default","request":{"urlPathTemplate":"/v1/projects/{project_id}","method":"DELETE","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"message\": \"message\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"68918577-6401-4439-8533-356257ff7bcf","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"659fc38b-3934-4e43-93bf-d331f547449e","name":"Update a Project - default","request":{"urlPathTemplate":"/v1/projects/{project_id}","method":"PATCH","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"message\": \"Successfully updated project info.\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"659fc38b-3934-4e43-93bf-d331f547449e","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"43ae6622-ad2f-4c81-9bc9-a8bbe17ef9d8","name":"Leave a Project - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/leave","method":"DELETE","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"message\": \"message\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"43ae6622-ad2f-4c81-9bc9-a8bbe17ef9d8","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"032d9b1c-3b87-40fb-bfab-8c5be92a5d71","name":"List Project Keys - 
default","request":{"urlPathTemplate":"/v1/projects/{project_id}/keys","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"status":{"equalTo":"active"}}},"response":{"status":200,"body":"{\n \"api_keys\": [\n {\n \"member\": {\n \"member_id\": \"1000-2000-3000-4000\",\n \"email\": \"john@test.com\"\n },\n \"api_key\": {\n \"api_key_id\": \"1234567890abcdef1234567890abcdef\",\n \"comment\": \"A comment\",\n \"scopes\": [\n \"admin\"\n ],\n \"created\": \"2021-01-01T00:00:00Z\"\n }\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"032d9b1c-3b87-40fb-bfab-8c5be92a5d71","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"0167c735-0b6f-4715-8df8-32300d4dae72","name":"Create a Project Key - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/keys","method":"POST","pathParameters":{"project_id":{"equalTo":"project_id"}}},"response":{"status":200,"body":"{\n \"api_key_id\": \"api_key_id\",\n \"key\": \"key\",\n \"comment\": \"comment\",\n \"scopes\": [\n \"scopes\",\n \"scopes\"\n ],\n \"tags\": [\n \"tags\",\n \"tags\"\n ],\n \"expiration_date\": \"2024-01-15T09:30:00Z\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"0167c735-0b6f-4715-8df8-32300d4dae72","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"c9812dd3-f87e-4798-aec3-af0933330dd5","name":"Get a Project Key - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/keys/{key_id}","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"key_id":{"equalTo":"123456789012345678901234"}}},"response":{"status":200,"body":"{\n \"item\": {\n \"member\": {\n \"member_id\": \"1000-2000-3000-4000\",\n \"email\": \"john@test.com\",\n \"first_name\": \"John\",\n \"last_name\": \"Doe\",\n \"api_key\": {\n \"api_key_id\": \"1000-2000-3000-4000\",\n 
\"comment\": \"A comment\",\n \"scopes\": [\n \"admin\"\n ],\n \"tags\": [\n \"prod\",\n \"west-region\"\n ],\n \"expiration_date\": \"2021-01-01T00:00:00Z\",\n \"created\": \"2021-01-01T00:00:00Z\"\n }\n }\n }\n}","headers":{"Content-Type":"application/json"}},"uuid":"c9812dd3-f87e-4798-aec3-af0933330dd5","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"3d2fbc7c-7bac-436f-a6ac-abe1b2c2caac","name":"Delete a Project Key - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/keys/{key_id}","method":"DELETE","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"key_id":{"equalTo":"123456789012345678901234"}}},"response":{"status":200,"body":"{\n \"message\": \"message\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"3d2fbc7c-7bac-436f-a6ac-abe1b2c2caac","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"91e103d5-72f7-463d-840d-310069e33de9","name":"List Project Members - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/members","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"members\": [\n {\n \"member_id\": \"member_id\",\n \"email\": \"email\"\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"91e103d5-72f7-463d-840d-310069e33de9","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"515c6f7e-09c3-43ea-ad6c-65bc11d20f46","name":"Delete a Project Member - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/members/{member_id}","method":"DELETE","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"member_id":{"equalTo":"123456789012345678901234"}}},"response":{"status":200,"body":"{\n \"message\": 
\"message\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"515c6f7e-09c3-43ea-ad6c-65bc11d20f46","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"a920ad0e-2796-4361-ac16-ac83fb75e32a","name":"List Project Models - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/models","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"include_outdated":{"equalTo":"true"}}},"response":{"status":200,"body":"{\n \"stt\": [\n {\n \"name\": \"nova-3\",\n \"canonical_name\": \"nova-3\",\n \"architecture\": \"base\",\n \"languages\": [\n \"en\",\n \"en-us\"\n ],\n \"version\": \"2021-11-10.1\",\n \"uuid\": \"6b28e919-8427-4f32-9847-492e2efd7daf\",\n \"batch\": true,\n \"streaming\": true,\n \"formatted_output\": true\n }\n ],\n \"tts\": [\n {\n \"name\": \"zeus\",\n \"canonical_name\": \"aura-2-zeus-en\",\n \"architecture\": \"aura-2\",\n \"languages\": [\n \"en\",\n \"en-US\"\n ],\n \"version\": \"2025-04-07.0\",\n \"uuid\": \"2baf189d-91ac-481d-b6d1-750888667b31\",\n \"metadata\": {\n \"accent\": \"American\",\n \"age\": \"Adult\",\n \"color\": \"#C58DFF\",\n \"image\": \"https://static.deepgram.com/examples/avatars/zeus.jpg\",\n \"sample\": \"https://static.deepgram.com/examples/Aura-2-zeus.wav\",\n \"tags\": [\n \"masculine\",\n \"deep\",\n \"trustworthy\",\n \"smooth\"\n ],\n \"use_cases\": [\n \"IVR\"\n ]\n }\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"a920ad0e-2796-4361-ac16-ac83fb75e32a","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"9f8c6bf2-ebee-4956-b39f-0291b9d64b6e","name":"Get a Project Model - 
default","request":{"urlPathTemplate":"/v1/projects/{project_id}/models/{model_id}","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"model_id":{"equalTo":"af6e9977-99f6-4d8f-b6f5-dfdf6fb6e291"}}},"response":{"status":200,"body":"{\n \"name\": \"general\",\n \"canonical_name\": \"enhanced-general\",\n \"architecture\": \"polaris\",\n \"languages\": [\n \"en\",\n \"en-us\"\n ],\n \"version\": \"2022-05-18.1\",\n \"uuid\": \"c7226e9e-ae1c-4057-ae2a-a71a6b0dc588\",\n \"batch\": true,\n \"streaming\": true,\n \"formatted_output\": false\n}","headers":{"Content-Type":"application/json"}},"uuid":"9f8c6bf2-ebee-4956-b39f-0291b9d64b6e","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"d6a14959-05fa-4aec-9f0c-ba2a817c66e5","name":"List Project Requests - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/requests","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"start":{"equalTo":"2024-01-15T09:30:00.000Z"},"end":{"equalTo":"2024-01-15T09:30:00.000Z"},"limit":{"equalTo":"1.1"},"page":{"equalTo":"1.1"},"accessor":{"equalTo":"12345678-1234-1234-1234-123456789012"},"request_id":{"equalTo":"12345678-1234-1234-1234-123456789012"},"deployment":{"equalTo":"hosted"},"endpoint":{"equalTo":"listen"},"method":{"equalTo":"sync"},"status":{"equalTo":"succeeded"}}},"response":{"status":200,"body":"{\n \"page\": 1.1,\n \"limit\": 1.1,\n \"requests\": [\n {\n \"request_id\": \"request_id\",\n \"project_uuid\": \"project_uuid\",\n \"created\": \"2024-01-15T09:30:00Z\",\n \"path\": \"path\",\n \"api_key_id\": \"api_key_id\",\n \"response\": {\n \"key\": \"value\"\n },\n \"code\": 1.1,\n \"deployment\": \"deployment\",\n \"callback\": \"callback\"\n }\n 
]\n}","headers":{"Content-Type":"application/json"}},"uuid":"d6a14959-05fa-4aec-9f0c-ba2a817c66e5","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"733e39aa-d3ef-4ea7-8062-af080c6288c4","name":"Get a Project Request - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/requests/{request_id}","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"request_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"request\": {\n \"request_id\": \"request_id\",\n \"project_uuid\": \"project_uuid\",\n \"created\": \"2024-01-15T09:30:00Z\",\n \"path\": \"path\",\n \"api_key_id\": \"api_key_id\",\n \"response\": {\n \"key\": \"value\"\n },\n \"code\": 1.1,\n \"deployment\": \"deployment\",\n \"callback\": \"callback\"\n }\n}","headers":{"Content-Type":"application/json"}},"uuid":"733e39aa-d3ef-4ea7-8062-af080c6288c4","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"6309dd55-c993-4ce1-b0b2-01a41c9f08d6","name":"Get Project Usage - 
default","request":{"urlPathTemplate":"/v1/projects/{project_id}/usage","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"start":{"equalTo":"start"},"end":{"equalTo":"end"},"accessor":{"equalTo":"12345678-1234-1234-1234-123456789012"},"alternatives":{"equalTo":"true"},"callback_method":{"equalTo":"true"},"callback":{"equalTo":"true"},"channels":{"equalTo":"true"},"custom_intent_mode":{"equalTo":"true"},"custom_intent":{"equalTo":"true"},"custom_topic_mode":{"equalTo":"true"},"custom_topic":{"equalTo":"true"},"deployment":{"equalTo":"hosted"},"detect_entities":{"equalTo":"true"},"detect_language":{"equalTo":"true"},"diarize":{"equalTo":"true"},"dictation":{"equalTo":"true"},"encoding":{"equalTo":"true"},"endpoint":{"equalTo":"listen"},"extra":{"equalTo":"true"},"filler_words":{"equalTo":"true"},"intents":{"equalTo":"true"},"keyterm":{"equalTo":"true"},"keywords":{"equalTo":"true"},"language":{"equalTo":"true"},"measurements":{"equalTo":"true"},"method":{"equalTo":"sync"},"model":{"equalTo":"6f548761-c9c0-429a-9315-11a1d28499c8"},"multichannel":{"equalTo":"true"},"numerals":{"equalTo":"true"},"paragraphs":{"equalTo":"true"},"profanity_filter":{"equalTo":"true"},"punctuate":{"equalTo":"true"},"redact":{"equalTo":"true"},"replace":{"equalTo":"true"},"sample_rate":{"equalTo":"true"},"search":{"equalTo":"true"},"sentiment":{"equalTo":"true"},"smart_format":{"equalTo":"true"},"summarize":{"equalTo":"true"},"tag":{"equalTo":"tag1"},"topics":{"equalTo":"true"},"utt_split":{"equalTo":"true"},"utterances":{"equalTo":"true"},"version":{"equalTo":"true"}}},"response":{"status":200,"body":"{\n \"start\": \"2024-10-16\",\n \"end\": \"2024-10-23\",\n \"resolution\": {\n \"units\": \"day\",\n \"amount\": 1\n 
}\n}","headers":{"Content-Type":"application/json"}},"uuid":"6309dd55-c993-4ce1-b0b2-01a41c9f08d6","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"b132121b-4efe-42ad-a268-8acac35c189b","name":"Get Project Balances - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/balances","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"balances\": [\n {\n \"balance_id\": \"balance_id\",\n \"amount\": 1.1,\n \"units\": \"units\",\n \"purchase_order_id\": \"purchase_order_id\"\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"b132121b-4efe-42ad-a268-8acac35c189b","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"4019c244-52d3-4d57-902c-af837631650a","name":"Get a Project Balance - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/balances/{balance_id}","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"balance_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"balance_id\": \"balance_id\",\n \"amount\": 1.1,\n \"units\": \"units\",\n \"purchase_order_id\": \"purchase_order_id\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"4019c244-52d3-4d57-902c-af837631650a","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"555b6751-587f-400c-bf5e-400e108ad6b4","name":"Get Project Billing Breakdown - 
default","request":{"urlPathTemplate":"/v1/projects/{project_id}/billing/breakdown","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"start":{"equalTo":"start"},"end":{"equalTo":"end"},"accessor":{"equalTo":"12345678-1234-1234-1234-123456789012"},"deployment":{"equalTo":"hosted"},"tag":{"equalTo":"tag1"},"line_item":{"equalTo":"streaming::nova-3"}}},"response":{"status":200,"body":"{\n \"start\": \"2025-01-16\",\n \"end\": \"2025-01-23\",\n \"resolution\": {\n \"units\": \"day\",\n \"amount\": 1\n },\n \"results\": [\n {\n \"dollars\": 0.25,\n \"grouping\": {\n \"start\": \"2025-01-16\",\n \"end\": \"2025-01-16\",\n \"accessor\": \"123456789012345678901234\",\n \"deployment\": \"hosted\",\n \"line_item\": \"streaming::nova-3\",\n \"tags\": [\n \"tag1\",\n \"tag2\"\n ]\n }\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"555b6751-587f-400c-bf5e-400e108ad6b4","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"a61ae38c-e41f-4726-a55c-88f2135897be","name":"List Project Billing Fields - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/billing/fields","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"start":{"equalTo":"start"},"end":{"equalTo":"end"}}},"response":{"status":200,"body":"{\n \"accessors\": [\n \"12345678-1234-1234-1234-123456789012\",\n \"87654321-4321-4321-4321-210987654321\"\n ],\n \"deployments\": [\n \"hosted\",\n \"self-hosted\"\n ],\n \"tags\": [\n \"dev\",\n \"production\"\n ],\n \"line_items\": {\n \"streaming::nova-3\": \"Nova - 3 (Stream)\",\n \"sync::aura-2\": \"Aura -2 (Sync)\"\n 
}\n}","headers":{"Content-Type":"application/json"}},"uuid":"a61ae38c-e41f-4726-a55c-88f2135897be","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"85b4373c-ba39-41b1-84e8-ae1ee6b180ca","name":"List Project Purchases - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/purchases","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"limit":{"equalTo":"1.1"}}},"response":{"status":200,"body":"{\n \"orders\": [\n {\n \"order_id\": \"025e19ba-b6d9-4a04-9f99-4fe715aca5f1\",\n \"expiration\": \"2026-03-04T00:00:00Z\",\n \"created\": \"2023-02-21T21:13:40Z\",\n \"amount\": 150,\n \"units\": \"usd\",\n \"order_type\": \"promotional\"\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"85b4373c-ba39-41b1-84e8-ae1ee6b180ca","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"2dd14c67-ed4e-4d97-9636-0a712899deb8","name":"List Project Invites - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/invites","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"invites\": [\n {\n \"email\": \"email\",\n \"scope\": \"scope\"\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"2dd14c67-ed4e-4d97-9636-0a712899deb8","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"7c109496-adfe-4e85-b007-a6f799ee95cb","name":"Create a Project Invite - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/invites","method":"POST","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"message\": 
\"message\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"7c109496-adfe-4e85-b007-a6f799ee95cb","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"d6d268d0-d91e-4a65-80e0-339621173db9","name":"Delete a Project Invite - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/invites/{email}","method":"DELETE","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"email":{"equalTo":"john.doe@example.com"}}},"response":{"status":200,"body":"{\n \"message\": \"message\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"d6d268d0-d91e-4a65-80e0-339621173db9","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"1b965d71-c930-4a0b-90f3-2289f80f3634","name":"List Project Member Scopes - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/members/{member_id}/scopes","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"member_id":{"equalTo":"123456789012345678901234"}}},"response":{"status":200,"body":"{\n \"scopes\": [\n \"scopes\"\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"1b965d71-c930-4a0b-90f3-2289f80f3634","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"eb2f5de2-b887-47be-abd5-7cb702aca55d","name":"Update Project Member Scopes - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/members/{member_id}/scopes","method":"PUT","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"member_id":{"equalTo":"123456789012345678901234"}}},"response":{"status":200,"body":"{\n \"message\": 
\"message\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"eb2f5de2-b887-47be-abd5-7cb702aca55d","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"9bdf51a4-1e10-41b8-8de2-2df650562db3","name":"Get Project Usage Breakdown - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/usage/breakdown","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"start":{"equalTo":"start"},"end":{"equalTo":"end"},"grouping":{"equalTo":"accessor"},"accessor":{"equalTo":"12345678-1234-1234-1234-123456789012"},"alternatives":{"equalTo":"true"},"callback_method":{"equalTo":"true"},"callback":{"equalTo":"true"},"channels":{"equalTo":"true"},"custom_intent_mode":{"equalTo":"true"},"custom_intent":{"equalTo":"true"},"custom_topic_mode":{"equalTo":"true"},"custom_topic":{"equalTo":"true"},"deployment":{"equalTo":"hosted"},"detect_entities":{"equalTo":"true"},"detect_language":{"equalTo":"true"},"diarize":{"equalTo":"true"},"dictation":{"equalTo":"true"},"encoding":{"equalTo":"true"},"endpoint":{"equalTo":"listen"},"extra":{"equalTo":"true"},"filler_words":{"equalTo":"true"},"intents":{"equalTo":"true"},"keyterm":{"equalTo":"true"},"keywords":{"equalTo":"true"},"language":{"equalTo":"true"},"measurements":{"equalTo":"true"},"method":{"equalTo":"sync"},"model":{"equalTo":"6f548761-c9c0-429a-9315-11a1d28499c8"},"multichannel":{"equalTo":"true"},"numerals":{"equalTo":"true"},"paragraphs":{"equalTo":"true"},"profanity_filter":{"equalTo":"true"},"punctuate":{"equalTo":"true"},"redact":{"equalTo":"true"},"replace":{"equalTo":"true"},"sample_rate":{"equalTo":"true"},"search":{"equalTo":"true"},"sentiment":{"equalTo":"true"},"smart_format":{"equalTo":"true"},"summarize":{"equalTo":"true"},"tag":{"equalTo":"tag1"},"topics":{"equalTo":"true"},"utt_split":{"equalTo":"true"},"utterances":{"equalTo":"true"},"version":{"equalTo":"true"}}},"response":{"statu
s":200,"body":"{\n \"start\": \"2025-01-16\",\n \"end\": \"2025-01-23\",\n \"resolution\": {\n \"units\": \"day\",\n \"amount\": 1\n },\n \"results\": [\n {\n \"hours\": 1619.7242069444444,\n \"total_hours\": 1621.7395791666668,\n \"agent_hours\": 41.33564388888889,\n \"tokens_in\": 0,\n \"tokens_out\": 0,\n \"tts_characters\": 9158866,\n \"requests\": 373381,\n \"grouping\": {\n \"start\": \"2025-01-16\",\n \"end\": \"2025-01-16\",\n \"accessor\": \"123456789012345678901234\",\n \"endpoint\": \"listen\",\n \"feature_set\": \"punctuate\",\n \"models\": [\n \"Nova-2\"\n ],\n \"method\": \"async\",\n \"tags\": \"tag1\",\n \"deployment\": \"self-hosted\"\n }\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"9bdf51a4-1e10-41b8-8de2-2df650562db3","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"bc9fcd54-076e-48dc-a2dd-d71a8bf8bd4e","name":"List Project Usage Fields - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/usage/fields","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"start":{"equalTo":"start"},"end":{"equalTo":"end"}}},"response":{"status":200,"body":"{\n \"tags\": [\n \"tag=dev\",\n \"tag=production\"\n ],\n \"models\": [\n {\n \"name\": \"2-medical-nova\",\n \"language\": \"en-MY\",\n \"version\": \"2024-05-31.13574\",\n \"model_id\": \"1234567890-12345-67890\"\n }\n ],\n \"processing_methods\": [\n \"sync\",\n \"streaming\"\n ],\n \"features\": [\n \"alternatives\",\n \"detect_entities\",\n \"detect_language\"\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"bc9fcd54-076e-48dc-a2dd-d71a8bf8bd4e","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"b5ac3651-d3b7-4cd7-b0b8-e1a917a16f3b","name":"Analyze text content - 
default","request":{"urlPathTemplate":"/v1/read","method":"POST","queryParameters":{"callback":{"equalTo":"callback"},"callback_method":{"equalTo":"POST"},"sentiment":{"equalTo":"true"},"summarize":{"equalTo":"v2"},"tag":{"equalTo":"tag"},"topics":{"equalTo":"true"},"custom_topic":{"equalTo":"custom_topic"},"custom_topic_mode":{"equalTo":"extended"},"intents":{"equalTo":"true"},"custom_intent":{"equalTo":"custom_intent"},"custom_intent_mode":{"equalTo":"extended"},"language":{"equalTo":"language"}}},"response":{"status":200,"body":"{\n \"metadata\": {\n \"metadata\": {\n \"request_id\": \"d04af392-db11-4c1d-83e1-20e34f0b8999\",\n \"created\": \"2024-11-18T23:47:44Z\",\n \"language\": \"en\"\n }\n },\n \"results\": {\n \"summary\": {\n \"results\": {\n \"summary\": {\n \"text\": \"The summary of the text submitted.\"\n }\n }\n },\n \"topics\": {\n \"results\": {\n \"topics\": {\n \"segments\": [\n {\n \"text\": \"And, um, I think if it signifies anything, it is, uh, to honor the the women who came before us who, um, were skilled and qualified, um, and didn't get the the same opportunities that we have today.\",\n \"start_word\": 32,\n \"end_word\": 69,\n \"topics\": [\n {\n \"topic\": \"Spacewalk\",\n \"confidence_score\": 0.91581345\n }\n ]\n }\n ]\n }\n }\n },\n \"intents\": {\n \"results\": {\n \"intents\": {\n \"segments\": [\n {\n \"text\": \"If you found this valuable, you can subscribe to the show on spotify or your favorite podcast app.\",\n \"start_word\": 354,\n \"end_word\": 414,\n \"intents\": [\n {\n \"intent\": \"Encourage podcasting\",\n \"confidence_score\": 0.0038975573\n }\n ]\n }\n ]\n }\n }\n },\n \"sentiments\": {\n \"segments\": [\n {\n \"text\": \"Yeah. As as much as, um, it's worth celebrating, uh, the first, uh, spacewalk, um, with an all-female team, I think many of us are looking forward to it just being normal. 
And, um, I think if it signifies anything, it is, uh, to honor the the women who came before us who, um, were skilled and qualified, um, and didn't get the the same opportunities that we have today.\",\n \"start_word\": 0,\n \"end_word\": 69,\n \"sentiment\": \"positive\",\n \"sentiment_score\": 0.5810546875\n }\n ],\n \"average\": {\n \"sentiment\": \"positive\",\n \"sentiment_score\": 0.5810185185185185\n }\n }\n }\n}","headers":{"Content-Type":"application/json"}},"uuid":"b5ac3651-d3b7-4cd7-b0b8-e1a917a16f3b","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"4110cb96-50e2-4fe6-b8ae-5d69120cee89","name":"List Project Self-Hosted Distribution Credentials - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/self-hosted/distribution/credentials","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"distribution_credentials\": [\n {\n \"member\": {\n \"member_id\": \"3376abcd-8e5e-49d3-92d4-876d3a4f0363\",\n \"email\": \"email@example.com\"\n },\n \"distribution_credentials\": {\n \"distribution_credentials_id\": \"8b36cfd0-472f-4a21-833f-2d6343c3a2f3\",\n \"provider\": \"quay\",\n \"comment\": \"My Self-Hosted Distribution Credentials\",\n \"scopes\": [\n \"self-hosted:product:api\",\n \"self-hosted:product:engine\"\n ],\n \"created\": \"2023-06-28T15:36:59Z\"\n }\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"4110cb96-50e2-4fe6-b8ae-5d69120cee89","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"9c12eea9-6ba6-4d70-bb14-a2742cebc114","name":"Create a Project Self-Hosted Distribution Credential - 
default","request":{"urlPathTemplate":"/v1/projects/{project_id}/self-hosted/distribution/credentials","method":"POST","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"provider":{"equalTo":"quay"}}},"response":{"status":200,"body":"{\n \"member\": {\n \"member_id\": \"c7b9b131-73f3-11d9-8665-0b00d2e44b83\",\n \"email\": \"email@example.com\"\n },\n \"distribution_credentials\": {\n \"distribution_credentials_id\": \"82c32c10-53b2-4d23-993f-864b3d44502a\",\n \"provider\": \"quay\",\n \"comment\": \"My Self-Hosted Distribution Credentials\",\n \"scopes\": [\n \"self-hosted:product:api\",\n \"self-hosted:product:engine\"\n ],\n \"created\": \"2023-06-28T15:36:59Z\"\n }\n}","headers":{"Content-Type":"application/json"}},"uuid":"9c12eea9-6ba6-4d70-bb14-a2742cebc114","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"a47cd13f-2314-4190-b2c7-20436ccffbd2","name":"Get a Project Self-Hosted Distribution Credential - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/self-hosted/distribution/credentials/{distribution_credentials_id}","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"distribution_credentials_id":{"equalTo":"8b36cfd0-472f-4a21-833f-2d6343c3a2f3"}}},"response":{"status":200,"body":"{\n \"member\": {\n \"member_id\": \"c7b9b131-73f3-11d9-8665-0b00d2e44b83\",\n \"email\": \"email@example.com\"\n },\n \"distribution_credentials\": {\n \"distribution_credentials_id\": \"82c32c10-53b2-4d23-993f-864b3d44502a\",\n \"provider\": \"quay\",\n \"comment\": \"My Self-Hosted Distribution Credentials\",\n \"scopes\": [\n \"self-hosted:product:api\",\n \"self-hosted:product:engine\"\n ],\n \"created\": \"2023-06-28T15:36:59Z\"\n 
}\n}","headers":{"Content-Type":"application/json"}},"uuid":"a47cd13f-2314-4190-b2c7-20436ccffbd2","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"8bd46091-0e57-4b3d-9485-a86e6f1eaf17","name":"Delete a Project Self-Hosted Distribution Credential - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/self-hosted/distribution/credentials/{distribution_credentials_id}","method":"DELETE","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"distribution_credentials_id":{"equalTo":"8b36cfd0-472f-4a21-833f-2d6343c3a2f3"}}},"response":{"status":200,"body":"{\n \"member\": {\n \"member_id\": \"c7b9b131-73f3-11d9-8665-0b00d2e44b83\",\n \"email\": \"email@example.com\"\n },\n \"distribution_credentials\": {\n \"distribution_credentials_id\": \"82c32c10-53b2-4d23-993f-864b3d44502a\",\n \"provider\": \"quay\",\n \"comment\": \"My Self-Hosted Distribution Credentials\",\n \"scopes\": [\n \"self-hosted:product:api\",\n \"self-hosted:product:engine\"\n ],\n \"created\": \"2023-06-28T15:36:59Z\"\n }\n}","headers":{"Content-Type":"application/json"}},"uuid":"8bd46091-0e57-4b3d-9485-a86e6f1eaf17","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"b06ec977-02ed-41e2-8fff-2bc45cd2166b","name":"Text to Speech transformation - default","request":{"urlPathTemplate":"/v1/speak","method":"POST"},"response":{"status":200,"body":"{\n \"key\": \"value\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"b06ec977-02ed-41e2-8fff-2bc45cd2166b","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"61883f62-09ec-4be9-b477-13a89b9677cf","name":"List Agent Configurations - 
default","request":{"urlPathTemplate":"/v1/projects/{project_id}/agents","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"agents\": [\n {\n \"agent_id\": \"agent_id\",\n \"config\": {\n \"key\": \"value\"\n },\n \"metadata\": {\n \"key\": \"value\"\n },\n \"created_at\": \"2024-01-15T09:30:00Z\",\n \"updated_at\": \"2024-01-15T09:30:00Z\"\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"61883f62-09ec-4be9-b477-13a89b9677cf","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"a43cdbee-2b2f-47fb-8220-75d337cb6d3a","name":"Create an Agent Configuration - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/agents","method":"POST","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"agent_id\": \"agent_id\",\n \"config\": {\n \"key\": \"value\"\n },\n \"metadata\": {\n \"key\": \"value\"\n }\n}","headers":{"Content-Type":"application/json"}},"uuid":"a43cdbee-2b2f-47fb-8220-75d337cb6d3a","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"2575dd3c-b1c8-4007-8f87-4e32073a9dcf","name":"Get an Agent Configuration - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/agents/{agent_id}","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"agent_id":{"equalTo":"a1b2c3d4-e5f6-7890-abcd-ef1234567890"}}},"response":{"status":200,"body":"{\n \"agent_id\": \"agent_id\",\n \"config\": {\n \"key\": \"value\"\n },\n \"metadata\": {\n \"key\": \"value\"\n },\n \"created_at\": \"2024-01-15T09:30:00Z\",\n \"updated_at\": 
\"2024-01-15T09:30:00Z\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"2575dd3c-b1c8-4007-8f87-4e32073a9dcf","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"3b52c271-0e44-4148-b726-6f5ca8c91d26","name":"Update Agent Metadata - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/agents/{agent_id}","method":"PUT","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"agent_id":{"equalTo":"a1b2c3d4-e5f6-7890-abcd-ef1234567890"}}},"response":{"status":200,"body":"{\n \"agent_id\": \"agent_id\",\n \"config\": {\n \"key\": \"value\"\n },\n \"metadata\": {\n \"key\": \"value\"\n },\n \"created_at\": \"2024-01-15T09:30:00Z\",\n \"updated_at\": \"2024-01-15T09:30:00Z\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"3b52c271-0e44-4148-b726-6f5ca8c91d26","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"e839386d-beaa-4a7a-a478-ed9d9ab1b63e","name":"Delete an Agent Configuration - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/agents/{agent_id}","method":"DELETE","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"agent_id":{"equalTo":"a1b2c3d4-e5f6-7890-abcd-ef1234567890"}}},"response":{"status":200,"body":"{\n \"key\": \"value\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"e839386d-beaa-4a7a-a478-ed9d9ab1b63e","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"bb85071e-3933-4cd3-8fd7-d35eb4c992c5","name":"List Agent Variables - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/agent-variables","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"variables\": [\n {\n \"variable_id\": \"variable_id\",\n \"key\": \"key\",\n \"value\": {\n \"key\": \"value\"\n 
},\n \"created_at\": \"2024-01-15T09:30:00Z\",\n \"updated_at\": \"2024-01-15T09:30:00Z\"\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"bb85071e-3933-4cd3-8fd7-d35eb4c992c5","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"c5016e4b-f863-4dc7-972f-c577ac7fdc47","name":"Create an Agent Variable - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/agent-variables","method":"POST","pathParameters":{"project_id":{"equalTo":"project_id"}}},"response":{"status":200,"body":"{\n \"variable_id\": \"variable_id\",\n \"key\": \"key\",\n \"value\": {\n \"key\": \"value\"\n },\n \"created_at\": \"2024-01-15T09:30:00Z\",\n \"updated_at\": \"2024-01-15T09:30:00Z\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"c5016e4b-f863-4dc7-972f-c577ac7fdc47","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"5bb4b688-8475-4911-8ad5-725b7d5338e3","name":"Get an Agent Variable - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/agent-variables/{variable_id}","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"variable_id":{"equalTo":"v1a2b3c4-d5e6-7890-abcd-ef1234567890"}}},"response":{"status":200,"body":"{\n \"variable_id\": \"variable_id\",\n \"key\": \"key\",\n \"value\": {\n \"key\": \"value\"\n },\n \"created_at\": \"2024-01-15T09:30:00Z\",\n \"updated_at\": \"2024-01-15T09:30:00Z\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"5bb4b688-8475-4911-8ad5-725b7d5338e3","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"2e2c7061-7573-423f-a907-2a15dd9f3307","name":"Delete an Agent Variable - 
default","request":{"urlPathTemplate":"/v1/projects/{project_id}/agent-variables/{variable_id}","method":"DELETE","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"variable_id":{"equalTo":"v1a2b3c4-d5e6-7890-abcd-ef1234567890"}}},"response":{"status":200,"body":"{\n \"key\": \"value\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"2e2c7061-7573-423f-a907-2a15dd9f3307","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"33765621-bc60-4ef4-b295-4b276aeb871f","name":"Update an Agent Variable - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/agent-variables/{variable_id}","method":"PATCH","pathParameters":{"project_id":{"equalTo":"project_id"},"variable_id":{"equalTo":"variable_id"}}},"response":{"status":200,"body":"{\n \"variable_id\": \"variable_id\",\n \"key\": \"key\",\n \"value\": {\n \"key\": \"value\"\n },\n \"created_at\": \"2024-01-15T09:30:00Z\",\n \"updated_at\": \"2024-01-15T09:30:00Z\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"33765621-bc60-4ef4-b295-4b276aeb871f","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}}],"meta":{"total":49}} \ No newline at end of file From 75ed7392d81db50b72fee03fb8587e8767e4a95c Mon Sep 17 00:00:00 2001 From: Greg Holmes Date: Tue, 14 Apr 2026 13:18:26 +0100 Subject: [PATCH 4/8] chore: restore .fernignore and remove .bak files after regen --- .fernignore | 16 +- src/deepgram/agent/v1/socket_client.py.bak | 342 ------------------ src/deepgram/listen/v1/socket_client.py.bak | 236 ------------ src/deepgram/listen/v2/socket_client.py.bak | 205 ----------- src/deepgram/speak/v1/socket_client.py.bak | 237 ------------ ...ves_item_paragraphs_paragraphs_item.py.bak | 29 -- ..._v1response_results_utterances_item.py.bak | 28 -- ..._results_utterances_item_words_item.py.bak | 26 -- 8 files changed, 7 insertions(+), 1112 deletions(-) 
delete mode 100644 src/deepgram/agent/v1/socket_client.py.bak delete mode 100644 src/deepgram/listen/v1/socket_client.py.bak delete mode 100644 src/deepgram/listen/v2/socket_client.py.bak delete mode 100644 src/deepgram/speak/v1/socket_client.py.bak delete mode 100644 src/deepgram/types/listen_v1response_results_channels_item_alternatives_item_paragraphs_paragraphs_item.py.bak delete mode 100644 src/deepgram/types/listen_v1response_results_utterances_item.py.bak delete mode 100644 src/deepgram/types/listen_v1response_results_utterances_item_words_item.py.bak diff --git a/.fernignore b/.fernignore index 9f7d7c1c..95cce1f0 100644 --- a/.fernignore +++ b/.fernignore @@ -11,18 +11,16 @@ src/deepgram/client.py # - optional message param on control send_ methods (send_keep_alive, send_close_stream, etc.) # so users don't need to instantiate the type themselves for no-payload control messages # [temporarily frozen — generator bugs in construct_type call convention and exception handling] -# [REGEN: .bak copies preserved, originals unfrozen for Fern to overwrite] -src/deepgram/agent/v1/socket_client.py.bak -src/deepgram/listen/v1/socket_client.py.bak -src/deepgram/listen/v2/socket_client.py.bak -src/deepgram/speak/v1/socket_client.py.bak +src/deepgram/agent/v1/socket_client.py +src/deepgram/listen/v1/socket_client.py +src/deepgram/listen/v2/socket_client.py +src/deepgram/speak/v1/socket_client.py # Type files with manual int type corrections (Fern generates float for speaker/channel/num_words) # [temporarily frozen — waiting on internal-api-specs#205] -# [REGEN: .bak copies preserved, originals unfrozen for Fern to overwrite] -src/deepgram/types/listen_v1response_results_utterances_item.py.bak -src/deepgram/types/listen_v1response_results_utterances_item_words_item.py.bak -src/deepgram/types/listen_v1response_results_channels_item_alternatives_item_paragraphs_paragraphs_item.py.bak +src/deepgram/types/listen_v1response_results_utterances_item.py 
+src/deepgram/types/listen_v1response_results_utterances_item_words_item.py +src/deepgram/types/listen_v1response_results_channels_item_alternatives_item_paragraphs_paragraphs_item.py # Hand-written custom tests tests/custom/test_text_builder.py diff --git a/src/deepgram/agent/v1/socket_client.py.bak b/src/deepgram/agent/v1/socket_client.py.bak deleted file mode 100644 index ce3e4aa1..00000000 --- a/src/deepgram/agent/v1/socket_client.py.bak +++ /dev/null @@ -1,342 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import json -import logging -import typing -from json.decoder import JSONDecodeError - -import websockets -import websockets.sync.connection as websockets_sync_connection -from ...core.events import EventEmitterMixin, EventType -from ...core.unchecked_base_model import construct_type -from .types.agent_v1agent_audio_done import AgentV1AgentAudioDone -from .types.agent_v1agent_started_speaking import AgentV1AgentStartedSpeaking -from .types.agent_v1agent_thinking import AgentV1AgentThinking -from .types.agent_v1conversation_text import AgentV1ConversationText -from .types.agent_v1error import AgentV1Error -from .types.agent_v1function_call_request import AgentV1FunctionCallRequest -from .types.agent_v1inject_agent_message import AgentV1InjectAgentMessage -from .types.agent_v1inject_user_message import AgentV1InjectUserMessage -from .types.agent_v1injection_refused import AgentV1InjectionRefused -from .types.agent_v1keep_alive import AgentV1KeepAlive -from .types.agent_v1prompt_updated import AgentV1PromptUpdated -from .types.agent_v1receive_function_call_response import AgentV1ReceiveFunctionCallResponse -from .types.agent_v1send_function_call_response import AgentV1SendFunctionCallResponse -from .types.agent_v1settings import AgentV1Settings -from .types.agent_v1settings_applied import AgentV1SettingsApplied -from .types.agent_v1speak_updated import AgentV1SpeakUpdated -from .types.agent_v1update_prompt import AgentV1UpdatePrompt 
-from .types.agent_v1update_speak import AgentV1UpdateSpeak -from .types.agent_v1user_started_speaking import AgentV1UserStartedSpeaking -from .types.agent_v1warning import AgentV1Warning -from .types.agent_v1welcome import AgentV1Welcome - -try: - from websockets.legacy.client import WebSocketClientProtocol # type: ignore -except ImportError: - from websockets import WebSocketClientProtocol # type: ignore - -_logger = logging.getLogger(__name__) - - -def _sanitize_numeric_types(obj: typing.Any) -> typing.Any: - """ - Recursively convert float values that are whole numbers to int. - - Workaround for Fern-generated models that type integer API fields - (like sample_rate) as float, causing JSON serialization to produce - values like 44100.0 instead of 44100. The Deepgram API rejects - float representations of integer fields. - - See: https://github.com/deepgram/internal-api-specs/issues/205 - """ - if isinstance(obj, dict): - return {k: _sanitize_numeric_types(v) for k, v in obj.items()} - elif isinstance(obj, list): - return [_sanitize_numeric_types(item) for item in obj] - elif isinstance(obj, float) and obj.is_integer(): - return int(obj) - return obj -V1SocketClientResponse = typing.Union[ - AgentV1ReceiveFunctionCallResponse, - AgentV1PromptUpdated, - AgentV1SpeakUpdated, - AgentV1InjectionRefused, - AgentV1Welcome, - AgentV1SettingsApplied, - AgentV1ConversationText, - AgentV1UserStartedSpeaking, - AgentV1AgentThinking, - AgentV1FunctionCallRequest, - AgentV1AgentStartedSpeaking, - AgentV1AgentAudioDone, - AgentV1Error, - AgentV1Warning, - bytes, -] - - -class AsyncV1SocketClient(EventEmitterMixin): - def __init__(self, *, websocket: WebSocketClientProtocol): - super().__init__() - self._websocket = websocket - - async def __aiter__(self): - async for message in self._websocket: - if isinstance(message, bytes): - yield message - else: - try: - yield construct_type(type_=V1SocketClientResponse, object_=json.loads(message)) # type: ignore - except Exception: - 
_logger.warning( - "Skipping unknown WebSocket message; update your SDK version to support new message types." - ) - continue - - async def start_listening(self): - """ - Start listening for messages on the websocket connection. - - Emits events in the following order: - - EventType.OPEN when connection is established - - EventType.MESSAGE for each message received - - EventType.ERROR if an error occurs - - EventType.CLOSE when connection is closed - """ - await self._emit_async(EventType.OPEN, None) - try: - async for raw_message in self._websocket: - if isinstance(raw_message, bytes): - parsed = raw_message - else: - json_data = json.loads(raw_message) - try: - parsed = construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore - except Exception: - _logger.warning( - "Skipping unknown WebSocket message; update your SDK version to support new message types." - ) - continue - await self._emit_async(EventType.MESSAGE, parsed) - except Exception as exc: - await self._emit_async(EventType.ERROR, exc) - finally: - await self._emit_async(EventType.CLOSE, None) - - async def send_settings(self, message: AgentV1Settings) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a AgentV1Settings. - """ - await self._send_model(message) - - async def send_update_speak(self, message: AgentV1UpdateSpeak) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a AgentV1UpdateSpeak. - """ - await self._send_model(message) - - async def send_inject_user_message(self, message: AgentV1InjectUserMessage) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a AgentV1InjectUserMessage. - """ - await self._send_model(message) - - async def send_inject_agent_message(self, message: AgentV1InjectAgentMessage) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a AgentV1InjectAgentMessage. 
- """ - await self._send_model(message) - - async def send_function_call_response(self, message: AgentV1SendFunctionCallResponse) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a AgentV1SendFunctionCallResponse. - """ - await self._send_model(message) - - async def send_keep_alive(self, message: typing.Optional[AgentV1KeepAlive] = None) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a AgentV1KeepAlive. - """ - await self._send_model(message or AgentV1KeepAlive(type="KeepAlive")) - - async def send_update_prompt(self, message: AgentV1UpdatePrompt) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a AgentV1UpdatePrompt. - """ - await self._send_model(message) - - async def send_media(self, message: bytes) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a bytes. - """ - await self._send(message) - - async def recv(self) -> V1SocketClientResponse: - """ - Receive a message from the websocket connection. - """ - data = await self._websocket.recv() - if isinstance(data, bytes): - return data # type: ignore - json_data = json.loads(data) - try: - return construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore - except Exception: - _logger.warning("Skipping unknown WebSocket message; update your SDK version to support new message types.") - return json_data # type: ignore - - async def _send(self, data: typing.Any) -> None: - """ - Send a message to the websocket connection. - """ - if isinstance(data, dict): - data = json.dumps(data) - await self._websocket.send(data) - - async def _send_model(self, data: typing.Any) -> None: - """ - Send a Pydantic model to the websocket connection. 
- """ - await self._send(_sanitize_numeric_types(data.dict())) - - -class V1SocketClient(EventEmitterMixin): - def __init__(self, *, websocket: websockets_sync_connection.Connection): - super().__init__() - self._websocket = websocket - - def __iter__(self): - for message in self._websocket: - if isinstance(message, bytes): - yield message - else: - try: - yield construct_type(type_=V1SocketClientResponse, object_=json.loads(message)) # type: ignore - except Exception: - _logger.warning( - "Skipping unknown WebSocket message; update your SDK version to support new message types." - ) - continue - - def start_listening(self): - """ - Start listening for messages on the websocket connection. - - Emits events in the following order: - - EventType.OPEN when connection is established - - EventType.MESSAGE for each message received - - EventType.ERROR if an error occurs - - EventType.CLOSE when connection is closed - """ - self._emit(EventType.OPEN, None) - try: - for raw_message in self._websocket: - if isinstance(raw_message, bytes): - parsed = raw_message - else: - json_data = json.loads(raw_message) - try: - parsed = construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore - except Exception: - _logger.warning( - "Skipping unknown WebSocket message; update your SDK version to support new message types." - ) - continue - self._emit(EventType.MESSAGE, parsed) - except Exception as exc: - self._emit(EventType.ERROR, exc) - finally: - self._emit(EventType.CLOSE, None) - - def send_settings(self, message: AgentV1Settings) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a AgentV1Settings. - """ - self._send_model(message) - - def send_update_speak(self, message: AgentV1UpdateSpeak) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a AgentV1UpdateSpeak. 
- """ - self._send_model(message) - - def send_inject_user_message(self, message: AgentV1InjectUserMessage) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a AgentV1InjectUserMessage. - """ - self._send_model(message) - - def send_inject_agent_message(self, message: AgentV1InjectAgentMessage) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a AgentV1InjectAgentMessage. - """ - self._send_model(message) - - def send_function_call_response(self, message: AgentV1SendFunctionCallResponse) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a AgentV1SendFunctionCallResponse. - """ - self._send_model(message) - - def send_keep_alive(self, message: typing.Optional[AgentV1KeepAlive] = None) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a AgentV1KeepAlive. - """ - self._send_model(message or AgentV1KeepAlive(type="KeepAlive")) - - def send_update_prompt(self, message: AgentV1UpdatePrompt) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a AgentV1UpdatePrompt. - """ - self._send_model(message) - - def send_media(self, message: bytes) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a bytes. - """ - self._send(message) - - def recv(self) -> V1SocketClientResponse: - """ - Receive a message from the websocket connection. - """ - data = self._websocket.recv() - if isinstance(data, bytes): - return data # type: ignore - json_data = json.loads(data) - try: - return construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore - except Exception: - _logger.warning("Skipping unknown WebSocket message; update your SDK version to support new message types.") - return json_data # type: ignore - - def _send(self, data: typing.Any) -> None: - """ - Send a message to the websocket connection. 
- """ - if isinstance(data, dict): - data = json.dumps(data) - self._websocket.send(data) - - def _send_model(self, data: typing.Any) -> None: - """ - Send a Pydantic model to the websocket connection. - """ - self._send(_sanitize_numeric_types(data.dict())) diff --git a/src/deepgram/listen/v1/socket_client.py.bak b/src/deepgram/listen/v1/socket_client.py.bak deleted file mode 100644 index 387c107f..00000000 --- a/src/deepgram/listen/v1/socket_client.py.bak +++ /dev/null @@ -1,236 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import json -import logging -import typing -from json.decoder import JSONDecodeError - -import websockets -import websockets.sync.connection as websockets_sync_connection -from ...core.events import EventEmitterMixin, EventType -from ...core.unchecked_base_model import construct_type -from .types.listen_v1close_stream import ListenV1CloseStream -from .types.listen_v1finalize import ListenV1Finalize -from .types.listen_v1keep_alive import ListenV1KeepAlive -from .types.listen_v1metadata import ListenV1Metadata -from .types.listen_v1results import ListenV1Results -from .types.listen_v1speech_started import ListenV1SpeechStarted -from .types.listen_v1utterance_end import ListenV1UtteranceEnd - -try: - from websockets.legacy.client import WebSocketClientProtocol # type: ignore -except ImportError: - from websockets import WebSocketClientProtocol # type: ignore - -_logger = logging.getLogger(__name__) -V1SocketClientResponse = typing.Union[ListenV1Results, ListenV1Metadata, ListenV1UtteranceEnd, ListenV1SpeechStarted] - - -class AsyncV1SocketClient(EventEmitterMixin): - def __init__(self, *, websocket: WebSocketClientProtocol): - super().__init__() - self._websocket = websocket - - async def __aiter__(self): - async for message in self._websocket: - if isinstance(message, bytes): - yield message - else: - try: - yield construct_type(type_=V1SocketClientResponse, object_=json.loads(message)) # type: ignore - except 
Exception: - _logger.warning( - "Skipping unknown WebSocket message; update your SDK version to support new message types." - ) - continue - - async def start_listening(self): - """ - Start listening for messages on the websocket connection. - - Emits events in the following order: - - EventType.OPEN when connection is established - - EventType.MESSAGE for each message received - - EventType.ERROR if an error occurs - - EventType.CLOSE when connection is closed - """ - await self._emit_async(EventType.OPEN, None) - try: - async for raw_message in self._websocket: - if isinstance(raw_message, bytes): - parsed = raw_message - else: - json_data = json.loads(raw_message) - try: - parsed = construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore - except Exception: - _logger.warning( - "Skipping unknown WebSocket message; update your SDK version to support new message types." - ) - continue - await self._emit_async(EventType.MESSAGE, parsed) - except Exception as exc: - await self._emit_async(EventType.ERROR, exc) - finally: - await self._emit_async(EventType.CLOSE, None) - - async def send_media(self, message: bytes) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a bytes. - """ - await self._send(message) - - async def send_finalize(self, message: typing.Optional[ListenV1Finalize] = None) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a ListenV1Finalize. - """ - await self._send_model(message or ListenV1Finalize(type="Finalize")) - - async def send_close_stream(self, message: typing.Optional[ListenV1CloseStream] = None) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a ListenV1CloseStream. - """ - await self._send_model(message or ListenV1CloseStream(type="CloseStream")) - - async def send_keep_alive(self, message: typing.Optional[ListenV1KeepAlive] = None) -> None: - """ - Send a message to the websocket connection. 
- The message will be sent as a ListenV1KeepAlive. - """ - await self._send_model(message or ListenV1KeepAlive(type="KeepAlive")) - - async def recv(self) -> V1SocketClientResponse: - """ - Receive a message from the websocket connection. - """ - data = await self._websocket.recv() - if isinstance(data, bytes): - return data # type: ignore - json_data = json.loads(data) - try: - return construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore - except Exception: - _logger.warning("Skipping unknown WebSocket message; update your SDK version to support new message types.") - return json_data # type: ignore - - async def _send(self, data: typing.Any) -> None: - """ - Send a message to the websocket connection. - """ - if isinstance(data, dict): - data = json.dumps(data) - await self._websocket.send(data) - - async def _send_model(self, data: typing.Any) -> None: - """ - Send a Pydantic model to the websocket connection. - """ - await self._send(data.dict()) - - -class V1SocketClient(EventEmitterMixin): - def __init__(self, *, websocket: websockets_sync_connection.Connection): - super().__init__() - self._websocket = websocket - - def __iter__(self): - for message in self._websocket: - if isinstance(message, bytes): - yield message - else: - try: - yield construct_type(type_=V1SocketClientResponse, object_=json.loads(message)) # type: ignore - except Exception: - _logger.warning( - "Skipping unknown WebSocket message; update your SDK version to support new message types." - ) - continue - - def start_listening(self): - """ - Start listening for messages on the websocket connection. 
- - Emits events in the following order: - - EventType.OPEN when connection is established - - EventType.MESSAGE for each message received - - EventType.ERROR if an error occurs - - EventType.CLOSE when connection is closed - """ - self._emit(EventType.OPEN, None) - try: - for raw_message in self._websocket: - if isinstance(raw_message, bytes): - parsed = raw_message - else: - json_data = json.loads(raw_message) - try: - parsed = construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore - except Exception: - _logger.warning( - "Skipping unknown WebSocket message; update your SDK version to support new message types." - ) - continue - self._emit(EventType.MESSAGE, parsed) - except Exception as exc: - self._emit(EventType.ERROR, exc) - finally: - self._emit(EventType.CLOSE, None) - - def send_media(self, message: bytes) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a bytes. - """ - self._send(message) - - def send_finalize(self, message: typing.Optional[ListenV1Finalize] = None) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a ListenV1Finalize. - """ - self._send_model(message or ListenV1Finalize(type="Finalize")) - - def send_close_stream(self, message: typing.Optional[ListenV1CloseStream] = None) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a ListenV1CloseStream. - """ - self._send_model(message or ListenV1CloseStream(type="CloseStream")) - - def send_keep_alive(self, message: typing.Optional[ListenV1KeepAlive] = None) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a ListenV1KeepAlive. - """ - self._send_model(message or ListenV1KeepAlive(type="KeepAlive")) - - def recv(self) -> V1SocketClientResponse: - """ - Receive a message from the websocket connection. 
- """ - data = self._websocket.recv() - if isinstance(data, bytes): - return data # type: ignore - json_data = json.loads(data) - try: - return construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore - except Exception: - _logger.warning("Skipping unknown WebSocket message; update your SDK version to support new message types.") - return json_data # type: ignore - - def _send(self, data: typing.Any) -> None: - """ - Send a message to the websocket connection. - """ - if isinstance(data, dict): - data = json.dumps(data) - self._websocket.send(data) - - def _send_model(self, data: typing.Any) -> None: - """ - Send a Pydantic model to the websocket connection. - """ - self._send(data.dict()) diff --git a/src/deepgram/listen/v2/socket_client.py.bak b/src/deepgram/listen/v2/socket_client.py.bak deleted file mode 100644 index 4bf24c36..00000000 --- a/src/deepgram/listen/v2/socket_client.py.bak +++ /dev/null @@ -1,205 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import json -import logging -import typing -from json.decoder import JSONDecodeError - -import websockets -import websockets.sync.connection as websockets_sync_connection -from ...core.events import EventEmitterMixin, EventType -from ...core.unchecked_base_model import construct_type -from .types.listen_v2close_stream import ListenV2CloseStream -from .types.listen_v2connected import ListenV2Connected -from .types.listen_v2fatal_error import ListenV2FatalError -from .types.listen_v2turn_info import ListenV2TurnInfo - -try: - from websockets.legacy.client import WebSocketClientProtocol # type: ignore -except ImportError: - from websockets import WebSocketClientProtocol # type: ignore - -_logger = logging.getLogger(__name__) -V2SocketClientResponse = typing.Union[ListenV2Connected, ListenV2TurnInfo, ListenV2FatalError] - - -class AsyncV2SocketClient(EventEmitterMixin): - def __init__(self, *, websocket: WebSocketClientProtocol): - super().__init__() - self._websocket = websocket - - async def __aiter__(self): - async for message in self._websocket: - if isinstance(message, bytes): - yield message - else: - try: - yield construct_type(type_=V2SocketClientResponse, object_=json.loads(message)) # type: ignore - except Exception: - _logger.warning( - "Skipping unknown WebSocket message; update your SDK version to support new message types." - ) - continue - - async def start_listening(self): - """ - Start listening for messages on the websocket connection. 
- - Emits events in the following order: - - EventType.OPEN when connection is established - - EventType.MESSAGE for each message received - - EventType.ERROR if an error occurs - - EventType.CLOSE when connection is closed - """ - await self._emit_async(EventType.OPEN, None) - try: - async for raw_message in self._websocket: - if isinstance(raw_message, bytes): - parsed = raw_message - else: - json_data = json.loads(raw_message) - try: - parsed = construct_type(type_=V2SocketClientResponse, object_=json_data) # type: ignore - except Exception: - _logger.warning( - "Skipping unknown WebSocket message; update your SDK version to support new message types." - ) - continue - await self._emit_async(EventType.MESSAGE, parsed) - except Exception as exc: - await self._emit_async(EventType.ERROR, exc) - finally: - await self._emit_async(EventType.CLOSE, None) - - async def send_media(self, message: bytes) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a bytes. - """ - await self._send(message) - - async def send_close_stream(self, message: typing.Optional[ListenV2CloseStream] = None) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a ListenV2CloseStream. - """ - await self._send_model(message or ListenV2CloseStream(type="CloseStream")) - - async def recv(self) -> V2SocketClientResponse: - """ - Receive a message from the websocket connection. - """ - data = await self._websocket.recv() - if isinstance(data, bytes): - return data # type: ignore - json_data = json.loads(data) - try: - return construct_type(type_=V2SocketClientResponse, object_=json_data) # type: ignore - except Exception: - _logger.warning("Skipping unknown WebSocket message; update your SDK version to support new message types.") - return json_data # type: ignore - - async def _send(self, data: typing.Any) -> None: - """ - Send a message to the websocket connection. 
- """ - if isinstance(data, dict): - data = json.dumps(data) - await self._websocket.send(data) - - async def _send_model(self, data: typing.Any) -> None: - """ - Send a Pydantic model to the websocket connection. - """ - await self._send(data.dict()) - - -class V2SocketClient(EventEmitterMixin): - def __init__(self, *, websocket: websockets_sync_connection.Connection): - super().__init__() - self._websocket = websocket - - def __iter__(self): - for message in self._websocket: - if isinstance(message, bytes): - yield message - else: - try: - yield construct_type(type_=V2SocketClientResponse, object_=json.loads(message)) # type: ignore - except Exception: - _logger.warning( - "Skipping unknown WebSocket message; update your SDK version to support new message types." - ) - continue - - def start_listening(self): - """ - Start listening for messages on the websocket connection. - - Emits events in the following order: - - EventType.OPEN when connection is established - - EventType.MESSAGE for each message received - - EventType.ERROR if an error occurs - - EventType.CLOSE when connection is closed - """ - self._emit(EventType.OPEN, None) - try: - for raw_message in self._websocket: - if isinstance(raw_message, bytes): - parsed = raw_message - else: - json_data = json.loads(raw_message) - try: - parsed = construct_type(type_=V2SocketClientResponse, object_=json_data) # type: ignore - except Exception: - _logger.warning( - "Skipping unknown WebSocket message; update your SDK version to support new message types." - ) - continue - self._emit(EventType.MESSAGE, parsed) - except Exception as exc: - self._emit(EventType.ERROR, exc) - finally: - self._emit(EventType.CLOSE, None) - - def send_media(self, message: bytes) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a bytes. 
- """ - self._send(message) - - def send_close_stream(self, message: typing.Optional[ListenV2CloseStream] = None) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a ListenV2CloseStream. - """ - self._send_model(message or ListenV2CloseStream(type="CloseStream")) - - def recv(self) -> V2SocketClientResponse: - """ - Receive a message from the websocket connection. - """ - data = self._websocket.recv() - if isinstance(data, bytes): - return data # type: ignore - json_data = json.loads(data) - try: - return construct_type(type_=V2SocketClientResponse, object_=json_data) # type: ignore - except Exception: - _logger.warning("Skipping unknown WebSocket message; update your SDK version to support new message types.") - return json_data # type: ignore - - def _send(self, data: typing.Any) -> None: - """ - Send a message to the websocket connection. - """ - if isinstance(data, dict): - data = json.dumps(data) - self._websocket.send(data) - - def _send_model(self, data: typing.Any) -> None: - """ - Send a Pydantic model to the websocket connection. - """ - self._send(data.dict()) diff --git a/src/deepgram/speak/v1/socket_client.py.bak b/src/deepgram/speak/v1/socket_client.py.bak deleted file mode 100644 index 671e0bd2..00000000 --- a/src/deepgram/speak/v1/socket_client.py.bak +++ /dev/null @@ -1,237 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import json -import logging -import typing -from json.decoder import JSONDecodeError - -import websockets -import websockets.sync.connection as websockets_sync_connection -from ...core.events import EventEmitterMixin, EventType -from ...core.unchecked_base_model import construct_type -from .types.speak_v1clear import SpeakV1Clear -from .types.speak_v1cleared import SpeakV1Cleared -from .types.speak_v1close import SpeakV1Close -from .types.speak_v1flush import SpeakV1Flush -from .types.speak_v1flushed import SpeakV1Flushed -from .types.speak_v1metadata import SpeakV1Metadata -from .types.speak_v1text import SpeakV1Text -from .types.speak_v1warning import SpeakV1Warning - -try: - from websockets.legacy.client import WebSocketClientProtocol # type: ignore -except ImportError: - from websockets import WebSocketClientProtocol # type: ignore - -_logger = logging.getLogger(__name__) -V1SocketClientResponse = typing.Union[bytes, SpeakV1Metadata, SpeakV1Flushed, SpeakV1Cleared, SpeakV1Warning] - - -class AsyncV1SocketClient(EventEmitterMixin): - def __init__(self, *, websocket: WebSocketClientProtocol): - super().__init__() - self._websocket = websocket - - async def __aiter__(self): - async for message in self._websocket: - if isinstance(message, bytes): - yield message - else: - try: - yield construct_type(type_=V1SocketClientResponse, object_=json.loads(message)) # type: ignore - except Exception: - _logger.warning( - "Skipping unknown WebSocket message; update your SDK version to support new message types." - ) - continue - - async def start_listening(self): - """ - Start listening for messages on the websocket connection. 
- - Emits events in the following order: - - EventType.OPEN when connection is established - - EventType.MESSAGE for each message received - - EventType.ERROR if an error occurs - - EventType.CLOSE when connection is closed - """ - await self._emit_async(EventType.OPEN, None) - try: - async for raw_message in self._websocket: - if isinstance(raw_message, bytes): - parsed = raw_message - else: - json_data = json.loads(raw_message) - try: - parsed = construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore - except Exception: - _logger.warning( - "Skipping unknown WebSocket message; update your SDK version to support new message types." - ) - continue - await self._emit_async(EventType.MESSAGE, parsed) - except Exception as exc: - await self._emit_async(EventType.ERROR, exc) - finally: - await self._emit_async(EventType.CLOSE, None) - - async def send_text(self, message: SpeakV1Text) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a SpeakV1Text. - """ - await self._send_model(message) - - async def send_flush(self, message: typing.Optional[SpeakV1Flush] = None) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a SpeakV1Flush. - """ - await self._send_model(message or SpeakV1Flush(type="Flush")) - - async def send_clear(self, message: typing.Optional[SpeakV1Clear] = None) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a SpeakV1Clear. - """ - await self._send_model(message or SpeakV1Clear(type="Clear")) - - async def send_close(self, message: typing.Optional[SpeakV1Close] = None) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a SpeakV1Close. - """ - await self._send_model(message or SpeakV1Close(type="Close")) - - async def recv(self) -> V1SocketClientResponse: - """ - Receive a message from the websocket connection. 
- """ - data = await self._websocket.recv() - if isinstance(data, bytes): - return data # type: ignore - json_data = json.loads(data) - try: - return construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore - except Exception: - _logger.warning("Skipping unknown WebSocket message; update your SDK version to support new message types.") - return json_data # type: ignore - - async def _send(self, data: typing.Any) -> None: - """ - Send a message to the websocket connection. - """ - if isinstance(data, dict): - data = json.dumps(data) - await self._websocket.send(data) - - async def _send_model(self, data: typing.Any) -> None: - """ - Send a Pydantic model to the websocket connection. - """ - await self._send(data.dict()) - - -class V1SocketClient(EventEmitterMixin): - def __init__(self, *, websocket: websockets_sync_connection.Connection): - super().__init__() - self._websocket = websocket - - def __iter__(self): - for message in self._websocket: - if isinstance(message, bytes): - yield message - else: - try: - yield construct_type(type_=V1SocketClientResponse, object_=json.loads(message)) # type: ignore - except Exception: - _logger.warning( - "Skipping unknown WebSocket message; update your SDK version to support new message types." - ) - continue - - def start_listening(self): - """ - Start listening for messages on the websocket connection. 
- - Emits events in the following order: - - EventType.OPEN when connection is established - - EventType.MESSAGE for each message received - - EventType.ERROR if an error occurs - - EventType.CLOSE when connection is closed - """ - self._emit(EventType.OPEN, None) - try: - for raw_message in self._websocket: - if isinstance(raw_message, bytes): - parsed = raw_message - else: - json_data = json.loads(raw_message) - try: - parsed = construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore - except Exception: - _logger.warning( - "Skipping unknown WebSocket message; update your SDK version to support new message types." - ) - continue - self._emit(EventType.MESSAGE, parsed) - except Exception as exc: - self._emit(EventType.ERROR, exc) - finally: - self._emit(EventType.CLOSE, None) - - def send_text(self, message: SpeakV1Text) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a SpeakV1Text. - """ - self._send_model(message) - - def send_flush(self, message: typing.Optional[SpeakV1Flush] = None) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a SpeakV1Flush. - """ - self._send_model(message or SpeakV1Flush(type="Flush")) - - def send_clear(self, message: typing.Optional[SpeakV1Clear] = None) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a SpeakV1Clear. - """ - self._send_model(message or SpeakV1Clear(type="Clear")) - - def send_close(self, message: typing.Optional[SpeakV1Close] = None) -> None: - """ - Send a message to the websocket connection. - The message will be sent as a SpeakV1Close. - """ - self._send_model(message or SpeakV1Close(type="Close")) - - def recv(self) -> V1SocketClientResponse: - """ - Receive a message from the websocket connection. 
- """ - data = self._websocket.recv() - if isinstance(data, bytes): - return data # type: ignore - json_data = json.loads(data) - try: - return construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore - except Exception: - _logger.warning("Skipping unknown WebSocket message; update your SDK version to support new message types.") - return json_data # type: ignore - - def _send(self, data: typing.Any) -> None: - """ - Send a message to the websocket connection. - """ - if isinstance(data, dict): - data = json.dumps(data) - self._websocket.send(data) - - def _send_model(self, data: typing.Any) -> None: - """ - Send a Pydantic model to the websocket connection. - """ - self._send(data.dict()) diff --git a/src/deepgram/types/listen_v1response_results_channels_item_alternatives_item_paragraphs_paragraphs_item.py.bak b/src/deepgram/types/listen_v1response_results_channels_item_alternatives_item_paragraphs_paragraphs_item.py.bak deleted file mode 100644 index eec3866b..00000000 --- a/src/deepgram/types/listen_v1response_results_channels_item_alternatives_item_paragraphs_paragraphs_item.py.bak +++ /dev/null @@ -1,29 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel -from .listen_v1response_results_channels_item_alternatives_item_paragraphs_paragraphs_item_sentences_item import ( - ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParagraphsItemSentencesItem, -) - - -class ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParagraphsItem(UncheckedBaseModel): - sentences: typing.Optional[ - typing.List[ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParagraphsItemSentencesItem] - ] = None - speaker: typing.Optional[int] = None - num_words: typing.Optional[int] = None - start: typing.Optional[float] = None - end: typing.Optional[float] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/deepgram/types/listen_v1response_results_utterances_item.py.bak b/src/deepgram/types/listen_v1response_results_utterances_item.py.bak deleted file mode 100644 index 0947d9f5..00000000 --- a/src/deepgram/types/listen_v1response_results_utterances_item.py.bak +++ /dev/null @@ -1,28 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel -from .listen_v1response_results_utterances_item_words_item import ListenV1ResponseResultsUtterancesItemWordsItem - - -class ListenV1ResponseResultsUtterancesItem(UncheckedBaseModel): - start: typing.Optional[float] = None - end: typing.Optional[float] = None - confidence: typing.Optional[float] = None - channel: typing.Optional[int] = None - transcript: typing.Optional[str] = None - words: typing.Optional[typing.List[ListenV1ResponseResultsUtterancesItemWordsItem]] = None - speaker: typing.Optional[int] = None - id: typing.Optional[str] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/deepgram/types/listen_v1response_results_utterances_item_words_item.py.bak b/src/deepgram/types/listen_v1response_results_utterances_item_words_item.py.bak deleted file mode 100644 index 6cd1313a..00000000 --- a/src/deepgram/types/listen_v1response_results_utterances_item_words_item.py.bak +++ /dev/null @@ -1,26 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.unchecked_base_model import UncheckedBaseModel - - -class ListenV1ResponseResultsUtterancesItemWordsItem(UncheckedBaseModel): - word: typing.Optional[str] = None - start: typing.Optional[float] = None - end: typing.Optional[float] = None - confidence: typing.Optional[float] = None - speaker: typing.Optional[int] = None - speaker_confidence: typing.Optional[float] = None - punctuated_word: typing.Optional[str] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow From eb2c96351da92297da16decc11243c4a3cd04fbc Mon Sep 17 00:00:00 2001 From: Greg Holmes Date: Tue, 14 Apr 2026 13:18:47 +0100 Subject: [PATCH 5/8] fix(websockets): re-apply manual patches after regen --- src/deepgram/agent/v1/socket_client.py | 50 +++++++++++++++++-------- src/deepgram/listen/v1/socket_client.py | 42 ++++++++++----------- src/deepgram/listen/v2/socket_client.py | 26 ++++++------- src/deepgram/speak/v1/socket_client.py | 42 ++++++++++----------- 4 files changed, 86 insertions(+), 74 deletions(-) diff --git a/src/deepgram/agent/v1/socket_client.py b/src/deepgram/agent/v1/socket_client.py index 3d455f7f..21b30ec1 100644 --- a/src/deepgram/agent/v1/socket_client.py +++ b/src/deepgram/agent/v1/socket_client.py @@ -3,9 +3,7 @@ import json import logging import typing -from json.decoder import JSONDecodeError -import websockets import websockets.sync.connection as websockets_sync_connection from ...core.events import EventEmitterMixin, EventType from ...core.unchecked_base_model import construct_type @@ -39,6 +37,26 @@ from websockets import WebSocketClientProtocol # type: ignore _logger = logging.getLogger(__name__) + + +def _sanitize_numeric_types(obj: typing.Any) -> typing.Any: + """ + 
Recursively convert float values that are whole numbers to int. + + Workaround for Fern-generated models that type integer API fields + (like sample_rate) as float, causing JSON serialization to produce + values like 44100.0 instead of 44100. The Deepgram API rejects + float representations of integer fields. + + See: https://github.com/deepgram/internal-api-specs/issues/205 + """ + if isinstance(obj, dict): + return {k: _sanitize_numeric_types(v) for k, v in obj.items()} + elif isinstance(obj, list): + return [_sanitize_numeric_types(item) for item in obj] + elif isinstance(obj, float) and obj.is_integer(): + return int(obj) + return obj V1SocketClientResponse = typing.Union[ AgentV1ReceiveFunctionCallResponse, AgentV1PromptUpdated, @@ -70,7 +88,7 @@ async def __aiter__(self): yield message else: try: - yield construct_type(V1SocketClientResponse, json.loads(message)) # type: ignore + yield construct_type(type_=V1SocketClientResponse, object_=json.loads(message)) # type: ignore except Exception: _logger.warning( "Skipping unknown WebSocket message; update your SDK version to support new message types." @@ -95,14 +113,14 @@ async def start_listening(self): else: json_data = json.loads(raw_message) try: - parsed = construct_type(V1SocketClientResponse, json_data) # type: ignore + parsed = construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore except Exception: _logger.warning( "Skipping unknown WebSocket message; update your SDK version to support new message types." 
) continue await self._emit_async(EventType.MESSAGE, parsed) - except (websockets.WebSocketException, JSONDecodeError) as exc: + except Exception as exc: await self._emit_async(EventType.ERROR, exc) finally: await self._emit_async(EventType.CLOSE, None) @@ -142,12 +160,12 @@ async def send_function_call_response(self, message: AgentV1SendFunctionCallResp """ await self._send_model(message) - async def send_keep_alive(self, message: AgentV1KeepAlive) -> None: + async def send_keep_alive(self, message: typing.Optional[AgentV1KeepAlive] = None) -> None: """ Send a message to the websocket connection. The message will be sent as a AgentV1KeepAlive. """ - await self._send_model(message) + await self._send_model(message or AgentV1KeepAlive(type="KeepAlive")) async def send_update_prompt(self, message: AgentV1UpdatePrompt) -> None: """ @@ -179,7 +197,7 @@ async def recv(self) -> V1SocketClientResponse: return data # type: ignore json_data = json.loads(data) try: - return construct_type(V1SocketClientResponse, json_data) # type: ignore + return construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore except Exception: _logger.warning("Skipping unknown WebSocket message; update your SDK version to support new message types.") return json_data # type: ignore @@ -196,7 +214,7 @@ async def _send_model(self, data: typing.Any) -> None: """ Send a Pydantic model to the websocket connection. """ - await self._send(data.dict()) + await self._send(_sanitize_numeric_types(data.dict())) class V1SocketClient(EventEmitterMixin): @@ -210,7 +228,7 @@ def __iter__(self): yield message else: try: - yield construct_type(V1SocketClientResponse, json.loads(message)) # type: ignore + yield construct_type(type_=V1SocketClientResponse, object_=json.loads(message)) # type: ignore except Exception: _logger.warning( "Skipping unknown WebSocket message; update your SDK version to support new message types." 
@@ -235,14 +253,14 @@ def start_listening(self): else: json_data = json.loads(raw_message) try: - parsed = construct_type(V1SocketClientResponse, json_data) # type: ignore + parsed = construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore except Exception: _logger.warning( "Skipping unknown WebSocket message; update your SDK version to support new message types." ) continue self._emit(EventType.MESSAGE, parsed) - except (websockets.WebSocketException, JSONDecodeError) as exc: + except Exception as exc: self._emit(EventType.ERROR, exc) finally: self._emit(EventType.CLOSE, None) @@ -282,12 +300,12 @@ def send_function_call_response(self, message: AgentV1SendFunctionCallResponse) """ self._send_model(message) - def send_keep_alive(self, message: AgentV1KeepAlive) -> None: + def send_keep_alive(self, message: typing.Optional[AgentV1KeepAlive] = None) -> None: """ Send a message to the websocket connection. The message will be sent as a AgentV1KeepAlive. """ - self._send_model(message) + self._send_model(message or AgentV1KeepAlive(type="KeepAlive")) def send_update_prompt(self, message: AgentV1UpdatePrompt) -> None: """ @@ -319,7 +337,7 @@ def recv(self) -> V1SocketClientResponse: return data # type: ignore json_data = json.loads(data) try: - return construct_type(V1SocketClientResponse, json_data) # type: ignore + return construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore except Exception: _logger.warning("Skipping unknown WebSocket message; update your SDK version to support new message types.") return json_data # type: ignore @@ -336,4 +354,4 @@ def _send_model(self, data: typing.Any) -> None: """ Send a Pydantic model to the websocket connection. 
""" - self._send(data.dict()) + self._send(_sanitize_numeric_types(data.dict())) diff --git a/src/deepgram/listen/v1/socket_client.py b/src/deepgram/listen/v1/socket_client.py index 011786d2..5954d116 100644 --- a/src/deepgram/listen/v1/socket_client.py +++ b/src/deepgram/listen/v1/socket_client.py @@ -3,9 +3,7 @@ import json import logging import typing -from json.decoder import JSONDecodeError -import websockets import websockets.sync.connection as websockets_sync_connection from ...core.events import EventEmitterMixin, EventType from ...core.unchecked_base_model import construct_type @@ -37,7 +35,7 @@ async def __aiter__(self): yield message else: try: - yield construct_type(V1SocketClientResponse, json.loads(message)) # type: ignore + yield construct_type(type_=V1SocketClientResponse, object_=json.loads(message)) # type: ignore except Exception: _logger.warning( "Skipping unknown WebSocket message; update your SDK version to support new message types." @@ -62,14 +60,14 @@ async def start_listening(self): else: json_data = json.loads(raw_message) try: - parsed = construct_type(V1SocketClientResponse, json_data) # type: ignore + parsed = construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore except Exception: _logger.warning( "Skipping unknown WebSocket message; update your SDK version to support new message types." ) continue await self._emit_async(EventType.MESSAGE, parsed) - except (websockets.WebSocketException, JSONDecodeError) as exc: + except Exception as exc: await self._emit_async(EventType.ERROR, exc) finally: await self._emit_async(EventType.CLOSE, None) @@ -81,26 +79,26 @@ async def send_media(self, message: bytes) -> None: """ await self._send(message) - async def send_finalize(self, message: ListenV1Finalize) -> None: + async def send_finalize(self, message: typing.Optional[ListenV1Finalize] = None) -> None: """ Send a message to the websocket connection. The message will be sent as a ListenV1Finalize. 
""" - await self._send_model(message) + await self._send_model(message or ListenV1Finalize(type="Finalize")) - async def send_close_stream(self, message: ListenV1CloseStream) -> None: + async def send_close_stream(self, message: typing.Optional[ListenV1CloseStream] = None) -> None: """ Send a message to the websocket connection. The message will be sent as a ListenV1CloseStream. """ - await self._send_model(message) + await self._send_model(message or ListenV1CloseStream(type="CloseStream")) - async def send_keep_alive(self, message: ListenV1KeepAlive) -> None: + async def send_keep_alive(self, message: typing.Optional[ListenV1KeepAlive] = None) -> None: """ Send a message to the websocket connection. The message will be sent as a ListenV1KeepAlive. """ - await self._send_model(message) + await self._send_model(message or ListenV1KeepAlive(type="KeepAlive")) async def recv(self) -> V1SocketClientResponse: """ @@ -111,7 +109,7 @@ async def recv(self) -> V1SocketClientResponse: return data # type: ignore json_data = json.loads(data) try: - return construct_type(V1SocketClientResponse, json_data) # type: ignore + return construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore except Exception: _logger.warning("Skipping unknown WebSocket message; update your SDK version to support new message types.") return json_data # type: ignore @@ -142,7 +140,7 @@ def __iter__(self): yield message else: try: - yield construct_type(V1SocketClientResponse, json.loads(message)) # type: ignore + yield construct_type(type_=V1SocketClientResponse, object_=json.loads(message)) # type: ignore except Exception: _logger.warning( "Skipping unknown WebSocket message; update your SDK version to support new message types." 
@@ -167,14 +165,14 @@ def start_listening(self): else: json_data = json.loads(raw_message) try: - parsed = construct_type(V1SocketClientResponse, json_data) # type: ignore + parsed = construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore except Exception: _logger.warning( "Skipping unknown WebSocket message; update your SDK version to support new message types." ) continue self._emit(EventType.MESSAGE, parsed) - except (websockets.WebSocketException, JSONDecodeError) as exc: + except Exception as exc: self._emit(EventType.ERROR, exc) finally: self._emit(EventType.CLOSE, None) @@ -186,26 +184,26 @@ def send_media(self, message: bytes) -> None: """ self._send(message) - def send_finalize(self, message: ListenV1Finalize) -> None: + def send_finalize(self, message: typing.Optional[ListenV1Finalize] = None) -> None: """ Send a message to the websocket connection. The message will be sent as a ListenV1Finalize. """ - self._send_model(message) + self._send_model(message or ListenV1Finalize(type="Finalize")) - def send_close_stream(self, message: ListenV1CloseStream) -> None: + def send_close_stream(self, message: typing.Optional[ListenV1CloseStream] = None) -> None: """ Send a message to the websocket connection. The message will be sent as a ListenV1CloseStream. """ - self._send_model(message) + self._send_model(message or ListenV1CloseStream(type="CloseStream")) - def send_keep_alive(self, message: ListenV1KeepAlive) -> None: + def send_keep_alive(self, message: typing.Optional[ListenV1KeepAlive] = None) -> None: """ Send a message to the websocket connection. The message will be sent as a ListenV1KeepAlive. 
""" - self._send_model(message) + self._send_model(message or ListenV1KeepAlive(type="KeepAlive")) def recv(self) -> V1SocketClientResponse: """ @@ -216,7 +214,7 @@ def recv(self) -> V1SocketClientResponse: return data # type: ignore json_data = json.loads(data) try: - return construct_type(V1SocketClientResponse, json_data) # type: ignore + return construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore except Exception: _logger.warning("Skipping unknown WebSocket message; update your SDK version to support new message types.") return json_data # type: ignore diff --git a/src/deepgram/listen/v2/socket_client.py b/src/deepgram/listen/v2/socket_client.py index d6dd1e05..d214f0f5 100644 --- a/src/deepgram/listen/v2/socket_client.py +++ b/src/deepgram/listen/v2/socket_client.py @@ -3,9 +3,7 @@ import json import logging import typing -from json.decoder import JSONDecodeError -import websockets import websockets.sync.connection as websockets_sync_connection from ...core.events import EventEmitterMixin, EventType from ...core.unchecked_base_model import construct_type @@ -37,7 +35,7 @@ async def __aiter__(self): yield message else: try: - yield construct_type(V2SocketClientResponse, json.loads(message)) # type: ignore + yield construct_type(type_=V2SocketClientResponse, object_=json.loads(message)) # type: ignore except Exception: _logger.warning( "Skipping unknown WebSocket message; update your SDK version to support new message types." @@ -62,14 +60,14 @@ async def start_listening(self): else: json_data = json.loads(raw_message) try: - parsed = construct_type(V2SocketClientResponse, json_data) # type: ignore + parsed = construct_type(type_=V2SocketClientResponse, object_=json_data) # type: ignore except Exception: _logger.warning( "Skipping unknown WebSocket message; update your SDK version to support new message types." 
) continue await self._emit_async(EventType.MESSAGE, parsed) - except (websockets.WebSocketException, JSONDecodeError) as exc: + except Exception as exc: await self._emit_async(EventType.ERROR, exc) finally: await self._emit_async(EventType.CLOSE, None) @@ -81,12 +79,12 @@ async def send_media(self, message: bytes) -> None: """ await self._send(message) - async def send_close_stream(self, message: ListenV2CloseStream) -> None: + async def send_close_stream(self, message: typing.Optional[ListenV2CloseStream] = None) -> None: """ Send a message to the websocket connection. The message will be sent as a ListenV2CloseStream. """ - await self._send_model(message) + await self._send_model(message or ListenV2CloseStream(type="CloseStream")) async def send_configure(self, message: typing.Any) -> None: """ @@ -104,7 +102,7 @@ async def recv(self) -> V2SocketClientResponse: return data # type: ignore json_data = json.loads(data) try: - return construct_type(V2SocketClientResponse, json_data) # type: ignore + return construct_type(type_=V2SocketClientResponse, object_=json_data) # type: ignore except Exception: _logger.warning("Skipping unknown WebSocket message; update your SDK version to support new message types.") return json_data # type: ignore @@ -135,7 +133,7 @@ def __iter__(self): yield message else: try: - yield construct_type(V2SocketClientResponse, json.loads(message)) # type: ignore + yield construct_type(type_=V2SocketClientResponse, object_=json.loads(message)) # type: ignore except Exception: _logger.warning( "Skipping unknown WebSocket message; update your SDK version to support new message types." 
@@ -160,14 +158,14 @@ def start_listening(self): else: json_data = json.loads(raw_message) try: - parsed = construct_type(V2SocketClientResponse, json_data) # type: ignore + parsed = construct_type(type_=V2SocketClientResponse, object_=json_data) # type: ignore except Exception: _logger.warning( "Skipping unknown WebSocket message; update your SDK version to support new message types." ) continue self._emit(EventType.MESSAGE, parsed) - except (websockets.WebSocketException, JSONDecodeError) as exc: + except Exception as exc: self._emit(EventType.ERROR, exc) finally: self._emit(EventType.CLOSE, None) @@ -179,12 +177,12 @@ def send_media(self, message: bytes) -> None: """ self._send(message) - def send_close_stream(self, message: ListenV2CloseStream) -> None: + def send_close_stream(self, message: typing.Optional[ListenV2CloseStream] = None) -> None: """ Send a message to the websocket connection. The message will be sent as a ListenV2CloseStream. """ - self._send_model(message) + self._send_model(message or ListenV2CloseStream(type="CloseStream")) def send_configure(self, message: typing.Any) -> None: """ @@ -202,7 +200,7 @@ def recv(self) -> V2SocketClientResponse: return data # type: ignore json_data = json.loads(data) try: - return construct_type(V2SocketClientResponse, json_data) # type: ignore + return construct_type(type_=V2SocketClientResponse, object_=json_data) # type: ignore except Exception: _logger.warning("Skipping unknown WebSocket message; update your SDK version to support new message types.") return json_data # type: ignore diff --git a/src/deepgram/speak/v1/socket_client.py b/src/deepgram/speak/v1/socket_client.py index 461c8190..e3c28bb5 100644 --- a/src/deepgram/speak/v1/socket_client.py +++ b/src/deepgram/speak/v1/socket_client.py @@ -3,9 +3,7 @@ import json import logging import typing -from json.decoder import JSONDecodeError -import websockets import websockets.sync.connection as websockets_sync_connection from ...core.events import 
EventEmitterMixin, EventType from ...core.unchecked_base_model import construct_type @@ -38,7 +36,7 @@ async def __aiter__(self): yield message else: try: - yield construct_type(V1SocketClientResponse, json.loads(message)) # type: ignore + yield construct_type(type_=V1SocketClientResponse, object_=json.loads(message)) # type: ignore except Exception: _logger.warning( "Skipping unknown WebSocket message; update your SDK version to support new message types." @@ -63,14 +61,14 @@ async def start_listening(self): else: json_data = json.loads(raw_message) try: - parsed = construct_type(V1SocketClientResponse, json_data) # type: ignore + parsed = construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore except Exception: _logger.warning( "Skipping unknown WebSocket message; update your SDK version to support new message types." ) continue await self._emit_async(EventType.MESSAGE, parsed) - except (websockets.WebSocketException, JSONDecodeError) as exc: + except Exception as exc: await self._emit_async(EventType.ERROR, exc) finally: await self._emit_async(EventType.CLOSE, None) @@ -82,26 +80,26 @@ async def send_text(self, message: SpeakV1Text) -> None: """ await self._send_model(message) - async def send_flush(self, message: SpeakV1Flush) -> None: + async def send_flush(self, message: typing.Optional[SpeakV1Flush] = None) -> None: """ Send a message to the websocket connection. The message will be sent as a SpeakV1Flush. """ - await self._send_model(message) + await self._send_model(message or SpeakV1Flush(type="Flush")) - async def send_clear(self, message: SpeakV1Clear) -> None: + async def send_clear(self, message: typing.Optional[SpeakV1Clear] = None) -> None: """ Send a message to the websocket connection. The message will be sent as a SpeakV1Clear. 
""" - await self._send_model(message) + await self._send_model(message or SpeakV1Clear(type="Clear")) - async def send_close(self, message: SpeakV1Close) -> None: + async def send_close(self, message: typing.Optional[SpeakV1Close] = None) -> None: """ Send a message to the websocket connection. The message will be sent as a SpeakV1Close. """ - await self._send_model(message) + await self._send_model(message or SpeakV1Close(type="Close")) async def recv(self) -> V1SocketClientResponse: """ @@ -112,7 +110,7 @@ async def recv(self) -> V1SocketClientResponse: return data # type: ignore json_data = json.loads(data) try: - return construct_type(V1SocketClientResponse, json_data) # type: ignore + return construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore except Exception: _logger.warning("Skipping unknown WebSocket message; update your SDK version to support new message types.") return json_data # type: ignore @@ -143,7 +141,7 @@ def __iter__(self): yield message else: try: - yield construct_type(V1SocketClientResponse, json.loads(message)) # type: ignore + yield construct_type(type_=V1SocketClientResponse, object_=json.loads(message)) # type: ignore except Exception: _logger.warning( "Skipping unknown WebSocket message; update your SDK version to support new message types." @@ -168,14 +166,14 @@ def start_listening(self): else: json_data = json.loads(raw_message) try: - parsed = construct_type(V1SocketClientResponse, json_data) # type: ignore + parsed = construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore except Exception: _logger.warning( "Skipping unknown WebSocket message; update your SDK version to support new message types." 
) continue self._emit(EventType.MESSAGE, parsed) - except (websockets.WebSocketException, JSONDecodeError) as exc: + except Exception as exc: self._emit(EventType.ERROR, exc) finally: self._emit(EventType.CLOSE, None) @@ -187,26 +185,26 @@ def send_text(self, message: SpeakV1Text) -> None: """ self._send_model(message) - def send_flush(self, message: SpeakV1Flush) -> None: + def send_flush(self, message: typing.Optional[SpeakV1Flush] = None) -> None: """ Send a message to the websocket connection. The message will be sent as a SpeakV1Flush. """ - self._send_model(message) + self._send_model(message or SpeakV1Flush(type="Flush")) - def send_clear(self, message: SpeakV1Clear) -> None: + def send_clear(self, message: typing.Optional[SpeakV1Clear] = None) -> None: """ Send a message to the websocket connection. The message will be sent as a SpeakV1Clear. """ - self._send_model(message) + self._send_model(message or SpeakV1Clear(type="Clear")) - def send_close(self, message: SpeakV1Close) -> None: + def send_close(self, message: typing.Optional[SpeakV1Close] = None) -> None: """ Send a message to the websocket connection. The message will be sent as a SpeakV1Close. 
""" - self._send_model(message) + self._send_model(message or SpeakV1Close(type="Close")) def recv(self) -> V1SocketClientResponse: """ @@ -217,7 +215,7 @@ def recv(self) -> V1SocketClientResponse: return data # type: ignore json_data = json.loads(data) try: - return construct_type(V1SocketClientResponse, json_data) # type: ignore + return construct_type(type_=V1SocketClientResponse, object_=json_data) # type: ignore except Exception: _logger.warning("Skipping unknown WebSocket message; update your SDK version to support new message types.") return json_data # type: ignore From b682eaca034de785c4e0d597dc934c0281c96399 Mon Sep 17 00:00:00 2001 From: Greg Holmes Date: Tue, 14 Apr 2026 13:19:32 +0100 Subject: [PATCH 6/8] fix(types): re-apply float to int corrections after regen --- ...nnels_item_alternatives_item_paragraphs_paragraphs_item.py | 4 ++-- .../types/listen_v1response_results_utterances_item.py | 4 ++-- .../listen_v1response_results_utterances_item_words_item.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/deepgram/types/listen_v1response_results_channels_item_alternatives_item_paragraphs_paragraphs_item.py b/src/deepgram/types/listen_v1response_results_channels_item_alternatives_item_paragraphs_paragraphs_item.py index a622d4ae..eec3866b 100644 --- a/src/deepgram/types/listen_v1response_results_channels_item_alternatives_item_paragraphs_paragraphs_item.py +++ b/src/deepgram/types/listen_v1response_results_channels_item_alternatives_item_paragraphs_paragraphs_item.py @@ -14,8 +14,8 @@ class ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParagraphsIte sentences: typing.Optional[ typing.List[ListenV1ResponseResultsChannelsItemAlternativesItemParagraphsParagraphsItemSentencesItem] ] = None - speaker: typing.Optional[float] = None - num_words: typing.Optional[float] = None + speaker: typing.Optional[int] = None + num_words: typing.Optional[int] = None start: typing.Optional[float] = None end: typing.Optional[float] = 
None diff --git a/src/deepgram/types/listen_v1response_results_utterances_item.py b/src/deepgram/types/listen_v1response_results_utterances_item.py index ed8a8ddd..0947d9f5 100644 --- a/src/deepgram/types/listen_v1response_results_utterances_item.py +++ b/src/deepgram/types/listen_v1response_results_utterances_item.py @@ -12,10 +12,10 @@ class ListenV1ResponseResultsUtterancesItem(UncheckedBaseModel): start: typing.Optional[float] = None end: typing.Optional[float] = None confidence: typing.Optional[float] = None - channel: typing.Optional[float] = None + channel: typing.Optional[int] = None transcript: typing.Optional[str] = None words: typing.Optional[typing.List[ListenV1ResponseResultsUtterancesItemWordsItem]] = None - speaker: typing.Optional[float] = None + speaker: typing.Optional[int] = None id: typing.Optional[str] = None if IS_PYDANTIC_V2: diff --git a/src/deepgram/types/listen_v1response_results_utterances_item_words_item.py b/src/deepgram/types/listen_v1response_results_utterances_item_words_item.py index 716f2e2a..6cd1313a 100644 --- a/src/deepgram/types/listen_v1response_results_utterances_item_words_item.py +++ b/src/deepgram/types/listen_v1response_results_utterances_item_words_item.py @@ -12,7 +12,7 @@ class ListenV1ResponseResultsUtterancesItemWordsItem(UncheckedBaseModel): start: typing.Optional[float] = None end: typing.Optional[float] = None confidence: typing.Optional[float] = None - speaker: typing.Optional[float] = None + speaker: typing.Optional[int] = None speaker_confidence: typing.Optional[float] = None punctuated_word: typing.Optional[str] = None From d04d3db076a66e3de0d63aa15797fb0836b49a14 Mon Sep 17 00:00:00 2001 From: Greg Holmes Date: Tue, 14 Apr 2026 13:19:51 +0100 Subject: [PATCH 7/8] fix(tests): update wiremock datetime format and speak generate tests --- tests/manual/speak/v1/audio/generate/async.py | 8 +++----- tests/manual/speak/v1/audio/generate/main.py | 6 ++---- wiremock/wiremock-mappings.json | 2 +- 3 files changed, 6 
insertions(+), 10 deletions(-) diff --git a/tests/manual/speak/v1/audio/generate/async.py b/tests/manual/speak/v1/audio/generate/async.py index 22c7b840..ff1d2094 100644 --- a/tests/manual/speak/v1/audio/generate/async.py +++ b/tests/manual/speak/v1/audio/generate/async.py @@ -18,14 +18,12 @@ async def main() -> None: try: text = "Hello, this is a sample text to speech conversion." print(f"Sending async text-to-speech generation request - Text: {text[:50]}...") - response = await client.speak.v1.audio.generate( + response = client.speak.v1.audio.generate( text=text, ) print("Response received successfully") - print(f"Response type: {type(response)}") - if hasattr(response, "audio"): - print(f"Audio data length: {len(response.audio) if response.audio else 0} bytes") - print(f"Response body: {response}") + audio_bytes = b"".join([chunk async for chunk in response]) + print(f"Audio data length: {len(audio_bytes)} bytes") except Exception as e: print(f"Error occurred: {type(e).__name__}") # Log request headers if available diff --git a/tests/manual/speak/v1/audio/generate/main.py b/tests/manual/speak/v1/audio/generate/main.py index c2307a1c..8bf00e91 100644 --- a/tests/manual/speak/v1/audio/generate/main.py +++ b/tests/manual/speak/v1/audio/generate/main.py @@ -19,10 +19,8 @@ text=text, ) print("Response received successfully") - print(f"Response type: {type(response)}") - if hasattr(response, "audio"): - print(f"Audio data length: {len(response.audio) if response.audio else 0} bytes") - print(f"Response body: {response}") + audio_bytes = b"".join(response) + print(f"Audio data length: {len(audio_bytes)} bytes") except Exception as e: print(f"Error occurred: {type(e).__name__}") # Log request headers if available diff --git a/wiremock/wiremock-mappings.json b/wiremock/wiremock-mappings.json index 630c6a96..05e23bc8 100644 --- a/wiremock/wiremock-mappings.json +++ b/wiremock/wiremock-mappings.json @@ -1 +1 @@ 
-{"mappings":[{"id":"533b5d52-ab21-4763-aaae-87cf52f49aa5","name":"List Agent Think Models - default","request":{"urlPathTemplate":"/v1/agent/settings/think/models","method":"GET"},"response":{"status":200,"body":"{\n \"models\": [\n {\n \"id\": \"gpt-5\",\n \"name\": \"name\",\n \"provider\": \"open_ai\"\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"533b5d52-ab21-4763-aaae-87cf52f49aa5","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}},"postServeActions":[]},{"id":"20e1029e-8bb9-4092-a809-b943e60822ef","name":"Token-based Authentication - default","request":{"urlPathTemplate":"/v1/auth/grant","method":"POST"},"response":{"status":200,"body":"{\n \"access_token\": \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIn0.dozjgNryP4J3jVmNHl0w5N_XgL0n3I9PlFUP0THsR8U\",\n \"expires_in\": 30\n}","headers":{"Content-Type":"application/json"}},"uuid":"20e1029e-8bb9-4092-a809-b943e60822ef","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"49d8d51a-7f01-4598-804f-b6f54cdc22da","name":"Transcribe and analyze pre-recorded audio and video - 
default","request":{"urlPathTemplate":"/v1/listen","method":"POST","queryParameters":{"callback":{"equalTo":"callback"},"callback_method":{"equalTo":"POST"},"extra":{"equalTo":"extra"},"sentiment":{"equalTo":"true"},"summarize":{"equalTo":"v2"},"tag":{"equalTo":"tag"},"topics":{"equalTo":"true"},"custom_topic":{"equalTo":"custom_topic"},"custom_topic_mode":{"equalTo":"extended"},"intents":{"equalTo":"true"},"custom_intent":{"equalTo":"custom_intent"},"custom_intent_mode":{"equalTo":"extended"},"detect_entities":{"equalTo":"true"},"detect_language":{"equalTo":"true"},"diarize":{"equalTo":"true"},"dictation":{"equalTo":"true"},"encoding":{"equalTo":"linear16"},"filler_words":{"equalTo":"true"},"keywords":{"equalTo":"keywords"},"language":{"equalTo":"language"},"measurements":{"equalTo":"true"},"model":{"equalTo":"nova-3"},"multichannel":{"equalTo":"true"},"numerals":{"equalTo":"true"},"paragraphs":{"equalTo":"true"},"profanity_filter":{"equalTo":"true"},"punctuate":{"equalTo":"true"},"redact":{"equalTo":"redact"},"replace":{"equalTo":"replace"},"search":{"equalTo":"search"},"smart_format":{"equalTo":"true"},"utterances":{"equalTo":"true"},"utt_split":{"equalTo":"1.1"},"version":{"equalTo":"latest"},"mip_opt_out":{"equalTo":"true"}}},"response":{"status":200,"body":"{\n \"metadata\": {\n \"request_id\": \"a847f427-4ad5-4d67-9b95-db801e58251c\",\n \"sha256\": \"154e291ecfa8be6ab8343560bcc109008fa7853eb5372533e8efdefc9b504c33\",\n \"created\": \"2024-05-12T18:57:13Z\",\n \"duration\": 25.933313,\n \"channels\": 1,\n \"models\": [\n \"30089e05-99d1-4376-b32e-c263170674af\"\n ],\n \"model_info\": {\n \"30089e05-99d1-4376-b32e-c263170674af\": {\n \"name\": \"2-general-nova\",\n \"version\": \"2024-01-09.29447\",\n \"arch\": \"nova-2\"\n }\n },\n \"summary_info\": {\n \"model_uuid\": \"67875a7f-c9c4-48a0-aa55-5bdb8a91c34a\",\n \"input_tokens\": 95,\n \"output_tokens\": 63\n },\n \"sentiment_info\": {\n \"model_uuid\": \"80ab3179-d113-4254-bd6b-4a2f96498695\",\n 
\"input_tokens\": 105,\n \"output_tokens\": 105\n },\n \"topics_info\": {\n \"model_uuid\": \"80ab3179-d113-4254-bd6b-4a2f96498695\",\n \"input_tokens\": 105,\n \"output_tokens\": 7\n },\n \"intents_info\": {\n \"model_uuid\": \"80ab3179-d113-4254-bd6b-4a2f96498695\",\n \"input_tokens\": 105,\n \"output_tokens\": 4\n },\n \"tags\": [\n \"test\"\n ]\n },\n \"results\": {\n \"channels\": [\n {}\n ],\n \"utterances\": [\n {}\n ],\n \"summary\": {\n \"result\": \"success\",\n \"short\": \"Speaker 0 discusses the significance of the first all-female spacewalk with an all-female team, stating that it is a tribute to the skilled and qualified women who were denied opportunities in the past.\"\n },\n \"topics\": {\n \"results\": {\n \"topics\": {\n \"segments\": [\n {\n \"text\": \"And, um, I think if it signifies anything, it is, uh, to honor the the women who came before us who, um, were skilled and qualified, um, and didn't get the the same opportunities that we have today.\",\n \"start_word\": 32,\n \"end_word\": 69,\n \"topics\": [\n {\n \"topic\": \"Spacewalk\",\n \"confidence_score\": 0.91581345\n }\n ]\n }\n ]\n }\n }\n },\n \"intents\": {\n \"results\": {\n \"intents\": {\n \"segments\": [\n {\n \"text\": \"If you found this valuable, you can subscribe to the show on spotify or your favorite podcast app.\",\n \"start_word\": 354,\n \"end_word\": 414,\n \"intents\": [\n {\n \"intent\": \"Encourage podcasting\",\n \"confidence_score\": 0.0038975573\n }\n ]\n }\n ]\n }\n }\n },\n \"sentiments\": {\n \"segments\": [\n {\n \"text\": \"Yeah. As as much as, um, it's worth celebrating, uh, the first, uh, spacewalk, um, with an all-female team, I think many of us are looking forward to it just being normal. 
And, um, I think if it signifies anything, it is, uh, to honor the the women who came before us who, um, were skilled and qualified, um, and didn't get the the same opportunities that we have today.\",\n \"start_word\": 0,\n \"end_word\": 69,\n \"sentiment\": \"positive\",\n \"sentiment_score\": 0.5810546875\n }\n ],\n \"average\": {\n \"sentiment\": \"positive\",\n \"sentiment_score\": 0.5810185185185185\n }\n }\n }\n}","headers":{"Content-Type":"application/json"}},"uuid":"49d8d51a-7f01-4598-804f-b6f54cdc22da","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"0a0be61b-f024-4120-9c54-23bca3e07c93","name":"List Models - default","request":{"urlPathTemplate":"/v1/models","method":"GET","queryParameters":{"include_outdated":{"equalTo":"true"}}},"response":{"status":200,"body":"{\n \"stt\": [\n {\n \"name\": \"nova-3\",\n \"canonical_name\": \"nova-3\",\n \"architecture\": \"base\",\n \"languages\": [\n \"en\",\n \"en-us\"\n ],\n \"version\": \"2021-11-10.1\",\n \"uuid\": \"6b28e919-8427-4f32-9847-492e2efd7daf\",\n \"batch\": true,\n \"streaming\": true,\n \"formatted_output\": true\n }\n ],\n \"tts\": [\n {\n \"name\": \"zeus\",\n \"canonical_name\": \"aura-2-zeus-en\",\n \"architecture\": \"aura-2\",\n \"languages\": [\n \"en\",\n \"en-US\"\n ],\n \"version\": \"2025-04-07.0\",\n \"uuid\": \"2baf189d-91ac-481d-b6d1-750888667b31\",\n \"metadata\": {\n \"accent\": \"American\",\n \"age\": \"Adult\",\n \"color\": \"#C58DFF\",\n \"image\": \"https://static.deepgram.com/examples/avatars/zeus.jpg\",\n \"sample\": \"https://static.deepgram.com/examples/Aura-2-zeus.wav\",\n \"tags\": [\n \"masculine\",\n \"deep\",\n \"trustworthy\",\n \"smooth\"\n ],\n \"use_cases\": [\n \"IVR\"\n ]\n }\n }\n 
]\n}","headers":{"Content-Type":"application/json"}},"uuid":"0a0be61b-f024-4120-9c54-23bca3e07c93","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}},"postServeActions":[]},{"id":"0f94d3ea-43b6-4a1a-bce4-ab05b85440ae","name":"Get a specific Model - default","request":{"urlPathTemplate":"/v1/models/{model_id}","method":"GET","pathParameters":{"model_id":{"equalTo":"af6e9977-99f6-4d8f-b6f5-dfdf6fb6e291"}}},"response":{"status":200,"body":"{\n \"name\": \"general\",\n \"canonical_name\": \"enhanced-general\",\n \"architecture\": \"polaris\",\n \"languages\": [\n \"en\",\n \"en-us\"\n ],\n \"version\": \"2022-05-18.1\",\n \"uuid\": \"c7226e9e-ae1c-4057-ae2a-a71a6b0dc588\",\n \"batch\": true,\n \"streaming\": true,\n \"formatted_output\": false\n}","headers":{"Content-Type":"application/json"}},"uuid":"0f94d3ea-43b6-4a1a-bce4-ab05b85440ae","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"d08cac56-bc4d-4756-8fd1-d508914334d5","name":"List Projects - default","request":{"urlPathTemplate":"/v1/projects","method":"GET"},"response":{"status":200,"body":"{\n \"projects\": [\n {\n \"project_id\": \"project_id\",\n \"name\": \"name\"\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"d08cac56-bc4d-4756-8fd1-d508914334d5","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}},"postServeActions":[]},{"id":"6f0163a8-530c-4e25-bbe0-9ca86b9525dc","name":"Get a Project - default","request":{"urlPathTemplate":"/v1/projects/{project_id}","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"limit":{"equalTo":"1.1"},"page":{"equalTo":"1.1"}}},"response":{"status":200,"body":"{\n \"project_id\": \"project_id\",\n \"mip_opt_out\": true,\n \"name\": 
\"name\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"6f0163a8-530c-4e25-bbe0-9ca86b9525dc","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"68918577-6401-4439-8533-356257ff7bcf","name":"Delete a Project - default","request":{"urlPathTemplate":"/v1/projects/{project_id}","method":"DELETE","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"message\": \"message\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"68918577-6401-4439-8533-356257ff7bcf","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"659fc38b-3934-4e43-93bf-d331f547449e","name":"Update a Project - default","request":{"urlPathTemplate":"/v1/projects/{project_id}","method":"PATCH","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"message\": \"Successfully updated project info.\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"659fc38b-3934-4e43-93bf-d331f547449e","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"43ae6622-ad2f-4c81-9bc9-a8bbe17ef9d8","name":"Leave a Project - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/leave","method":"DELETE","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"message\": \"message\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"43ae6622-ad2f-4c81-9bc9-a8bbe17ef9d8","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"032d9b1c-3b87-40fb-bfab-8c5be92a5d71","name":"List Project Keys - 
default","request":{"urlPathTemplate":"/v1/projects/{project_id}/keys","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"status":{"equalTo":"active"}}},"response":{"status":200,"body":"{\n \"api_keys\": [\n {\n \"member\": {\n \"member_id\": \"1000-2000-3000-4000\",\n \"email\": \"john@test.com\"\n },\n \"api_key\": {\n \"api_key_id\": \"1234567890abcdef1234567890abcdef\",\n \"comment\": \"A comment\",\n \"scopes\": [\n \"admin\"\n ],\n \"created\": \"2021-01-01T00:00:00Z\"\n }\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"032d9b1c-3b87-40fb-bfab-8c5be92a5d71","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"0167c735-0b6f-4715-8df8-32300d4dae72","name":"Create a Project Key - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/keys","method":"POST","pathParameters":{"project_id":{"equalTo":"project_id"}}},"response":{"status":200,"body":"{\n \"api_key_id\": \"api_key_id\",\n \"key\": \"key\",\n \"comment\": \"comment\",\n \"scopes\": [\n \"scopes\",\n \"scopes\"\n ],\n \"tags\": [\n \"tags\",\n \"tags\"\n ],\n \"expiration_date\": \"2024-01-15T09:30:00Z\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"0167c735-0b6f-4715-8df8-32300d4dae72","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"c9812dd3-f87e-4798-aec3-af0933330dd5","name":"Get a Project Key - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/keys/{key_id}","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"key_id":{"equalTo":"123456789012345678901234"}}},"response":{"status":200,"body":"{\n \"item\": {\n \"member\": {\n \"member_id\": \"1000-2000-3000-4000\",\n \"email\": \"john@test.com\",\n \"first_name\": \"John\",\n \"last_name\": \"Doe\",\n \"api_key\": {\n \"api_key_id\": \"1000-2000-3000-4000\",\n 
\"comment\": \"A comment\",\n \"scopes\": [\n \"admin\"\n ],\n \"tags\": [\n \"prod\",\n \"west-region\"\n ],\n \"expiration_date\": \"2021-01-01T00:00:00Z\",\n \"created\": \"2021-01-01T00:00:00Z\"\n }\n }\n }\n}","headers":{"Content-Type":"application/json"}},"uuid":"c9812dd3-f87e-4798-aec3-af0933330dd5","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"3d2fbc7c-7bac-436f-a6ac-abe1b2c2caac","name":"Delete a Project Key - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/keys/{key_id}","method":"DELETE","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"key_id":{"equalTo":"123456789012345678901234"}}},"response":{"status":200,"body":"{\n \"message\": \"message\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"3d2fbc7c-7bac-436f-a6ac-abe1b2c2caac","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"91e103d5-72f7-463d-840d-310069e33de9","name":"List Project Members - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/members","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"members\": [\n {\n \"member_id\": \"member_id\",\n \"email\": \"email\"\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"91e103d5-72f7-463d-840d-310069e33de9","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"515c6f7e-09c3-43ea-ad6c-65bc11d20f46","name":"Delete a Project Member - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/members/{member_id}","method":"DELETE","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"member_id":{"equalTo":"123456789012345678901234"}}},"response":{"status":200,"body":"{\n \"message\": 
\"message\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"515c6f7e-09c3-43ea-ad6c-65bc11d20f46","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"a920ad0e-2796-4361-ac16-ac83fb75e32a","name":"List Project Models - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/models","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"include_outdated":{"equalTo":"true"}}},"response":{"status":200,"body":"{\n \"stt\": [\n {\n \"name\": \"nova-3\",\n \"canonical_name\": \"nova-3\",\n \"architecture\": \"base\",\n \"languages\": [\n \"en\",\n \"en-us\"\n ],\n \"version\": \"2021-11-10.1\",\n \"uuid\": \"6b28e919-8427-4f32-9847-492e2efd7daf\",\n \"batch\": true,\n \"streaming\": true,\n \"formatted_output\": true\n }\n ],\n \"tts\": [\n {\n \"name\": \"zeus\",\n \"canonical_name\": \"aura-2-zeus-en\",\n \"architecture\": \"aura-2\",\n \"languages\": [\n \"en\",\n \"en-US\"\n ],\n \"version\": \"2025-04-07.0\",\n \"uuid\": \"2baf189d-91ac-481d-b6d1-750888667b31\",\n \"metadata\": {\n \"accent\": \"American\",\n \"age\": \"Adult\",\n \"color\": \"#C58DFF\",\n \"image\": \"https://static.deepgram.com/examples/avatars/zeus.jpg\",\n \"sample\": \"https://static.deepgram.com/examples/Aura-2-zeus.wav\",\n \"tags\": [\n \"masculine\",\n \"deep\",\n \"trustworthy\",\n \"smooth\"\n ],\n \"use_cases\": [\n \"IVR\"\n ]\n }\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"a920ad0e-2796-4361-ac16-ac83fb75e32a","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"9f8c6bf2-ebee-4956-b39f-0291b9d64b6e","name":"Get a Project Model - 
default","request":{"urlPathTemplate":"/v1/projects/{project_id}/models/{model_id}","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"model_id":{"equalTo":"af6e9977-99f6-4d8f-b6f5-dfdf6fb6e291"}}},"response":{"status":200,"body":"{\n \"name\": \"general\",\n \"canonical_name\": \"enhanced-general\",\n \"architecture\": \"polaris\",\n \"languages\": [\n \"en\",\n \"en-us\"\n ],\n \"version\": \"2022-05-18.1\",\n \"uuid\": \"c7226e9e-ae1c-4057-ae2a-a71a6b0dc588\",\n \"batch\": true,\n \"streaming\": true,\n \"formatted_output\": false\n}","headers":{"Content-Type":"application/json"}},"uuid":"9f8c6bf2-ebee-4956-b39f-0291b9d64b6e","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"d6a14959-05fa-4aec-9f0c-ba2a817c66e5","name":"List Project Requests - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/requests","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"start":{"equalTo":"2024-01-15T09:30:00.000Z"},"end":{"equalTo":"2024-01-15T09:30:00.000Z"},"limit":{"equalTo":"1.1"},"page":{"equalTo":"1.1"},"accessor":{"equalTo":"12345678-1234-1234-1234-123456789012"},"request_id":{"equalTo":"12345678-1234-1234-1234-123456789012"},"deployment":{"equalTo":"hosted"},"endpoint":{"equalTo":"listen"},"method":{"equalTo":"sync"},"status":{"equalTo":"succeeded"}}},"response":{"status":200,"body":"{\n \"page\": 1.1,\n \"limit\": 1.1,\n \"requests\": [\n {\n \"request_id\": \"request_id\",\n \"project_uuid\": \"project_uuid\",\n \"created\": \"2024-01-15T09:30:00Z\",\n \"path\": \"path\",\n \"api_key_id\": \"api_key_id\",\n \"response\": {\n \"key\": \"value\"\n },\n \"code\": 1.1,\n \"deployment\": \"deployment\",\n \"callback\": \"callback\"\n }\n 
]\n}","headers":{"Content-Type":"application/json"}},"uuid":"d6a14959-05fa-4aec-9f0c-ba2a817c66e5","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"733e39aa-d3ef-4ea7-8062-af080c6288c4","name":"Get a Project Request - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/requests/{request_id}","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"request_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"request\": {\n \"request_id\": \"request_id\",\n \"project_uuid\": \"project_uuid\",\n \"created\": \"2024-01-15T09:30:00Z\",\n \"path\": \"path\",\n \"api_key_id\": \"api_key_id\",\n \"response\": {\n \"key\": \"value\"\n },\n \"code\": 1.1,\n \"deployment\": \"deployment\",\n \"callback\": \"callback\"\n }\n}","headers":{"Content-Type":"application/json"}},"uuid":"733e39aa-d3ef-4ea7-8062-af080c6288c4","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"6309dd55-c993-4ce1-b0b2-01a41c9f08d6","name":"Get Project Usage - 
default","request":{"urlPathTemplate":"/v1/projects/{project_id}/usage","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"start":{"equalTo":"start"},"end":{"equalTo":"end"},"accessor":{"equalTo":"12345678-1234-1234-1234-123456789012"},"alternatives":{"equalTo":"true"},"callback_method":{"equalTo":"true"},"callback":{"equalTo":"true"},"channels":{"equalTo":"true"},"custom_intent_mode":{"equalTo":"true"},"custom_intent":{"equalTo":"true"},"custom_topic_mode":{"equalTo":"true"},"custom_topic":{"equalTo":"true"},"deployment":{"equalTo":"hosted"},"detect_entities":{"equalTo":"true"},"detect_language":{"equalTo":"true"},"diarize":{"equalTo":"true"},"dictation":{"equalTo":"true"},"encoding":{"equalTo":"true"},"endpoint":{"equalTo":"listen"},"extra":{"equalTo":"true"},"filler_words":{"equalTo":"true"},"intents":{"equalTo":"true"},"keyterm":{"equalTo":"true"},"keywords":{"equalTo":"true"},"language":{"equalTo":"true"},"measurements":{"equalTo":"true"},"method":{"equalTo":"sync"},"model":{"equalTo":"6f548761-c9c0-429a-9315-11a1d28499c8"},"multichannel":{"equalTo":"true"},"numerals":{"equalTo":"true"},"paragraphs":{"equalTo":"true"},"profanity_filter":{"equalTo":"true"},"punctuate":{"equalTo":"true"},"redact":{"equalTo":"true"},"replace":{"equalTo":"true"},"sample_rate":{"equalTo":"true"},"search":{"equalTo":"true"},"sentiment":{"equalTo":"true"},"smart_format":{"equalTo":"true"},"summarize":{"equalTo":"true"},"tag":{"equalTo":"tag1"},"topics":{"equalTo":"true"},"utt_split":{"equalTo":"true"},"utterances":{"equalTo":"true"},"version":{"equalTo":"true"}}},"response":{"status":200,"body":"{\n \"start\": \"2024-10-16\",\n \"end\": \"2024-10-23\",\n \"resolution\": {\n \"units\": \"day\",\n \"amount\": 1\n 
}\n}","headers":{"Content-Type":"application/json"}},"uuid":"6309dd55-c993-4ce1-b0b2-01a41c9f08d6","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"b132121b-4efe-42ad-a268-8acac35c189b","name":"Get Project Balances - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/balances","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"balances\": [\n {\n \"balance_id\": \"balance_id\",\n \"amount\": 1.1,\n \"units\": \"units\",\n \"purchase_order_id\": \"purchase_order_id\"\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"b132121b-4efe-42ad-a268-8acac35c189b","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"4019c244-52d3-4d57-902c-af837631650a","name":"Get a Project Balance - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/balances/{balance_id}","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"balance_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"balance_id\": \"balance_id\",\n \"amount\": 1.1,\n \"units\": \"units\",\n \"purchase_order_id\": \"purchase_order_id\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"4019c244-52d3-4d57-902c-af837631650a","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"555b6751-587f-400c-bf5e-400e108ad6b4","name":"Get Project Billing Breakdown - 
default","request":{"urlPathTemplate":"/v1/projects/{project_id}/billing/breakdown","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"start":{"equalTo":"start"},"end":{"equalTo":"end"},"accessor":{"equalTo":"12345678-1234-1234-1234-123456789012"},"deployment":{"equalTo":"hosted"},"tag":{"equalTo":"tag1"},"line_item":{"equalTo":"streaming::nova-3"}}},"response":{"status":200,"body":"{\n \"start\": \"2025-01-16\",\n \"end\": \"2025-01-23\",\n \"resolution\": {\n \"units\": \"day\",\n \"amount\": 1\n },\n \"results\": [\n {\n \"dollars\": 0.25,\n \"grouping\": {\n \"start\": \"2025-01-16\",\n \"end\": \"2025-01-16\",\n \"accessor\": \"123456789012345678901234\",\n \"deployment\": \"hosted\",\n \"line_item\": \"streaming::nova-3\",\n \"tags\": [\n \"tag1\",\n \"tag2\"\n ]\n }\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"555b6751-587f-400c-bf5e-400e108ad6b4","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"a61ae38c-e41f-4726-a55c-88f2135897be","name":"List Project Billing Fields - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/billing/fields","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"start":{"equalTo":"start"},"end":{"equalTo":"end"}}},"response":{"status":200,"body":"{\n \"accessors\": [\n \"12345678-1234-1234-1234-123456789012\",\n \"87654321-4321-4321-4321-210987654321\"\n ],\n \"deployments\": [\n \"hosted\",\n \"self-hosted\"\n ],\n \"tags\": [\n \"dev\",\n \"production\"\n ],\n \"line_items\": {\n \"streaming::nova-3\": \"Nova - 3 (Stream)\",\n \"sync::aura-2\": \"Aura -2 (Sync)\"\n 
}\n}","headers":{"Content-Type":"application/json"}},"uuid":"a61ae38c-e41f-4726-a55c-88f2135897be","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"85b4373c-ba39-41b1-84e8-ae1ee6b180ca","name":"List Project Purchases - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/purchases","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"limit":{"equalTo":"1.1"}}},"response":{"status":200,"body":"{\n \"orders\": [\n {\n \"order_id\": \"025e19ba-b6d9-4a04-9f99-4fe715aca5f1\",\n \"expiration\": \"2026-03-04T00:00:00Z\",\n \"created\": \"2023-02-21T21:13:40Z\",\n \"amount\": 150,\n \"units\": \"usd\",\n \"order_type\": \"promotional\"\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"85b4373c-ba39-41b1-84e8-ae1ee6b180ca","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"2dd14c67-ed4e-4d97-9636-0a712899deb8","name":"List Project Invites - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/invites","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"invites\": [\n {\n \"email\": \"email\",\n \"scope\": \"scope\"\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"2dd14c67-ed4e-4d97-9636-0a712899deb8","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"7c109496-adfe-4e85-b007-a6f799ee95cb","name":"Create a Project Invite - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/invites","method":"POST","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"message\": 
\"message\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"7c109496-adfe-4e85-b007-a6f799ee95cb","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"d6d268d0-d91e-4a65-80e0-339621173db9","name":"Delete a Project Invite - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/invites/{email}","method":"DELETE","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"email":{"equalTo":"john.doe@example.com"}}},"response":{"status":200,"body":"{\n \"message\": \"message\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"d6d268d0-d91e-4a65-80e0-339621173db9","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"1b965d71-c930-4a0b-90f3-2289f80f3634","name":"List Project Member Scopes - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/members/{member_id}/scopes","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"member_id":{"equalTo":"123456789012345678901234"}}},"response":{"status":200,"body":"{\n \"scopes\": [\n \"scopes\"\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"1b965d71-c930-4a0b-90f3-2289f80f3634","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"eb2f5de2-b887-47be-abd5-7cb702aca55d","name":"Update Project Member Scopes - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/members/{member_id}/scopes","method":"PUT","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"member_id":{"equalTo":"123456789012345678901234"}}},"response":{"status":200,"body":"{\n \"message\": 
\"message\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"eb2f5de2-b887-47be-abd5-7cb702aca55d","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"9bdf51a4-1e10-41b8-8de2-2df650562db3","name":"Get Project Usage Breakdown - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/usage/breakdown","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"start":{"equalTo":"start"},"end":{"equalTo":"end"},"grouping":{"equalTo":"accessor"},"accessor":{"equalTo":"12345678-1234-1234-1234-123456789012"},"alternatives":{"equalTo":"true"},"callback_method":{"equalTo":"true"},"callback":{"equalTo":"true"},"channels":{"equalTo":"true"},"custom_intent_mode":{"equalTo":"true"},"custom_intent":{"equalTo":"true"},"custom_topic_mode":{"equalTo":"true"},"custom_topic":{"equalTo":"true"},"deployment":{"equalTo":"hosted"},"detect_entities":{"equalTo":"true"},"detect_language":{"equalTo":"true"},"diarize":{"equalTo":"true"},"dictation":{"equalTo":"true"},"encoding":{"equalTo":"true"},"endpoint":{"equalTo":"listen"},"extra":{"equalTo":"true"},"filler_words":{"equalTo":"true"},"intents":{"equalTo":"true"},"keyterm":{"equalTo":"true"},"keywords":{"equalTo":"true"},"language":{"equalTo":"true"},"measurements":{"equalTo":"true"},"method":{"equalTo":"sync"},"model":{"equalTo":"6f548761-c9c0-429a-9315-11a1d28499c8"},"multichannel":{"equalTo":"true"},"numerals":{"equalTo":"true"},"paragraphs":{"equalTo":"true"},"profanity_filter":{"equalTo":"true"},"punctuate":{"equalTo":"true"},"redact":{"equalTo":"true"},"replace":{"equalTo":"true"},"sample_rate":{"equalTo":"true"},"search":{"equalTo":"true"},"sentiment":{"equalTo":"true"},"smart_format":{"equalTo":"true"},"summarize":{"equalTo":"true"},"tag":{"equalTo":"tag1"},"topics":{"equalTo":"true"},"utt_split":{"equalTo":"true"},"utterances":{"equalTo":"true"},"version":{"equalTo":"true"}}},"response":{"statu
s":200,"body":"{\n \"start\": \"2025-01-16\",\n \"end\": \"2025-01-23\",\n \"resolution\": {\n \"units\": \"day\",\n \"amount\": 1\n },\n \"results\": [\n {\n \"hours\": 1619.7242069444444,\n \"total_hours\": 1621.7395791666668,\n \"agent_hours\": 41.33564388888889,\n \"tokens_in\": 0,\n \"tokens_out\": 0,\n \"tts_characters\": 9158866,\n \"requests\": 373381,\n \"grouping\": {\n \"start\": \"2025-01-16\",\n \"end\": \"2025-01-16\",\n \"accessor\": \"123456789012345678901234\",\n \"endpoint\": \"listen\",\n \"feature_set\": \"punctuate\",\n \"models\": [\n \"Nova-2\"\n ],\n \"method\": \"async\",\n \"tags\": \"tag1\",\n \"deployment\": \"self-hosted\"\n }\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"9bdf51a4-1e10-41b8-8de2-2df650562db3","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"bc9fcd54-076e-48dc-a2dd-d71a8bf8bd4e","name":"List Project Usage Fields - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/usage/fields","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"start":{"equalTo":"start"},"end":{"equalTo":"end"}}},"response":{"status":200,"body":"{\n \"tags\": [\n \"tag=dev\",\n \"tag=production\"\n ],\n \"models\": [\n {\n \"name\": \"2-medical-nova\",\n \"language\": \"en-MY\",\n \"version\": \"2024-05-31.13574\",\n \"model_id\": \"1234567890-12345-67890\"\n }\n ],\n \"processing_methods\": [\n \"sync\",\n \"streaming\"\n ],\n \"features\": [\n \"alternatives\",\n \"detect_entities\",\n \"detect_language\"\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"bc9fcd54-076e-48dc-a2dd-d71a8bf8bd4e","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"b5ac3651-d3b7-4cd7-b0b8-e1a917a16f3b","name":"Analyze text content - 
default","request":{"urlPathTemplate":"/v1/read","method":"POST","queryParameters":{"callback":{"equalTo":"callback"},"callback_method":{"equalTo":"POST"},"sentiment":{"equalTo":"true"},"summarize":{"equalTo":"v2"},"tag":{"equalTo":"tag"},"topics":{"equalTo":"true"},"custom_topic":{"equalTo":"custom_topic"},"custom_topic_mode":{"equalTo":"extended"},"intents":{"equalTo":"true"},"custom_intent":{"equalTo":"custom_intent"},"custom_intent_mode":{"equalTo":"extended"},"language":{"equalTo":"language"}}},"response":{"status":200,"body":"{\n \"metadata\": {\n \"metadata\": {\n \"request_id\": \"d04af392-db11-4c1d-83e1-20e34f0b8999\",\n \"created\": \"2024-11-18T23:47:44Z\",\n \"language\": \"en\"\n }\n },\n \"results\": {\n \"summary\": {\n \"results\": {\n \"summary\": {\n \"text\": \"The summary of the text submitted.\"\n }\n }\n },\n \"topics\": {\n \"results\": {\n \"topics\": {\n \"segments\": [\n {\n \"text\": \"And, um, I think if it signifies anything, it is, uh, to honor the the women who came before us who, um, were skilled and qualified, um, and didn't get the the same opportunities that we have today.\",\n \"start_word\": 32,\n \"end_word\": 69,\n \"topics\": [\n {\n \"topic\": \"Spacewalk\",\n \"confidence_score\": 0.91581345\n }\n ]\n }\n ]\n }\n }\n },\n \"intents\": {\n \"results\": {\n \"intents\": {\n \"segments\": [\n {\n \"text\": \"If you found this valuable, you can subscribe to the show on spotify or your favorite podcast app.\",\n \"start_word\": 354,\n \"end_word\": 414,\n \"intents\": [\n {\n \"intent\": \"Encourage podcasting\",\n \"confidence_score\": 0.0038975573\n }\n ]\n }\n ]\n }\n }\n },\n \"sentiments\": {\n \"segments\": [\n {\n \"text\": \"Yeah. As as much as, um, it's worth celebrating, uh, the first, uh, spacewalk, um, with an all-female team, I think many of us are looking forward to it just being normal. 
And, um, I think if it signifies anything, it is, uh, to honor the the women who came before us who, um, were skilled and qualified, um, and didn't get the the same opportunities that we have today.\",\n \"start_word\": 0,\n \"end_word\": 69,\n \"sentiment\": \"positive\",\n \"sentiment_score\": 0.5810546875\n }\n ],\n \"average\": {\n \"sentiment\": \"positive\",\n \"sentiment_score\": 0.5810185185185185\n }\n }\n }\n}","headers":{"Content-Type":"application/json"}},"uuid":"b5ac3651-d3b7-4cd7-b0b8-e1a917a16f3b","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"4110cb96-50e2-4fe6-b8ae-5d69120cee89","name":"List Project Self-Hosted Distribution Credentials - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/self-hosted/distribution/credentials","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"distribution_credentials\": [\n {\n \"member\": {\n \"member_id\": \"3376abcd-8e5e-49d3-92d4-876d3a4f0363\",\n \"email\": \"email@example.com\"\n },\n \"distribution_credentials\": {\n \"distribution_credentials_id\": \"8b36cfd0-472f-4a21-833f-2d6343c3a2f3\",\n \"provider\": \"quay\",\n \"comment\": \"My Self-Hosted Distribution Credentials\",\n \"scopes\": [\n \"self-hosted:product:api\",\n \"self-hosted:product:engine\"\n ],\n \"created\": \"2023-06-28T15:36:59Z\"\n }\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"4110cb96-50e2-4fe6-b8ae-5d69120cee89","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"9c12eea9-6ba6-4d70-bb14-a2742cebc114","name":"Create a Project Self-Hosted Distribution Credential - 
default","request":{"urlPathTemplate":"/v1/projects/{project_id}/self-hosted/distribution/credentials","method":"POST","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}},"queryParameters":{"provider":{"equalTo":"quay"}}},"response":{"status":200,"body":"{\n \"member\": {\n \"member_id\": \"c7b9b131-73f3-11d9-8665-0b00d2e44b83\",\n \"email\": \"email@example.com\"\n },\n \"distribution_credentials\": {\n \"distribution_credentials_id\": \"82c32c10-53b2-4d23-993f-864b3d44502a\",\n \"provider\": \"quay\",\n \"comment\": \"My Self-Hosted Distribution Credentials\",\n \"scopes\": [\n \"self-hosted:product:api\",\n \"self-hosted:product:engine\"\n ],\n \"created\": \"2023-06-28T15:36:59Z\"\n }\n}","headers":{"Content-Type":"application/json"}},"uuid":"9c12eea9-6ba6-4d70-bb14-a2742cebc114","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"a47cd13f-2314-4190-b2c7-20436ccffbd2","name":"Get a Project Self-Hosted Distribution Credential - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/self-hosted/distribution/credentials/{distribution_credentials_id}","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"distribution_credentials_id":{"equalTo":"8b36cfd0-472f-4a21-833f-2d6343c3a2f3"}}},"response":{"status":200,"body":"{\n \"member\": {\n \"member_id\": \"c7b9b131-73f3-11d9-8665-0b00d2e44b83\",\n \"email\": \"email@example.com\"\n },\n \"distribution_credentials\": {\n \"distribution_credentials_id\": \"82c32c10-53b2-4d23-993f-864b3d44502a\",\n \"provider\": \"quay\",\n \"comment\": \"My Self-Hosted Distribution Credentials\",\n \"scopes\": [\n \"self-hosted:product:api\",\n \"self-hosted:product:engine\"\n ],\n \"created\": \"2023-06-28T15:36:59Z\"\n 
}\n}","headers":{"Content-Type":"application/json"}},"uuid":"a47cd13f-2314-4190-b2c7-20436ccffbd2","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"8bd46091-0e57-4b3d-9485-a86e6f1eaf17","name":"Delete a Project Self-Hosted Distribution Credential - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/self-hosted/distribution/credentials/{distribution_credentials_id}","method":"DELETE","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"distribution_credentials_id":{"equalTo":"8b36cfd0-472f-4a21-833f-2d6343c3a2f3"}}},"response":{"status":200,"body":"{\n \"member\": {\n \"member_id\": \"c7b9b131-73f3-11d9-8665-0b00d2e44b83\",\n \"email\": \"email@example.com\"\n },\n \"distribution_credentials\": {\n \"distribution_credentials_id\": \"82c32c10-53b2-4d23-993f-864b3d44502a\",\n \"provider\": \"quay\",\n \"comment\": \"My Self-Hosted Distribution Credentials\",\n \"scopes\": [\n \"self-hosted:product:api\",\n \"self-hosted:product:engine\"\n ],\n \"created\": \"2023-06-28T15:36:59Z\"\n }\n}","headers":{"Content-Type":"application/json"}},"uuid":"8bd46091-0e57-4b3d-9485-a86e6f1eaf17","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"b06ec977-02ed-41e2-8fff-2bc45cd2166b","name":"Text to Speech transformation - default","request":{"urlPathTemplate":"/v1/speak","method":"POST"},"response":{"status":200,"body":"{\n \"key\": \"value\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"b06ec977-02ed-41e2-8fff-2bc45cd2166b","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"61883f62-09ec-4be9-b477-13a89b9677cf","name":"List Agent Configurations - 
default","request":{"urlPathTemplate":"/v1/projects/{project_id}/agents","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"agents\": [\n {\n \"agent_id\": \"agent_id\",\n \"config\": {\n \"key\": \"value\"\n },\n \"metadata\": {\n \"key\": \"value\"\n },\n \"created_at\": \"2024-01-15T09:30:00Z\",\n \"updated_at\": \"2024-01-15T09:30:00Z\"\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"61883f62-09ec-4be9-b477-13a89b9677cf","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"a43cdbee-2b2f-47fb-8220-75d337cb6d3a","name":"Create an Agent Configuration - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/agents","method":"POST","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"agent_id\": \"agent_id\",\n \"config\": {\n \"key\": \"value\"\n },\n \"metadata\": {\n \"key\": \"value\"\n }\n}","headers":{"Content-Type":"application/json"}},"uuid":"a43cdbee-2b2f-47fb-8220-75d337cb6d3a","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"2575dd3c-b1c8-4007-8f87-4e32073a9dcf","name":"Get an Agent Configuration - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/agents/{agent_id}","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"agent_id":{"equalTo":"a1b2c3d4-e5f6-7890-abcd-ef1234567890"}}},"response":{"status":200,"body":"{\n \"agent_id\": \"agent_id\",\n \"config\": {\n \"key\": \"value\"\n },\n \"metadata\": {\n \"key\": \"value\"\n },\n \"created_at\": \"2024-01-15T09:30:00Z\",\n \"updated_at\": 
\"2024-01-15T09:30:00Z\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"2575dd3c-b1c8-4007-8f87-4e32073a9dcf","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"3b52c271-0e44-4148-b726-6f5ca8c91d26","name":"Update Agent Metadata - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/agents/{agent_id}","method":"PUT","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"agent_id":{"equalTo":"a1b2c3d4-e5f6-7890-abcd-ef1234567890"}}},"response":{"status":200,"body":"{\n \"agent_id\": \"agent_id\",\n \"config\": {\n \"key\": \"value\"\n },\n \"metadata\": {\n \"key\": \"value\"\n },\n \"created_at\": \"2024-01-15T09:30:00Z\",\n \"updated_at\": \"2024-01-15T09:30:00Z\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"3b52c271-0e44-4148-b726-6f5ca8c91d26","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"e839386d-beaa-4a7a-a478-ed9d9ab1b63e","name":"Delete an Agent Configuration - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/agents/{agent_id}","method":"DELETE","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"agent_id":{"equalTo":"a1b2c3d4-e5f6-7890-abcd-ef1234567890"}}},"response":{"status":200,"body":"{\n \"key\": \"value\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"e839386d-beaa-4a7a-a478-ed9d9ab1b63e","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"bb85071e-3933-4cd3-8fd7-d35eb4c992c5","name":"List Agent Variables - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/agent-variables","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"}}},"response":{"status":200,"body":"{\n \"variables\": [\n {\n \"variable_id\": \"variable_id\",\n \"key\": \"key\",\n \"value\": {\n \"key\": \"value\"\n 
},\n \"created_at\": \"2024-01-15T09:30:00Z\",\n \"updated_at\": \"2024-01-15T09:30:00Z\"\n }\n ]\n}","headers":{"Content-Type":"application/json"}},"uuid":"bb85071e-3933-4cd3-8fd7-d35eb4c992c5","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"c5016e4b-f863-4dc7-972f-c577ac7fdc47","name":"Create an Agent Variable - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/agent-variables","method":"POST","pathParameters":{"project_id":{"equalTo":"project_id"}}},"response":{"status":200,"body":"{\n \"variable_id\": \"variable_id\",\n \"key\": \"key\",\n \"value\": {\n \"key\": \"value\"\n },\n \"created_at\": \"2024-01-15T09:30:00Z\",\n \"updated_at\": \"2024-01-15T09:30:00Z\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"c5016e4b-f863-4dc7-972f-c577ac7fdc47","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"5bb4b688-8475-4911-8ad5-725b7d5338e3","name":"Get an Agent Variable - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/agent-variables/{variable_id}","method":"GET","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"variable_id":{"equalTo":"v1a2b3c4-d5e6-7890-abcd-ef1234567890"}}},"response":{"status":200,"body":"{\n \"variable_id\": \"variable_id\",\n \"key\": \"key\",\n \"value\": {\n \"key\": \"value\"\n },\n \"created_at\": \"2024-01-15T09:30:00Z\",\n \"updated_at\": \"2024-01-15T09:30:00Z\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"5bb4b688-8475-4911-8ad5-725b7d5338e3","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"2e2c7061-7573-423f-a907-2a15dd9f3307","name":"Delete an Agent Variable - 
default","request":{"urlPathTemplate":"/v1/projects/{project_id}/agent-variables/{variable_id}","method":"DELETE","pathParameters":{"project_id":{"equalTo":"123456-7890-1234-5678-901234"},"variable_id":{"equalTo":"v1a2b3c4-d5e6-7890-abcd-ef1234567890"}}},"response":{"status":200,"body":"{\n \"key\": \"value\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"2e2c7061-7573-423f-a907-2a15dd9f3307","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}},{"id":"33765621-bc60-4ef4-b295-4b276aeb871f","name":"Update an Agent Variable - default","request":{"urlPathTemplate":"/v1/projects/{project_id}/agent-variables/{variable_id}","method":"PATCH","pathParameters":{"project_id":{"equalTo":"project_id"},"variable_id":{"equalTo":"variable_id"}}},"response":{"status":200,"body":"{\n \"variable_id\": \"variable_id\",\n \"key\": \"key\",\n \"value\": {\n \"key\": \"value\"\n },\n \"created_at\": \"2024-01-15T09:30:00Z\",\n \"updated_at\": \"2024-01-15T09:30:00Z\"\n}","headers":{"Content-Type":"application/json"}},"uuid":"33765621-bc60-4ef4-b295-4b276aeb871f","persistent":true,"priority":3,"metadata":{"mocklab":{"created":{"at":"2020-01-01T00:00:00.000Z","via":"SYSTEM"}}}}],"meta":{"total":49}} \ No newline at end of file +{"mappings": [{"id": "533b5d52-ab21-4763-aaae-87cf52f49aa5", "name": "List Agent Think Models - default", "request": {"urlPathTemplate": "/v1/agent/settings/think/models", "method": "GET"}, "response": {"status": 200, "body": "{\n \"models\": [\n {\n \"id\": \"gpt-5\",\n \"name\": \"name\",\n \"provider\": \"open_ai\"\n }\n ]\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "533b5d52-ab21-4763-aaae-87cf52f49aa5", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}, "postServeActions": []}, {"id": "20e1029e-8bb9-4092-a809-b943e60822ef", "name": "Token-based Authentication - default", "request": 
{"urlPathTemplate": "/v1/auth/grant", "method": "POST"}, "response": {"status": 200, "body": "{\n \"access_token\": \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIn0.dozjgNryP4J3jVmNHl0w5N_XgL0n3I9PlFUP0THsR8U\",\n \"expires_in\": 30\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "20e1029e-8bb9-4092-a809-b943e60822ef", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "49d8d51a-7f01-4598-804f-b6f54cdc22da", "name": "Transcribe and analyze pre-recorded audio and video - default", "request": {"urlPathTemplate": "/v1/listen", "method": "POST", "queryParameters": {"callback": {"equalTo": "callback"}, "callback_method": {"equalTo": "POST"}, "extra": {"equalTo": "extra"}, "sentiment": {"equalTo": "true"}, "summarize": {"equalTo": "v2"}, "tag": {"equalTo": "tag"}, "topics": {"equalTo": "true"}, "custom_topic": {"equalTo": "custom_topic"}, "custom_topic_mode": {"equalTo": "extended"}, "intents": {"equalTo": "true"}, "custom_intent": {"equalTo": "custom_intent"}, "custom_intent_mode": {"equalTo": "extended"}, "detect_entities": {"equalTo": "true"}, "detect_language": {"equalTo": "true"}, "diarize": {"equalTo": "true"}, "dictation": {"equalTo": "true"}, "encoding": {"equalTo": "linear16"}, "filler_words": {"equalTo": "true"}, "keywords": {"equalTo": "keywords"}, "language": {"equalTo": "language"}, "measurements": {"equalTo": "true"}, "model": {"equalTo": "nova-3"}, "multichannel": {"equalTo": "true"}, "numerals": {"equalTo": "true"}, "paragraphs": {"equalTo": "true"}, "profanity_filter": {"equalTo": "true"}, "punctuate": {"equalTo": "true"}, "redact": {"equalTo": "redact"}, "replace": {"equalTo": "replace"}, "search": {"equalTo": "search"}, "smart_format": {"equalTo": "true"}, "utterances": {"equalTo": "true"}, "utt_split": {"equalTo": "1.1"}, "version": {"equalTo": "latest"}, "mip_opt_out": {"equalTo": "true"}}}, "response": {"status": 200, "body": "{\n 
\"metadata\": {\n \"request_id\": \"a847f427-4ad5-4d67-9b95-db801e58251c\",\n \"sha256\": \"154e291ecfa8be6ab8343560bcc109008fa7853eb5372533e8efdefc9b504c33\",\n \"created\": \"2024-05-12T18:57:13Z\",\n \"duration\": 25.933313,\n \"channels\": 1,\n \"models\": [\n \"30089e05-99d1-4376-b32e-c263170674af\"\n ],\n \"model_info\": {\n \"30089e05-99d1-4376-b32e-c263170674af\": {\n \"name\": \"2-general-nova\",\n \"version\": \"2024-01-09.29447\",\n \"arch\": \"nova-2\"\n }\n },\n \"summary_info\": {\n \"model_uuid\": \"67875a7f-c9c4-48a0-aa55-5bdb8a91c34a\",\n \"input_tokens\": 95,\n \"output_tokens\": 63\n },\n \"sentiment_info\": {\n \"model_uuid\": \"80ab3179-d113-4254-bd6b-4a2f96498695\",\n \"input_tokens\": 105,\n \"output_tokens\": 105\n },\n \"topics_info\": {\n \"model_uuid\": \"80ab3179-d113-4254-bd6b-4a2f96498695\",\n \"input_tokens\": 105,\n \"output_tokens\": 7\n },\n \"intents_info\": {\n \"model_uuid\": \"80ab3179-d113-4254-bd6b-4a2f96498695\",\n \"input_tokens\": 105,\n \"output_tokens\": 4\n },\n \"tags\": [\n \"test\"\n ]\n },\n \"results\": {\n \"channels\": [\n {}\n ],\n \"utterances\": [\n {}\n ],\n \"summary\": {\n \"result\": \"success\",\n \"short\": \"Speaker 0 discusses the significance of the first all-female spacewalk with an all-female team, stating that it is a tribute to the skilled and qualified women who were denied opportunities in the past.\"\n },\n \"topics\": {\n \"results\": {\n \"topics\": {\n \"segments\": [\n {\n \"text\": \"And, um, I think if it signifies anything, it is, uh, to honor the the women who came before us who, um, were skilled and qualified, um, and didn't get the the same opportunities that we have today.\",\n \"start_word\": 32,\n \"end_word\": 69,\n \"topics\": [\n {\n \"topic\": \"Spacewalk\",\n \"confidence_score\": 0.91581345\n }\n ]\n }\n ]\n }\n }\n },\n \"intents\": {\n \"results\": {\n \"intents\": {\n \"segments\": [\n {\n \"text\": \"If you found this valuable, you can subscribe to the show on spotify or 
your favorite podcast app.\",\n \"start_word\": 354,\n \"end_word\": 414,\n \"intents\": [\n {\n \"intent\": \"Encourage podcasting\",\n \"confidence_score\": 0.0038975573\n }\n ]\n }\n ]\n }\n }\n },\n \"sentiments\": {\n \"segments\": [\n {\n \"text\": \"Yeah. As as much as, um, it's worth celebrating, uh, the first, uh, spacewalk, um, with an all-female team, I think many of us are looking forward to it just being normal. And, um, I think if it signifies anything, it is, uh, to honor the the women who came before us who, um, were skilled and qualified, um, and didn't get the the same opportunities that we have today.\",\n \"start_word\": 0,\n \"end_word\": 69,\n \"sentiment\": \"positive\",\n \"sentiment_score\": 0.5810546875\n }\n ],\n \"average\": {\n \"sentiment\": \"positive\",\n \"sentiment_score\": 0.5810185185185185\n }\n }\n }\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "49d8d51a-7f01-4598-804f-b6f54cdc22da", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "0a0be61b-f024-4120-9c54-23bca3e07c93", "name": "List Models - default", "request": {"urlPathTemplate": "/v1/models", "method": "GET", "queryParameters": {"include_outdated": {"equalTo": "true"}}}, "response": {"status": 200, "body": "{\n \"stt\": [\n {\n \"name\": \"nova-3\",\n \"canonical_name\": \"nova-3\",\n \"architecture\": \"base\",\n \"languages\": [\n \"en\",\n \"en-us\"\n ],\n \"version\": \"2021-11-10.1\",\n \"uuid\": \"6b28e919-8427-4f32-9847-492e2efd7daf\",\n \"batch\": true,\n \"streaming\": true,\n \"formatted_output\": true\n }\n ],\n \"tts\": [\n {\n \"name\": \"zeus\",\n \"canonical_name\": \"aura-2-zeus-en\",\n \"architecture\": \"aura-2\",\n \"languages\": [\n \"en\",\n \"en-US\"\n ],\n \"version\": \"2025-04-07.0\",\n \"uuid\": \"2baf189d-91ac-481d-b6d1-750888667b31\",\n \"metadata\": {\n \"accent\": \"American\",\n \"age\": \"Adult\",\n \"color\": \"#C58DFF\",\n \"image\": 
\"https://static.deepgram.com/examples/avatars/zeus.jpg\",\n \"sample\": \"https://static.deepgram.com/examples/Aura-2-zeus.wav\",\n \"tags\": [\n \"masculine\",\n \"deep\",\n \"trustworthy\",\n \"smooth\"\n ],\n \"use_cases\": [\n \"IVR\"\n ]\n }\n }\n ]\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "0a0be61b-f024-4120-9c54-23bca3e07c93", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}, "postServeActions": []}, {"id": "0f94d3ea-43b6-4a1a-bce4-ab05b85440ae", "name": "Get a specific Model - default", "request": {"urlPathTemplate": "/v1/models/{model_id}", "method": "GET", "pathParameters": {"model_id": {"equalTo": "af6e9977-99f6-4d8f-b6f5-dfdf6fb6e291"}}}, "response": {"status": 200, "body": "{\n \"name\": \"general\",\n \"canonical_name\": \"enhanced-general\",\n \"architecture\": \"polaris\",\n \"languages\": [\n \"en\",\n \"en-us\"\n ],\n \"version\": \"2022-05-18.1\",\n \"uuid\": \"c7226e9e-ae1c-4057-ae2a-a71a6b0dc588\",\n \"batch\": true,\n \"streaming\": true,\n \"formatted_output\": false\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "0f94d3ea-43b6-4a1a-bce4-ab05b85440ae", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "d08cac56-bc4d-4756-8fd1-d508914334d5", "name": "List Projects - default", "request": {"urlPathTemplate": "/v1/projects", "method": "GET"}, "response": {"status": 200, "body": "{\n \"projects\": [\n {\n \"project_id\": \"project_id\",\n \"name\": \"name\"\n }\n ]\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "d08cac56-bc4d-4756-8fd1-d508914334d5", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}, "postServeActions": []}, {"id": "6f0163a8-530c-4e25-bbe0-9ca86b9525dc", "name": "Get a Project - default", "request": {"urlPathTemplate": 
"/v1/projects/{project_id}", "method": "GET", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}}, "queryParameters": {"limit": {"equalTo": "1.1"}, "page": {"equalTo": "1.1"}}}, "response": {"status": 200, "body": "{\n \"project_id\": \"project_id\",\n \"mip_opt_out\": true,\n \"name\": \"name\"\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "6f0163a8-530c-4e25-bbe0-9ca86b9525dc", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "68918577-6401-4439-8533-356257ff7bcf", "name": "Delete a Project - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}", "method": "DELETE", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}}}, "response": {"status": 200, "body": "{\n \"message\": \"message\"\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "68918577-6401-4439-8533-356257ff7bcf", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "659fc38b-3934-4e43-93bf-d331f547449e", "name": "Update a Project - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}", "method": "PATCH", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}}}, "response": {"status": 200, "body": "{\n \"message\": \"Successfully updated project info.\"\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "659fc38b-3934-4e43-93bf-d331f547449e", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "43ae6622-ad2f-4c81-9bc9-a8bbe17ef9d8", "name": "Leave a Project - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/leave", "method": "DELETE", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}}}, "response": {"status": 200, "body": "{\n \"message\": \"message\"\n}", "headers": 
{"Content-Type": "application/json"}}, "uuid": "43ae6622-ad2f-4c81-9bc9-a8bbe17ef9d8", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "032d9b1c-3b87-40fb-bfab-8c5be92a5d71", "name": "List Project Keys - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/keys", "method": "GET", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}}, "queryParameters": {"status": {"equalTo": "active"}}}, "response": {"status": 200, "body": "{\n \"api_keys\": [\n {\n \"member\": {\n \"member_id\": \"1000-2000-3000-4000\",\n \"email\": \"john@test.com\"\n },\n \"api_key\": {\n \"api_key_id\": \"1234567890abcdef1234567890abcdef\",\n \"comment\": \"A comment\",\n \"scopes\": [\n \"admin\"\n ],\n \"created\": \"2021-01-01T00:00:00Z\"\n }\n }\n ]\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "032d9b1c-3b87-40fb-bfab-8c5be92a5d71", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "0167c735-0b6f-4715-8df8-32300d4dae72", "name": "Create a Project Key - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/keys", "method": "POST", "pathParameters": {"project_id": {"equalTo": "project_id"}}}, "response": {"status": 200, "body": "{\n \"api_key_id\": \"api_key_id\",\n \"key\": \"key\",\n \"comment\": \"comment\",\n \"scopes\": [\n \"scopes\",\n \"scopes\"\n ],\n \"tags\": [\n \"tags\",\n \"tags\"\n ],\n \"expiration_date\": \"2024-01-15T09:30:00Z\"\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "0167c735-0b6f-4715-8df8-32300d4dae72", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "c9812dd3-f87e-4798-aec3-af0933330dd5", "name": "Get a Project Key - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/keys/{key_id}", "method": "GET", 
"pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}, "key_id": {"equalTo": "123456789012345678901234"}}}, "response": {"status": 200, "body": "{\n \"item\": {\n \"member\": {\n \"member_id\": \"1000-2000-3000-4000\",\n \"email\": \"john@test.com\",\n \"first_name\": \"John\",\n \"last_name\": \"Doe\",\n \"api_key\": {\n \"api_key_id\": \"1000-2000-3000-4000\",\n \"comment\": \"A comment\",\n \"scopes\": [\n \"admin\"\n ],\n \"tags\": [\n \"prod\",\n \"west-region\"\n ],\n \"expiration_date\": \"2021-01-01T00:00:00Z\",\n \"created\": \"2021-01-01T00:00:00Z\"\n }\n }\n }\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "c9812dd3-f87e-4798-aec3-af0933330dd5", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "3d2fbc7c-7bac-436f-a6ac-abe1b2c2caac", "name": "Delete a Project Key - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/keys/{key_id}", "method": "DELETE", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}, "key_id": {"equalTo": "123456789012345678901234"}}}, "response": {"status": 200, "body": "{\n \"message\": \"message\"\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "3d2fbc7c-7bac-436f-a6ac-abe1b2c2caac", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "91e103d5-72f7-463d-840d-310069e33de9", "name": "List Project Members - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/members", "method": "GET", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}}}, "response": {"status": 200, "body": "{\n \"members\": [\n {\n \"member_id\": \"member_id\",\n \"email\": \"email\"\n }\n ]\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "91e103d5-72f7-463d-840d-310069e33de9", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": 
"2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "515c6f7e-09c3-43ea-ad6c-65bc11d20f46", "name": "Delete a Project Member - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/members/{member_id}", "method": "DELETE", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}, "member_id": {"equalTo": "123456789012345678901234"}}}, "response": {"status": 200, "body": "{\n \"message\": \"message\"\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "515c6f7e-09c3-43ea-ad6c-65bc11d20f46", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "a920ad0e-2796-4361-ac16-ac83fb75e32a", "name": "List Project Models - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/models", "method": "GET", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}}, "queryParameters": {"include_outdated": {"equalTo": "true"}}}, "response": {"status": 200, "body": "{\n \"stt\": [\n {\n \"name\": \"nova-3\",\n \"canonical_name\": \"nova-3\",\n \"architecture\": \"base\",\n \"languages\": [\n \"en\",\n \"en-us\"\n ],\n \"version\": \"2021-11-10.1\",\n \"uuid\": \"6b28e919-8427-4f32-9847-492e2efd7daf\",\n \"batch\": true,\n \"streaming\": true,\n \"formatted_output\": true\n }\n ],\n \"tts\": [\n {\n \"name\": \"zeus\",\n \"canonical_name\": \"aura-2-zeus-en\",\n \"architecture\": \"aura-2\",\n \"languages\": [\n \"en\",\n \"en-US\"\n ],\n \"version\": \"2025-04-07.0\",\n \"uuid\": \"2baf189d-91ac-481d-b6d1-750888667b31\",\n \"metadata\": {\n \"accent\": \"American\",\n \"age\": \"Adult\",\n \"color\": \"#C58DFF\",\n \"image\": \"https://static.deepgram.com/examples/avatars/zeus.jpg\",\n \"sample\": \"https://static.deepgram.com/examples/Aura-2-zeus.wav\",\n \"tags\": [\n \"masculine\",\n \"deep\",\n \"trustworthy\",\n \"smooth\"\n ],\n \"use_cases\": [\n \"IVR\"\n ]\n }\n }\n ]\n}", "headers": {"Content-Type": 
"application/json"}}, "uuid": "a920ad0e-2796-4361-ac16-ac83fb75e32a", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "9f8c6bf2-ebee-4956-b39f-0291b9d64b6e", "name": "Get a Project Model - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/models/{model_id}", "method": "GET", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}, "model_id": {"equalTo": "af6e9977-99f6-4d8f-b6f5-dfdf6fb6e291"}}}, "response": {"status": 200, "body": "{\n \"name\": \"general\",\n \"canonical_name\": \"enhanced-general\",\n \"architecture\": \"polaris\",\n \"languages\": [\n \"en\",\n \"en-us\"\n ],\n \"version\": \"2022-05-18.1\",\n \"uuid\": \"c7226e9e-ae1c-4057-ae2a-a71a6b0dc588\",\n \"batch\": true,\n \"streaming\": true,\n \"formatted_output\": false\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "9f8c6bf2-ebee-4956-b39f-0291b9d64b6e", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "d6a14959-05fa-4aec-9f0c-ba2a817c66e5", "name": "List Project Requests - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/requests", "method": "GET", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}}, "queryParameters": {"start": {"equalTo": "2024-01-15T09:30:00Z"}, "end": {"equalTo": "2024-01-15T09:30:00Z"}, "limit": {"equalTo": "1.1"}, "page": {"equalTo": "1.1"}, "accessor": {"equalTo": "12345678-1234-1234-1234-123456789012"}, "request_id": {"equalTo": "12345678-1234-1234-1234-123456789012"}, "deployment": {"equalTo": "hosted"}, "endpoint": {"equalTo": "listen"}, "method": {"equalTo": "sync"}, "status": {"equalTo": "succeeded"}}}, "response": {"status": 200, "body": "{\n \"page\": 1.1,\n \"limit\": 1.1,\n \"requests\": [\n {\n \"request_id\": \"request_id\",\n \"project_uuid\": \"project_uuid\",\n \"created\": 
\"2024-01-15T09:30:00Z\",\n \"path\": \"path\",\n \"api_key_id\": \"api_key_id\",\n \"response\": {\n \"key\": \"value\"\n },\n \"code\": 1.1,\n \"deployment\": \"deployment\",\n \"callback\": \"callback\"\n }\n ]\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "d6a14959-05fa-4aec-9f0c-ba2a817c66e5", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "733e39aa-d3ef-4ea7-8062-af080c6288c4", "name": "Get a Project Request - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/requests/{request_id}", "method": "GET", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}, "request_id": {"equalTo": "123456-7890-1234-5678-901234"}}}, "response": {"status": 200, "body": "{\n \"request\": {\n \"request_id\": \"request_id\",\n \"project_uuid\": \"project_uuid\",\n \"created\": \"2024-01-15T09:30:00Z\",\n \"path\": \"path\",\n \"api_key_id\": \"api_key_id\",\n \"response\": {\n \"key\": \"value\"\n },\n \"code\": 1.1,\n \"deployment\": \"deployment\",\n \"callback\": \"callback\"\n }\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "733e39aa-d3ef-4ea7-8062-af080c6288c4", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "6309dd55-c993-4ce1-b0b2-01a41c9f08d6", "name": "Get Project Usage - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/usage", "method": "GET", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}}, "queryParameters": {"start": {"equalTo": "start"}, "end": {"equalTo": "end"}, "accessor": {"equalTo": "12345678-1234-1234-1234-123456789012"}, "alternatives": {"equalTo": "true"}, "callback_method": {"equalTo": "true"}, "callback": {"equalTo": "true"}, "channels": {"equalTo": "true"}, "custom_intent_mode": {"equalTo": "true"}, "custom_intent": {"equalTo": "true"}, "custom_topic_mode": 
{"equalTo": "true"}, "custom_topic": {"equalTo": "true"}, "deployment": {"equalTo": "hosted"}, "detect_entities": {"equalTo": "true"}, "detect_language": {"equalTo": "true"}, "diarize": {"equalTo": "true"}, "dictation": {"equalTo": "true"}, "encoding": {"equalTo": "true"}, "endpoint": {"equalTo": "listen"}, "extra": {"equalTo": "true"}, "filler_words": {"equalTo": "true"}, "intents": {"equalTo": "true"}, "keyterm": {"equalTo": "true"}, "keywords": {"equalTo": "true"}, "language": {"equalTo": "true"}, "measurements": {"equalTo": "true"}, "method": {"equalTo": "sync"}, "model": {"equalTo": "6f548761-c9c0-429a-9315-11a1d28499c8"}, "multichannel": {"equalTo": "true"}, "numerals": {"equalTo": "true"}, "paragraphs": {"equalTo": "true"}, "profanity_filter": {"equalTo": "true"}, "punctuate": {"equalTo": "true"}, "redact": {"equalTo": "true"}, "replace": {"equalTo": "true"}, "sample_rate": {"equalTo": "true"}, "search": {"equalTo": "true"}, "sentiment": {"equalTo": "true"}, "smart_format": {"equalTo": "true"}, "summarize": {"equalTo": "true"}, "tag": {"equalTo": "tag1"}, "topics": {"equalTo": "true"}, "utt_split": {"equalTo": "true"}, "utterances": {"equalTo": "true"}, "version": {"equalTo": "true"}}}, "response": {"status": 200, "body": "{\n \"start\": \"2024-10-16\",\n \"end\": \"2024-10-23\",\n \"resolution\": {\n \"units\": \"day\",\n \"amount\": 1\n }\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "6309dd55-c993-4ce1-b0b2-01a41c9f08d6", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "b132121b-4efe-42ad-a268-8acac35c189b", "name": "Get Project Balances - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/balances", "method": "GET", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}}}, "response": {"status": 200, "body": "{\n \"balances\": [\n {\n \"balance_id\": \"balance_id\",\n \"amount\": 1.1,\n \"units\": \"units\",\n 
\"purchase_order_id\": \"purchase_order_id\"\n }\n ]\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "b132121b-4efe-42ad-a268-8acac35c189b", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "4019c244-52d3-4d57-902c-af837631650a", "name": "Get a Project Balance - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/balances/{balance_id}", "method": "GET", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}, "balance_id": {"equalTo": "123456-7890-1234-5678-901234"}}}, "response": {"status": 200, "body": "{\n \"balance_id\": \"balance_id\",\n \"amount\": 1.1,\n \"units\": \"units\",\n \"purchase_order_id\": \"purchase_order_id\"\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "4019c244-52d3-4d57-902c-af837631650a", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "555b6751-587f-400c-bf5e-400e108ad6b4", "name": "Get Project Billing Breakdown - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/billing/breakdown", "method": "GET", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}}, "queryParameters": {"start": {"equalTo": "start"}, "end": {"equalTo": "end"}, "accessor": {"equalTo": "12345678-1234-1234-1234-123456789012"}, "deployment": {"equalTo": "hosted"}, "tag": {"equalTo": "tag1"}, "line_item": {"equalTo": "streaming::nova-3"}}}, "response": {"status": 200, "body": "{\n \"start\": \"2025-01-16\",\n \"end\": \"2025-01-23\",\n \"resolution\": {\n \"units\": \"day\",\n \"amount\": 1\n },\n \"results\": [\n {\n \"dollars\": 0.25,\n \"grouping\": {\n \"start\": \"2025-01-16\",\n \"end\": \"2025-01-16\",\n \"accessor\": \"123456789012345678901234\",\n \"deployment\": \"hosted\",\n \"line_item\": \"streaming::nova-3\",\n \"tags\": [\n \"tag1\",\n \"tag2\"\n ]\n }\n }\n ]\n}", "headers": 
{"Content-Type": "application/json"}}, "uuid": "555b6751-587f-400c-bf5e-400e108ad6b4", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "a61ae38c-e41f-4726-a55c-88f2135897be", "name": "List Project Billing Fields - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/billing/fields", "method": "GET", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}}, "queryParameters": {"start": {"equalTo": "start"}, "end": {"equalTo": "end"}}}, "response": {"status": 200, "body": "{\n \"accessors\": [\n \"12345678-1234-1234-1234-123456789012\",\n \"87654321-4321-4321-4321-210987654321\"\n ],\n \"deployments\": [\n \"hosted\",\n \"self-hosted\"\n ],\n \"tags\": [\n \"dev\",\n \"production\"\n ],\n \"line_items\": {\n \"streaming::nova-3\": \"Nova - 3 (Stream)\",\n \"sync::aura-2\": \"Aura -2 (Sync)\"\n }\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "a61ae38c-e41f-4726-a55c-88f2135897be", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "85b4373c-ba39-41b1-84e8-ae1ee6b180ca", "name": "List Project Purchases - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/purchases", "method": "GET", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}}, "queryParameters": {"limit": {"equalTo": "1.1"}}}, "response": {"status": 200, "body": "{\n \"orders\": [\n {\n \"order_id\": \"025e19ba-b6d9-4a04-9f99-4fe715aca5f1\",\n \"expiration\": \"2026-03-04T00:00:00Z\",\n \"created\": \"2023-02-21T21:13:40Z\",\n \"amount\": 150,\n \"units\": \"usd\",\n \"order_type\": \"promotional\"\n }\n ]\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "85b4373c-ba39-41b1-84e8-ae1ee6b180ca", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": 
"2dd14c67-ed4e-4d97-9636-0a712899deb8", "name": "List Project Invites - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/invites", "method": "GET", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}}}, "response": {"status": 200, "body": "{\n \"invites\": [\n {\n \"email\": \"email\",\n \"scope\": \"scope\"\n }\n ]\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "2dd14c67-ed4e-4d97-9636-0a712899deb8", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "7c109496-adfe-4e85-b007-a6f799ee95cb", "name": "Create a Project Invite - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/invites", "method": "POST", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}}}, "response": {"status": 200, "body": "{\n \"message\": \"message\"\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "7c109496-adfe-4e85-b007-a6f799ee95cb", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "d6d268d0-d91e-4a65-80e0-339621173db9", "name": "Delete a Project Invite - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/invites/{email}", "method": "DELETE", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}, "email": {"equalTo": "john.doe@example.com"}}}, "response": {"status": 200, "body": "{\n \"message\": \"message\"\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "d6d268d0-d91e-4a65-80e0-339621173db9", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "1b965d71-c930-4a0b-90f3-2289f80f3634", "name": "List Project Member Scopes - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/members/{member_id}/scopes", "method": "GET", "pathParameters": {"project_id": {"equalTo": 
"123456-7890-1234-5678-901234"}, "member_id": {"equalTo": "123456789012345678901234"}}}, "response": {"status": 200, "body": "{\n \"scopes\": [\n \"scopes\"\n ]\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "1b965d71-c930-4a0b-90f3-2289f80f3634", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "eb2f5de2-b887-47be-abd5-7cb702aca55d", "name": "Update Project Member Scopes - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/members/{member_id}/scopes", "method": "PUT", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}, "member_id": {"equalTo": "123456789012345678901234"}}}, "response": {"status": 200, "body": "{\n \"message\": \"message\"\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "eb2f5de2-b887-47be-abd5-7cb702aca55d", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "9bdf51a4-1e10-41b8-8de2-2df650562db3", "name": "Get Project Usage Breakdown - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/usage/breakdown", "method": "GET", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}}, "queryParameters": {"start": {"equalTo": "start"}, "end": {"equalTo": "end"}, "grouping": {"equalTo": "accessor"}, "accessor": {"equalTo": "12345678-1234-1234-1234-123456789012"}, "alternatives": {"equalTo": "true"}, "callback_method": {"equalTo": "true"}, "callback": {"equalTo": "true"}, "channels": {"equalTo": "true"}, "custom_intent_mode": {"equalTo": "true"}, "custom_intent": {"equalTo": "true"}, "custom_topic_mode": {"equalTo": "true"}, "custom_topic": {"equalTo": "true"}, "deployment": {"equalTo": "hosted"}, "detect_entities": {"equalTo": "true"}, "detect_language": {"equalTo": "true"}, "diarize": {"equalTo": "true"}, "dictation": {"equalTo": "true"}, "encoding": {"equalTo": "true"}, 
"endpoint": {"equalTo": "listen"}, "extra": {"equalTo": "true"}, "filler_words": {"equalTo": "true"}, "intents": {"equalTo": "true"}, "keyterm": {"equalTo": "true"}, "keywords": {"equalTo": "true"}, "language": {"equalTo": "true"}, "measurements": {"equalTo": "true"}, "method": {"equalTo": "sync"}, "model": {"equalTo": "6f548761-c9c0-429a-9315-11a1d28499c8"}, "multichannel": {"equalTo": "true"}, "numerals": {"equalTo": "true"}, "paragraphs": {"equalTo": "true"}, "profanity_filter": {"equalTo": "true"}, "punctuate": {"equalTo": "true"}, "redact": {"equalTo": "true"}, "replace": {"equalTo": "true"}, "sample_rate": {"equalTo": "true"}, "search": {"equalTo": "true"}, "sentiment": {"equalTo": "true"}, "smart_format": {"equalTo": "true"}, "summarize": {"equalTo": "true"}, "tag": {"equalTo": "tag1"}, "topics": {"equalTo": "true"}, "utt_split": {"equalTo": "true"}, "utterances": {"equalTo": "true"}, "version": {"equalTo": "true"}}}, "response": {"status": 200, "body": "{\n \"start\": \"2025-01-16\",\n \"end\": \"2025-01-23\",\n \"resolution\": {\n \"units\": \"day\",\n \"amount\": 1\n },\n \"results\": [\n {\n \"hours\": 1619.7242069444444,\n \"total_hours\": 1621.7395791666668,\n \"agent_hours\": 41.33564388888889,\n \"tokens_in\": 0,\n \"tokens_out\": 0,\n \"tts_characters\": 9158866,\n \"requests\": 373381,\n \"grouping\": {\n \"start\": \"2025-01-16\",\n \"end\": \"2025-01-16\",\n \"accessor\": \"123456789012345678901234\",\n \"endpoint\": \"listen\",\n \"feature_set\": \"punctuate\",\n \"models\": [\n \"Nova-2\"\n ],\n \"method\": \"async\",\n \"tags\": \"tag1\",\n \"deployment\": \"self-hosted\"\n }\n }\n ]\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "9bdf51a4-1e10-41b8-8de2-2df650562db3", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "bc9fcd54-076e-48dc-a2dd-d71a8bf8bd4e", "name": "List Project Usage Fields - default", "request": {"urlPathTemplate": 
"/v1/projects/{project_id}/usage/fields", "method": "GET", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}}, "queryParameters": {"start": {"equalTo": "start"}, "end": {"equalTo": "end"}}}, "response": {"status": 200, "body": "{\n \"tags\": [\n \"tag=dev\",\n \"tag=production\"\n ],\n \"models\": [\n {\n \"name\": \"2-medical-nova\",\n \"language\": \"en-MY\",\n \"version\": \"2024-05-31.13574\",\n \"model_id\": \"1234567890-12345-67890\"\n }\n ],\n \"processing_methods\": [\n \"sync\",\n \"streaming\"\n ],\n \"features\": [\n \"alternatives\",\n \"detect_entities\",\n \"detect_language\"\n ]\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "bc9fcd54-076e-48dc-a2dd-d71a8bf8bd4e", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "b5ac3651-d3b7-4cd7-b0b8-e1a917a16f3b", "name": "Analyze text content - default", "request": {"urlPathTemplate": "/v1/read", "method": "POST", "queryParameters": {"callback": {"equalTo": "callback"}, "callback_method": {"equalTo": "POST"}, "sentiment": {"equalTo": "true"}, "summarize": {"equalTo": "v2"}, "tag": {"equalTo": "tag"}, "topics": {"equalTo": "true"}, "custom_topic": {"equalTo": "custom_topic"}, "custom_topic_mode": {"equalTo": "extended"}, "intents": {"equalTo": "true"}, "custom_intent": {"equalTo": "custom_intent"}, "custom_intent_mode": {"equalTo": "extended"}, "language": {"equalTo": "language"}}}, "response": {"status": 200, "body": "{\n \"metadata\": {\n \"metadata\": {\n \"request_id\": \"d04af392-db11-4c1d-83e1-20e34f0b8999\",\n \"created\": \"2024-11-18T23:47:44Z\",\n \"language\": \"en\"\n }\n },\n \"results\": {\n \"summary\": {\n \"results\": {\n \"summary\": {\n \"text\": \"The summary of the text submitted.\"\n }\n }\n },\n \"topics\": {\n \"results\": {\n \"topics\": {\n \"segments\": [\n {\n \"text\": \"And, um, I think if it signifies anything, it is, uh, to honor the the women who came 
before us who, um, were skilled and qualified, um, and didn't get the the same opportunities that we have today.\",\n \"start_word\": 32,\n \"end_word\": 69,\n \"topics\": [\n {\n \"topic\": \"Spacewalk\",\n \"confidence_score\": 0.91581345\n }\n ]\n }\n ]\n }\n }\n },\n \"intents\": {\n \"results\": {\n \"intents\": {\n \"segments\": [\n {\n \"text\": \"If you found this valuable, you can subscribe to the show on spotify or your favorite podcast app.\",\n \"start_word\": 354,\n \"end_word\": 414,\n \"intents\": [\n {\n \"intent\": \"Encourage podcasting\",\n \"confidence_score\": 0.0038975573\n }\n ]\n }\n ]\n }\n }\n },\n \"sentiments\": {\n \"segments\": [\n {\n \"text\": \"Yeah. As as much as, um, it's worth celebrating, uh, the first, uh, spacewalk, um, with an all-female team, I think many of us are looking forward to it just being normal. And, um, I think if it signifies anything, it is, uh, to honor the the women who came before us who, um, were skilled and qualified, um, and didn't get the the same opportunities that we have today.\",\n \"start_word\": 0,\n \"end_word\": 69,\n \"sentiment\": \"positive\",\n \"sentiment_score\": 0.5810546875\n }\n ],\n \"average\": {\n \"sentiment\": \"positive\",\n \"sentiment_score\": 0.5810185185185185\n }\n }\n }\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "b5ac3651-d3b7-4cd7-b0b8-e1a917a16f3b", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "4110cb96-50e2-4fe6-b8ae-5d69120cee89", "name": "List Project Self-Hosted Distribution Credentials - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/self-hosted/distribution/credentials", "method": "GET", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}}}, "response": {"status": 200, "body": "{\n \"distribution_credentials\": [\n {\n \"member\": {\n \"member_id\": \"3376abcd-8e5e-49d3-92d4-876d3a4f0363\",\n \"email\": 
\"email@example.com\"\n },\n \"distribution_credentials\": {\n \"distribution_credentials_id\": \"8b36cfd0-472f-4a21-833f-2d6343c3a2f3\",\n \"provider\": \"quay\",\n \"comment\": \"My Self-Hosted Distribution Credentials\",\n \"scopes\": [\n \"self-hosted:product:api\",\n \"self-hosted:product:engine\"\n ],\n \"created\": \"2023-06-28T15:36:59Z\"\n }\n }\n ]\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "4110cb96-50e2-4fe6-b8ae-5d69120cee89", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "9c12eea9-6ba6-4d70-bb14-a2742cebc114", "name": "Create a Project Self-Hosted Distribution Credential - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/self-hosted/distribution/credentials", "method": "POST", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}}, "queryParameters": {"provider": {"equalTo": "quay"}}}, "response": {"status": 200, "body": "{\n \"member\": {\n \"member_id\": \"c7b9b131-73f3-11d9-8665-0b00d2e44b83\",\n \"email\": \"email@example.com\"\n },\n \"distribution_credentials\": {\n \"distribution_credentials_id\": \"82c32c10-53b2-4d23-993f-864b3d44502a\",\n \"provider\": \"quay\",\n \"comment\": \"My Self-Hosted Distribution Credentials\",\n \"scopes\": [\n \"self-hosted:product:api\",\n \"self-hosted:product:engine\"\n ],\n \"created\": \"2023-06-28T15:36:59Z\"\n }\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "9c12eea9-6ba6-4d70-bb14-a2742cebc114", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "a47cd13f-2314-4190-b2c7-20436ccffbd2", "name": "Get a Project Self-Hosted Distribution Credential - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/self-hosted/distribution/credentials/{distribution_credentials_id}", "method": "GET", "pathParameters": {"project_id": {"equalTo": 
"123456-7890-1234-5678-901234"}, "distribution_credentials_id": {"equalTo": "8b36cfd0-472f-4a21-833f-2d6343c3a2f3"}}}, "response": {"status": 200, "body": "{\n \"member\": {\n \"member_id\": \"c7b9b131-73f3-11d9-8665-0b00d2e44b83\",\n \"email\": \"email@example.com\"\n },\n \"distribution_credentials\": {\n \"distribution_credentials_id\": \"82c32c10-53b2-4d23-993f-864b3d44502a\",\n \"provider\": \"quay\",\n \"comment\": \"My Self-Hosted Distribution Credentials\",\n \"scopes\": [\n \"self-hosted:product:api\",\n \"self-hosted:product:engine\"\n ],\n \"created\": \"2023-06-28T15:36:59Z\"\n }\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "a47cd13f-2314-4190-b2c7-20436ccffbd2", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "8bd46091-0e57-4b3d-9485-a86e6f1eaf17", "name": "Delete a Project Self-Hosted Distribution Credential - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/self-hosted/distribution/credentials/{distribution_credentials_id}", "method": "DELETE", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}, "distribution_credentials_id": {"equalTo": "8b36cfd0-472f-4a21-833f-2d6343c3a2f3"}}}, "response": {"status": 200, "body": "{\n \"member\": {\n \"member_id\": \"c7b9b131-73f3-11d9-8665-0b00d2e44b83\",\n \"email\": \"email@example.com\"\n },\n \"distribution_credentials\": {\n \"distribution_credentials_id\": \"82c32c10-53b2-4d23-993f-864b3d44502a\",\n \"provider\": \"quay\",\n \"comment\": \"My Self-Hosted Distribution Credentials\",\n \"scopes\": [\n \"self-hosted:product:api\",\n \"self-hosted:product:engine\"\n ],\n \"created\": \"2023-06-28T15:36:59Z\"\n }\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "8bd46091-0e57-4b3d-9485-a86e6f1eaf17", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": 
"b06ec977-02ed-41e2-8fff-2bc45cd2166b", "name": "Text to Speech transformation - default", "request": {"urlPathTemplate": "/v1/speak", "method": "POST"}, "response": {"status": 200, "body": "{\n \"key\": \"value\"\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "b06ec977-02ed-41e2-8fff-2bc45cd2166b", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "61883f62-09ec-4be9-b477-13a89b9677cf", "name": "List Agent Configurations - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/agents", "method": "GET", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}}}, "response": {"status": 200, "body": "{\n \"agents\": [\n {\n \"agent_id\": \"agent_id\",\n \"config\": {\n \"key\": \"value\"\n },\n \"metadata\": {\n \"key\": \"value\"\n },\n \"created_at\": \"2024-01-15T09:30:00Z\",\n \"updated_at\": \"2024-01-15T09:30:00Z\"\n }\n ]\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "61883f62-09ec-4be9-b477-13a89b9677cf", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "a43cdbee-2b2f-47fb-8220-75d337cb6d3a", "name": "Create an Agent Configuration - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/agents", "method": "POST", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}}}, "response": {"status": 200, "body": "{\n \"agent_id\": \"agent_id\",\n \"config\": {\n \"key\": \"value\"\n },\n \"metadata\": {\n \"key\": \"value\"\n }\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "a43cdbee-2b2f-47fb-8220-75d337cb6d3a", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "2575dd3c-b1c8-4007-8f87-4e32073a9dcf", "name": "Get an Agent Configuration - default", "request": {"urlPathTemplate": 
"/v1/projects/{project_id}/agents/{agent_id}", "method": "GET", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}, "agent_id": {"equalTo": "a1b2c3d4-e5f6-7890-abcd-ef1234567890"}}}, "response": {"status": 200, "body": "{\n \"agent_id\": \"agent_id\",\n \"config\": {\n \"key\": \"value\"\n },\n \"metadata\": {\n \"key\": \"value\"\n },\n \"created_at\": \"2024-01-15T09:30:00Z\",\n \"updated_at\": \"2024-01-15T09:30:00Z\"\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "2575dd3c-b1c8-4007-8f87-4e32073a9dcf", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "3b52c271-0e44-4148-b726-6f5ca8c91d26", "name": "Update Agent Metadata - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/agents/{agent_id}", "method": "PUT", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}, "agent_id": {"equalTo": "a1b2c3d4-e5f6-7890-abcd-ef1234567890"}}}, "response": {"status": 200, "body": "{\n \"agent_id\": \"agent_id\",\n \"config\": {\n \"key\": \"value\"\n },\n \"metadata\": {\n \"key\": \"value\"\n },\n \"created_at\": \"2024-01-15T09:30:00Z\",\n \"updated_at\": \"2024-01-15T09:30:00Z\"\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "3b52c271-0e44-4148-b726-6f5ca8c91d26", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "e839386d-beaa-4a7a-a478-ed9d9ab1b63e", "name": "Delete an Agent Configuration - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/agents/{agent_id}", "method": "DELETE", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}, "agent_id": {"equalTo": "a1b2c3d4-e5f6-7890-abcd-ef1234567890"}}}, "response": {"status": 200, "body": "{\n \"key\": \"value\"\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "e839386d-beaa-4a7a-a478-ed9d9ab1b63e", 
"persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "bb85071e-3933-4cd3-8fd7-d35eb4c992c5", "name": "List Agent Variables - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/agent-variables", "method": "GET", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}}}, "response": {"status": 200, "body": "{\n \"variables\": [\n {\n \"variable_id\": \"variable_id\",\n \"key\": \"key\",\n \"value\": {\n \"key\": \"value\"\n },\n \"created_at\": \"2024-01-15T09:30:00Z\",\n \"updated_at\": \"2024-01-15T09:30:00Z\"\n }\n ]\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "bb85071e-3933-4cd3-8fd7-d35eb4c992c5", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "c5016e4b-f863-4dc7-972f-c577ac7fdc47", "name": "Create an Agent Variable - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/agent-variables", "method": "POST", "pathParameters": {"project_id": {"equalTo": "project_id"}}}, "response": {"status": 200, "body": "{\n \"variable_id\": \"variable_id\",\n \"key\": \"key\",\n \"value\": {\n \"key\": \"value\"\n },\n \"created_at\": \"2024-01-15T09:30:00Z\",\n \"updated_at\": \"2024-01-15T09:30:00Z\"\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "c5016e4b-f863-4dc7-972f-c577ac7fdc47", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "5bb4b688-8475-4911-8ad5-725b7d5338e3", "name": "Get an Agent Variable - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/agent-variables/{variable_id}", "method": "GET", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}, "variable_id": {"equalTo": "v1a2b3c4-d5e6-7890-abcd-ef1234567890"}}}, "response": {"status": 200, "body": "{\n \"variable_id\": \"variable_id\",\n 
\"key\": \"key\",\n \"value\": {\n \"key\": \"value\"\n },\n \"created_at\": \"2024-01-15T09:30:00Z\",\n \"updated_at\": \"2024-01-15T09:30:00Z\"\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "5bb4b688-8475-4911-8ad5-725b7d5338e3", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "2e2c7061-7573-423f-a907-2a15dd9f3307", "name": "Delete an Agent Variable - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/agent-variables/{variable_id}", "method": "DELETE", "pathParameters": {"project_id": {"equalTo": "123456-7890-1234-5678-901234"}, "variable_id": {"equalTo": "v1a2b3c4-d5e6-7890-abcd-ef1234567890"}}}, "response": {"status": 200, "body": "{\n \"key\": \"value\"\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "2e2c7061-7573-423f-a907-2a15dd9f3307", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}, {"id": "33765621-bc60-4ef4-b295-4b276aeb871f", "name": "Update an Agent Variable - default", "request": {"urlPathTemplate": "/v1/projects/{project_id}/agent-variables/{variable_id}", "method": "PATCH", "pathParameters": {"project_id": {"equalTo": "project_id"}, "variable_id": {"equalTo": "variable_id"}}}, "response": {"status": 200, "body": "{\n \"variable_id\": \"variable_id\",\n \"key\": \"key\",\n \"value\": {\n \"key\": \"value\"\n },\n \"created_at\": \"2024-01-15T09:30:00Z\",\n \"updated_at\": \"2024-01-15T09:30:00Z\"\n}", "headers": {"Content-Type": "application/json"}}, "uuid": "33765621-bc60-4ef4-b295-4b276aeb871f", "persistent": true, "priority": 3, "metadata": {"mocklab": {"created": {"at": "2020-01-01T00:00:00.000Z", "via": "SYSTEM"}}}}], "meta": {"total": 49}} \ No newline at end of file From f241c07cab7a3d24d2638cf580f4c0e6a84ffafa Mon Sep 17 00:00:00 2001 From: Greg Holmes Date: Tue, 14 Apr 2026 13:19:58 +0100 Subject: [PATCH 8/8] fix(examples): update 
examples for current API surface --- examples/13-transcription-live-websocket.py | 37 ++++++--- .../14-transcription-live-websocket-v2.py | 18 +++-- examples/23-text-builder-helper.py | 75 +++++++++---------- examples/24-text-builder-streaming.py | 15 ++-- examples/30-voice-agent.py | 56 ++++++++++---- examples/51-management-keys.py | 8 +- examples/52-management-members.py | 4 +- examples/55-management-billing.py | 10 ++- 8 files changed, 132 insertions(+), 91 deletions(-) diff --git a/examples/13-transcription-live-websocket.py b/examples/13-transcription-live-websocket.py index e12c9f67..ed0a93f5 100644 --- a/examples/13-transcription-live-websocket.py +++ b/examples/13-transcription-live-websocket.py @@ -2,8 +2,12 @@ Example: Live Transcription with WebSocket (Listen V1) This example shows how to stream audio for real-time transcription using WebSocket. +It streams a pre-recorded audio file in chunks to simulate a live microphone feed. """ +import os +import threading +import time from typing import Union from dotenv import load_dotenv @@ -23,33 +27,46 @@ client = DeepgramClient() +# Audio chunking: simulate real-time streaming by sending 1-second chunks +sample_rate = 44100 # Hz (matches fixtures/audio.wav) +chunk_size = sample_rate * 2 # 2 bytes per sample (linear16 PCM mono) +chunk_delay = 1.0 # seconds between chunks + +audio_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "fixtures", "audio.wav") + try: with client.listen.v1.connect(model="nova-3") as connection: def on_message(message: ListenV1SocketClientResponse) -> None: msg_type = getattr(message, "type", "Unknown") - print(f"Received {msg_type} event") - - # Extract transcription from Results events if isinstance(message, ListenV1Results): if message.channel and message.channel.alternatives: transcript = message.channel.alternatives[0].transcript if transcript: print(f"Transcript: {transcript}") + else: + print(f"Received {msg_type} event") connection.on(EventType.OPEN, lambda _: 
print("Connection opened")) connection.on(EventType.MESSAGE, on_message) connection.on(EventType.CLOSE, lambda _: print("Connection closed")) connection.on(EventType.ERROR, lambda error: print(f"Error: {error}")) - # Start listening - this blocks until the connection closes - # In production, you would send audio data here: - # audio_path = os.path.join(os.path.dirname(__file__), "..", "fixtures", "audio.wav") - # with open(audio_path, "rb") as audio_file: - # audio_data = audio_file.read() - # connection.send_listen_v_1_media(audio_data) + # Start listening in a background thread so we can send audio concurrently + threading.Thread(target=connection.start_listening, daemon=True).start() + + # Stream audio file in chunks to simulate live microphone input + with open(audio_path, "rb") as f: + audio_data = f.read() + + for i in range(0, len(audio_data), chunk_size): + chunk = audio_data[i : i + chunk_size] + if chunk: + connection.send_media(chunk) + time.sleep(chunk_delay) - connection.start_listening() + # Wait for final transcription results + time.sleep(2) # For async version: # from deepgram import AsyncDeepgramClient diff --git a/examples/14-transcription-live-websocket-v2.py b/examples/14-transcription-live-websocket-v2.py index 57e91b18..c60a50ab 100644 --- a/examples/14-transcription-live-websocket-v2.py +++ b/examples/14-transcription-live-websocket-v2.py @@ -36,14 +36,22 @@ ) as connection: def on_message(message: ListenV2SocketClientResponse) -> None: - msg_type = getattr(message, "type", type(message).__name__) - print(f"Received {msg_type} event ({type(message).__name__})") - - # Extract transcription from TurnInfo events - if isinstance(message, ListenV2TurnInfo): + # V2 messages may arrive as typed objects or dicts depending on the union match + if isinstance(message, dict): + msg_type = message.get("type", "Unknown") + print(f"Received {msg_type} event") + if msg_type == "TurnInfo": + print(f" transcript: {message.get('transcript', '')}") + print(f" 
event: {message.get('event', '')}") + print(f" turn_index: {message.get('turn_index', '')}") + elif isinstance(message, ListenV2TurnInfo): + print(f"Received TurnInfo event") print(f" transcript: {message.transcript}") print(f" event: {message.event}") print(f" turn_index: {message.turn_index}") + else: + msg_type = getattr(message, "type", type(message).__name__) + print(f"Received {msg_type} event") connection.on(EventType.OPEN, lambda _: print("Connection opened")) connection.on(EventType.MESSAGE, on_message) diff --git a/examples/23-text-builder-helper.py b/examples/23-text-builder-helper.py index 67ac8551..48ab745a 100644 --- a/examples/23-text-builder-helper.py +++ b/examples/23-text-builder-helper.py @@ -10,10 +10,6 @@ from deepgram import DeepgramClient from deepgram.helpers import TextBuilder, add_pronunciation, ssml_to_deepgram -from deepgram.speak.v1.audio.types import ( - AudioGenerateRequestEncoding, - AudioGenerateRequestModel, -) def example_basic_text_builder(): @@ -21,16 +17,16 @@ def example_basic_text_builder(): print("Example 1: Basic TextBuilder Usage") print("-" * 50) - # Build text with pronunciations and pauses + # Build text with pronunciations + # Note: .pause() is supported in streaming (WebSocket) mode. + # For REST API, use plain text between pronunciations. text = ( TextBuilder() .text("Take ") .pronunciation("azathioprine", "ˌæzəˈθaɪəpriːn") .text(" twice daily with ") .pronunciation("dupilumab", "duːˈpɪljuːmæb") - .text(" injections") - .pause(500) - .text(" Do not exceed prescribed dosage.") + .text(" injections. 
Do not exceed prescribed dosage.") .build() ) @@ -42,15 +38,16 @@ def example_basic_text_builder(): client = DeepgramClient(api_key=api_key) # Generate speech with custom pronunciations - response = client.speak.v1.generate( - text, - model=AudioGenerateRequestModel.AURA_ASTERIA_EN, - encoding=AudioGenerateRequestEncoding.LINEAR16, + response = client.speak.v1.audio.generate( + text=text, + model="aura-2-asteria-en", + encoding="linear16", ) # Save to file with open("output_example1.wav", "wb") as f: - f.write(response) + for chunk in response: + f.write(chunk) print("✓ Audio saved to output_example1.wav") else: @@ -75,13 +72,14 @@ def example_add_pronunciation_function(): if api_key: client = DeepgramClient(api_key=api_key) - response = client.speak.v1.generate( - text, - model=AudioGenerateRequestModel.AURA_ASTERIA_EN, + response = client.speak.v1.audio.generate( + text=text, + model="aura-2-asteria-en", ) with open("output_example2.wav", "wb") as f: - f.write(response) + for chunk in response: + f.write(chunk) print("✓ Audio saved to output_example2.wav") else: @@ -96,10 +94,8 @@ def example_ssml_migration(): # Existing SSML from another TTS provider ssml = """ Welcome to your medication guide. - - Take azathioprine + Take azathioprine as prescribed. - Contact your doctor if you experience side effects. 
""" @@ -112,13 +108,14 @@ def example_ssml_migration(): if api_key: client = DeepgramClient(api_key=api_key) - response = client.speak.v1.generate( - text, - model=AudioGenerateRequestModel.AURA_ASTERIA_EN, + response = client.speak.v1.audio.generate( + text=text, + model="aura-2-asteria-en", ) with open("output_example3.wav", "wb") as f: - f.write(response) + for chunk in response: + f.write(chunk) print("✓ Audio saved to output_example3.wav") else: @@ -137,9 +134,7 @@ def example_mixed_ssml_and_builder(): text = ( TextBuilder() .from_ssml(ssml) - .pause(500) .text(" Store at room temperature.") - .pause(500) .text(" Keep out of reach of children.") .build() ) @@ -150,13 +145,14 @@ def example_mixed_ssml_and_builder(): if api_key: client = DeepgramClient(api_key=api_key) - response = client.speak.v1.generate( - text, - model=AudioGenerateRequestModel.AURA_ASTERIA_EN, + response = client.speak.v1.audio.generate( + text=text, + model="aura-2-asteria-en", ) with open("output_example4.wav", "wb") as f: - f.write(response) + for chunk in response: + f.write(chunk) print("✓ Audio saved to output_example4.wav") else: @@ -172,19 +168,15 @@ def example_pharmacy_instructions(): TextBuilder() .text("Prescription for ") .pronunciation("lisinopril", "laɪˈsɪnəprɪl") - .pause(300) - .text(" Take one tablet by mouth daily for hypertension.") - .pause(500) + .text(". Take one tablet by mouth daily for hypertension.") .text(" Common side effects may include ") .pronunciation("hypotension", "ˌhaɪpoʊˈtɛnʃən") .text(" or dizziness.") - .pause(500) .text(" Do not take with ") .pronunciation("aliskiren", "əˈlɪskɪrɛn") .text(" or ") .pronunciation("sacubitril", "səˈkjuːbɪtrɪl") - .pause(500) - .text(" Call your doctor if symptoms worsen.") + .text(". 
Call your doctor if symptoms worsen.") .build() ) @@ -194,14 +186,15 @@ def example_pharmacy_instructions(): if api_key: client = DeepgramClient(api_key=api_key) - response = client.speak.v1.generate( - text, - model=AudioGenerateRequestModel.AURA_ASTERIA_EN, - encoding=AudioGenerateRequestEncoding.LINEAR16, + response = client.speak.v1.audio.generate( + text=text, + model="aura-2-asteria-en", + encoding="linear16", ) with open("output_example5.wav", "wb") as f: - f.write(response) + for chunk in response: + f.write(chunk) print("✓ Audio saved to output_example5.wav") else: diff --git a/examples/24-text-builder-streaming.py b/examples/24-text-builder-streaming.py index fac0f8b9..3b49748e 100644 --- a/examples/24-text-builder-streaming.py +++ b/examples/24-text-builder-streaming.py @@ -65,16 +65,13 @@ def on_message(message: SpeakV1SocketClientResponse) -> None: connection.on(EventType.ERROR, lambda error: print(f"✗ Error: {error}")) # Send the TextBuilder-generated text - text_message = SpeakV1Text(text=text) - connection.send_speak_v_1_text(text_message) + connection.send_text(SpeakV1Text(text=text)) # Flush to ensure all text is processed - flush_message = SpeakV1Flush() - connection.send_speak_v_1_flush(flush_message) + connection.send_flush() # Close the connection when done - close_message = SpeakV1Close() - connection.send_speak_v_1_close(close_message) + connection.send_close() # Start listening - this blocks until the connection closes connection.start_listening() @@ -138,10 +135,10 @@ def on_message(message: SpeakV1SocketClientResponse) -> None: # Send multiple messages for i, text in enumerate([intro, instruction1, instruction2, closing], 1): print(f"Sending message {i}: {text[:50]}...") - connection.send_speak_v_1_text(SpeakV1Text(text=text)) + connection.send_text(SpeakV1Text(text=text)) - connection.send_speak_v_1_flush(SpeakV1Flush()) - connection.send_speak_v_1_close(SpeakV1Close()) + connection.send_flush() + connection.send_close() 
connection.start_listening() diff --git a/examples/30-voice-agent.py b/examples/30-voice-agent.py index 679e4690..f88188a7 100644 --- a/examples/30-voice-agent.py +++ b/examples/30-voice-agent.py @@ -2,8 +2,12 @@ Example: Voice Agent (Agent V1) This example shows how to set up a voice agent that can listen, think, and speak. +It streams a pre-recorded audio file to simulate user speech. """ +import os +import threading +import time from typing import Union from dotenv import load_dotenv @@ -29,6 +33,8 @@ client = DeepgramClient() +audio_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "fixtures", "audio.wav") + try: with client.agent.v1.connect() as agent: # Configure the agent settings @@ -52,7 +58,7 @@ model="gpt-4o-mini", temperature=0.7, ), - prompt="You are a helpful AI assistant.", + prompt="You are a helpful AI assistant. Keep your responses brief.", ), speak=SpeakSettingsV1( provider=SpeakSettingsV1Provider_Deepgram( @@ -68,31 +74,49 @@ def on_message(message: AgentV1SocketClientResponse) -> None: if isinstance(message, bytes): - print("Received audio data") - # In production, you would play this audio or write it to a file + print(f"Received agent audio ({len(message)} bytes)") else: msg_type = getattr(message, "type", "Unknown") - print(f"Received {msg_type} event") + if msg_type == "ConversationText": + role = getattr(message, "role", "unknown") + content = getattr(message, "content", "") + print(f"[{role}] {content}") + elif msg_type == "UserStartedSpeaking": + print(">> User started speaking") + elif msg_type == "AgentThinking": + print(">> Agent thinking...") + elif msg_type == "AgentStartedSpeaking": + print(">> Agent started speaking") + elif msg_type == "AgentAudioDone": + print(">> Agent finished speaking") + else: + print(f"Received {msg_type} event") agent.on(EventType.OPEN, lambda _: print("Connection opened")) agent.on(EventType.MESSAGE, on_message) agent.on(EventType.CLOSE, lambda _: print("Connection closed")) 
agent.on(EventType.ERROR, lambda error: print(f"Error: {error}")) - # Start listening - this blocks until the connection closes - # In production, you would send audio from your microphone or audio source: - # with open("audio.wav", "rb") as audio_file: - # audio_data = audio_file.read() - # agent.send_media(audio_data) + # Stream audio in a background thread + def send_audio(): + with open(audio_path, "rb") as f: + audio_data = f.read() - agent.start_listening() + chunk_size = 8192 + for i in range(0, len(audio_data), chunk_size): + chunk = audio_data[i : i + chunk_size] + if chunk: + agent.send_media(chunk) + time.sleep(0.01) + + print("Finished streaming audio, waiting for agent response...") + time.sleep(15) - # For async version: - # from deepgram import AsyncDeepgramClient - # async with client.agent.v1.connect() as agent: - # # ... same configuration ... - # await agent.send_settings(settings) - # await agent.start_listening() + sender = threading.Thread(target=send_audio, daemon=True) + sender.start() + + # Start listening - blocks until connection closes + agent.start_listening() except Exception as e: print(f"Error: {e}") diff --git a/examples/51-management-keys.py b/examples/51-management-keys.py index 54c41709..761bad25 100644 --- a/examples/51-management-keys.py +++ b/examples/51-management-keys.py @@ -41,17 +41,17 @@ "scopes": ["usage:read"], }, ) - print(f"Created key ID: {new_key.key_id}") + print(f"Created key ID: {new_key.api_key_id}") print(f"Key: {new_key.key}") print("⚠️ Save this key now - it won't be shown again!") # Get a specific key if keys.api_keys: - key_id = keys.api_keys[0].key_id + key_id = keys.api_keys[0].api_key.api_key_id print(f"\nGetting key details for: {key_id}") key = client.manage.v1.projects.keys.get(project_id=project_id, key_id=key_id) - print(f"Key comment: {key.comment}") - print(f"Key scopes: {key.scopes}") + print(f"Key comment: {key.api_key['comment']}") + print(f"Key scopes: {key.api_key['scopes']}") # Delete a key 
(commented out for safety) # client.manage.v1.projects.keys.delete( diff --git a/examples/52-management-members.py b/examples/52-management-members.py index 6d23fe2b..0ccc65b7 100644 --- a/examples/52-management-members.py +++ b/examples/52-management-members.py @@ -35,14 +35,14 @@ member_id = members.members[0].member_id print(f"\nGetting scopes for member: {member_id}") scopes = client.manage.v1.projects.members.scopes.list(project_id=project_id, member_id=member_id) - print(f"Member scopes: {scopes.scope}") + print(f"Member scopes: {scopes.scopes}") # Update member scopes print("\nUpdating member scopes...") updated = client.manage.v1.projects.members.scopes.update( project_id=project_id, member_id=member_id, scope="admin" ) - print(f"Updated scopes: {updated.scope}") + print(f"Updated scopes: {updated.scopes}") # Remove a member (commented out for safety) # client.manage.v1.projects.members.delete( diff --git a/examples/55-management-billing.py b/examples/55-management-billing.py index ec4037d0..3f87fa10 100644 --- a/examples/55-management-billing.py +++ b/examples/55-management-billing.py @@ -39,18 +39,20 @@ # Get billing breakdown print("\nGetting billing breakdown...") - breakdown = client.manage.v1.projects.billing.breakdown.get(project_id=project_id) - print(f"Breakdown entries: {len(breakdown.entries) if breakdown.entries else 0}") + breakdown = client.manage.v1.projects.billing.breakdown.list(project_id=project_id) + print(f"Breakdown results: {len(breakdown.results) if breakdown.results else 0}") # Get billing fields print("\nGetting billing fields...") fields = client.manage.v1.projects.billing.fields.list(project_id=project_id) - print(f"Available fields: {len(fields.fields) if fields.fields else 0}") + print(f"Accessors: {len(fields.accessors) if fields.accessors else 0}" + f", Deployments: {len(fields.deployments) if fields.deployments else 0}" + f", Tags: {len(fields.tags) if fields.tags else 0}") # List billing purchases print("\nListing billing 
purchases...") purchases = client.manage.v1.projects.billing.purchases.list(project_id=project_id) - print(f"Found {len(purchases.purchases) if purchases.purchases else 0} purchases") + print(f"Found {len(purchases.orders) if purchases.orders else 0} purchases") # For async version: # from deepgram import AsyncDeepgramClient