diff --git a/aixplain/factories/agent_factory/__init__.py b/aixplain/factories/agent_factory/__init__.py index c90dcc05..55bebe5b 100644 --- a/aixplain/factories/agent_factory/__init__.py +++ b/aixplain/factories/agent_factory/__init__.py @@ -23,6 +23,7 @@ import json import logging +import warnings from aixplain.enums.function import Function from aixplain.enums.supplier import Supplier @@ -46,6 +47,7 @@ def create( cls, name: Text, description: Text, + role: Optional[Text] = None, llm_id: Text = "669a63646eb56306647e1091", tools: List[Tool] = [], api_key: Text = config.TEAM_API_KEY, @@ -54,9 +56,15 @@ def create( ) -> Agent: """Create a new agent in the platform. + Warning: + The 'role' parameter was recently added and serves the same purpose as 'description' did previously: set the role of the agent as a system prompt. + The 'description' parameter is still required and should be used to set a short summary of the agent's purpose. + For the next releases, the 'role' parameter will be required. + Args: name (Text): name of the agent description (Text): description of the agent role. + role (Text): role of the agent. llm_id (Text, optional): aiXplain ID of the large language model to be used as agent. Defaults to "669a63646eb56306647e1091" (GPT-4o mini). tools (List[Tool], optional): list of tool for the agent. Defaults to []. api_key (Text, optional): team/user API key. Defaults to config.TEAM_API_KEY. @@ -66,6 +74,12 @@ def create( Returns: Agent: created Agent """ + warnings.warn( + "The 'role' parameter was recently added and serves the same purpose as 'description' did previously: set the role of the agent as a system prompt. " + "The 'description' parameter is still required and should be used to set a short summary of the agent's purpose. 
" + "For the next releases, the 'role' parameter will be required.", + UserWarning, + ) from aixplain.factories.agent_factory.utils import build_agent agent = None @@ -81,6 +95,7 @@ def create( "name": name, "assets": [tool.to_dict() for tool in tools], "description": description, + "role": role or description, "supplier": supplier, "version": version, "llmId": llm_id, diff --git a/aixplain/factories/agent_factory/utils.py b/aixplain/factories/agent_factory/utils.py index d6857468..0185a76b 100644 --- a/aixplain/factories/agent_factory/utils.py +++ b/aixplain/factories/agent_factory/utils.py @@ -3,7 +3,10 @@ import aixplain.utils.config as config from aixplain.enums import Function, Supplier from aixplain.enums.asset_status import AssetStatus -from aixplain.modules.agent import Agent, ModelTool, PipelineTool, PythonInterpreterTool +from aixplain.modules.agent import Agent +from aixplain.modules.agent.tool.model_tool import ModelTool +from aixplain.modules.agent.tool.pipeline_tool import PipelineTool +from aixplain.modules.agent.tool.python_interpreter_tool import PythonInterpreterTool from aixplain.modules.agent.tool.custom_python_code_tool import CustomPythonCodeTool from typing import Dict, Text from urllib.parse import urljoin @@ -50,6 +53,7 @@ def build_agent(payload: Dict, api_key: Text = config.TEAM_API_KEY) -> Agent: name=payload.get("name", ""), tools=tools, description=payload.get("description", ""), + role=payload.get("role", ""), supplier=payload.get("teamId", None), version=payload.get("version", None), cost=payload.get("cost", None), diff --git a/aixplain/factories/asset_factory.py b/aixplain/factories/asset_factory.py index 51192b2a..56d938db 100644 --- a/aixplain/factories/asset_factory.py +++ b/aixplain/factories/asset_factory.py @@ -22,7 +22,7 @@ """ from abc import abstractmethod -from typing import List, Text +from typing import Text from aixplain.modules.asset import Asset from aixplain.utils import config diff --git 
a/aixplain/factories/benchmark_factory.py b/aixplain/factories/benchmark_factory.py index 743ed7fa..3f643a3d 100644 --- a/aixplain/factories/benchmark_factory.py +++ b/aixplain/factories/benchmark_factory.py @@ -43,7 +43,6 @@ class BenchmarkFactory: backend_url (str): The URL for the backend. """ - backend_url = config.BACKEND_URL @classmethod diff --git a/aixplain/factories/dataset_factory.py b/aixplain/factories/dataset_factory.py index c7ccad70..ca9d993e 100644 --- a/aixplain/factories/dataset_factory.py +++ b/aixplain/factories/dataset_factory.py @@ -57,7 +57,6 @@ class DatasetFactory(AssetFactory): backend_url (str): The URL for the backend. """ - backend_url = config.BACKEND_URL @classmethod diff --git a/aixplain/factories/model_factory/__init__.py b/aixplain/factories/model_factory/__init__.py index bbb93cfa..b39cc668 100644 --- a/aixplain/factories/model_factory/__init__.py +++ b/aixplain/factories/model_factory/__init__.py @@ -43,8 +43,8 @@ class ModelFactory: @classmethod def create_utility_model( cls, - name: Text, - code: Union[Text, Callable], + name: Optional[Text] = None, + code: Union[Text, Callable] = None, inputs: List[UtilityModelInput] = [], description: Optional[Text] = None, output_examples: Text = "", @@ -153,6 +153,8 @@ def list( sort_order: SortOrder = SortOrder.ASCENDING, page_number: int = 0, page_size: int = 20, + model_ids: Optional[List[Text]] = None, + api_key: Optional[Text] = None, ) -> List[Model]: """Gets the first k given models based on the provided task and language filters @@ -165,25 +167,44 @@ def list( sort_by (Optional[SortBy], optional): sort the retrived models by a specific attribute, page_number (int, optional): page number. Defaults to 0. page_size (int, optional): page size. Defaults to 20. + model_ids (Optional[List[Text]], optional): model ids to filter. Defaults to None. + api_key (Optional[Text], optional): Team API key. Defaults to None. 
Returns: List[Model]: List of models based on given filters """ - from aixplain.factories.model_factory.utils import get_assets_from_page - - models, total = get_assets_from_page( - query, - page_number, - page_size, - function, - suppliers, - source_languages, - target_languages, - is_finetunable, - ownership, - sort_by, - sort_order, - ) + if model_ids is not None: + from aixplain.factories.model_factory.utils import get_model_from_ids + + assert len(model_ids) > 0, "Please provide at least one model id" + assert ( + function is None + and suppliers is None + and source_languages is None + and target_languages is None + and is_finetunable is None + and ownership is None + and sort_by is None + ), "Cannot filter by function, suppliers, source languages, target languages, is finetunable, ownership, sort by when using model ids" + assert len(model_ids) <= page_size, "Page size must be greater than the number of model ids" + models, total = get_model_from_ids(model_ids, api_key), len(model_ids) + else: + from aixplain.factories.model_factory.utils import get_assets_from_page + + models, total = get_assets_from_page( + query, + page_number, + page_size, + function, + suppliers, + source_languages, + target_languages, + is_finetunable, + ownership, + sort_by, + sort_order, + api_key, + ) return { "results": models, "page_total": min(page_size, len(models)), diff --git a/aixplain/factories/model_factory/utils.py b/aixplain/factories/model_factory/utils.py index 9fbf8f41..abdd474b 100644 --- a/aixplain/factories/model_factory/utils.py +++ b/aixplain/factories/model_factory/utils.py @@ -94,6 +94,7 @@ def get_assets_from_page( ownership: Optional[Tuple[OwnershipType, List[OwnershipType]]] = None, sort_by: Optional[SortBy] = None, sort_order: SortOrder = SortOrder.ASCENDING, + api_key: Optional[str] = None, ) -> List[Model]: try: url = urljoin(config.BACKEND_URL, "sdk/models/paginate") @@ -146,9 +147,49 @@ def get_assets_from_page( all_models = resp["items"] from 
aixplain.factories.model_factory.utils import create_model_from_response - model_list = [create_model_from_response(model_info_json) for model_info_json in all_models] + model_list = [] + for model_info_json in all_models: + model_info_json["api_key"] = config.TEAM_API_KEY + if api_key is not None: + model_info_json["api_key"] = api_key + model_list.append(create_model_from_response(model_info_json)) return model_list, resp["total"] else: error_message = f"Listing Models Error: Failed to retrieve models. Status Code: {r.status_code}. Error: {resp}" logging.error(error_message) raise Exception(error_message) + + +def get_model_from_ids(model_ids: List[str], api_key: Optional[str] = None) -> List[Model]: + from aixplain.factories.model_factory.utils import create_model_from_response + + resp = None + try: + url = urljoin(config.BACKEND_URL, f"sdk/models?ids={','.join(model_ids)}") + + headers = {"Authorization": f"Token {config.TEAM_API_KEY}", "Content-Type": "application/json"} + logging.info(f"Start service for GET Model - {url} - {headers}") + r = _request_with_retry("get", url, headers=headers) + resp = r.json() + + except Exception: + if resp is not None and "statusCode" in resp: + status_code = resp["statusCode"] + message = resp["message"] + message = f"Model Creation: Status {status_code} - {message}" + else: + message = "Model Creation: Unspecified Error" + logging.error(message) + raise Exception(f"{message}") + if 200 <= r.status_code < 300: + models = [] + for item in resp["items"]: + item["api_key"] = config.TEAM_API_KEY + if api_key is not None: + item["api_key"] = api_key + models.append(create_model_from_response(item)) + return models + else: + error_message = f"Model GET Error: Failed to retrieve models {model_ids}. Status Code: {r.status_code}. 
Error: {resp}" + logging.error(error_message) + raise Exception(error_message) diff --git a/aixplain/factories/pipeline_factory/__init__.py b/aixplain/factories/pipeline_factory/__init__.py index f960d6da..cfbfce54 100644 --- a/aixplain/factories/pipeline_factory/__init__.py +++ b/aixplain/factories/pipeline_factory/__init__.py @@ -121,9 +121,9 @@ def get_assets_from_page(cls, page_number: int) -> List[Pipeline]: url = urljoin(cls.backend_url, f"sdk/pipelines/?pageNumber={page_number}") headers = { - "Authorization": f"Token {config.TEAM_API_KEY}", - "Content-Type": "application/json", - } + "Authorization": f"Token {config.TEAM_API_KEY}", + "Content-Type": "application/json", + } r = _request_with_retry("get", url, headers=headers) resp = r.json() logging.info(f"Listing Pipelines: Status of getting Pipelines on Page {page_number}: {resp}") @@ -172,9 +172,9 @@ def list( url = urljoin(cls.backend_url, "sdk/pipelines/paginate") headers = { - "Authorization": f"Token {config.TEAM_API_KEY}", - "Content-Type": "application/json", - } + "Authorization": f"Token {config.TEAM_API_KEY}", + "Content-Type": "application/json", + } assert 0 < page_size <= 100, "Pipeline List Error: Page size must be greater than 0 and not exceed 100." 
payload = { diff --git a/aixplain/factories/pipeline_factory/utils.py b/aixplain/factories/pipeline_factory/utils.py index 2a7de16b..b571b427 100644 --- a/aixplain/factories/pipeline_factory/utils.py +++ b/aixplain/factories/pipeline_factory/utils.py @@ -56,13 +56,9 @@ def build_from_response(response: Dict, load_architecture: bool = False) -> Pipe else: node = BareAsset(asset_id=node_json["assetId"]) elif node_json["type"].lower() == "decision": - node = Decision( - routes=[Route(**route) for route in node_json["routes"]] - ) + node = Decision(routes=[Route(**route) for route in node_json["routes"]]) elif node_json["type"].lower() == "router": - node = Router( - routes=[Route(**route) for route in node_json["routes"]] - ) + node = Router(routes=[Route(**route) for route in node_json["routes"]]) elif node_json["type"].lower() == "script": node = Script( fileId=node_json["fileId"], @@ -74,20 +70,10 @@ def build_from_response(response: Dict, load_architecture: bool = False) -> Pipe if "inputValues" in node_json: [ node.inputs.create_param( - data_type=( - DataType(input_param["dataType"]) - if "dataType" in input_param - else None - ), + data_type=(DataType(input_param["dataType"]) if "dataType" in input_param else None), code=input_param["code"], - value=( - input_param["value"] if "value" in input_param else None - ), - is_required=( - input_param["isRequired"] - if "isRequired" in input_param - else False - ), + value=(input_param["value"] if "value" in input_param else None), + is_required=(input_param["isRequired"] if "isRequired" in input_param else False), ) for input_param in node_json["inputValues"] if input_param["code"] not in node.inputs @@ -95,22 +81,10 @@ def build_from_response(response: Dict, load_architecture: bool = False) -> Pipe if "outputValues" in node_json: [ node.outputs.create_param( - data_type=( - DataType(output_param["dataType"]) - if "dataType" in output_param - else None - ), + data_type=(DataType(output_param["dataType"]) if "dataType" 
in output_param else None), code=output_param["code"], - value=( - output_param["value"] - if "value" in output_param - else None - ), - is_required=( - output_param["isRequired"] - if "isRequired" in output_param - else False - ), + value=(output_param["value"] if "value" in output_param else None), + is_required=(output_param["isRequired"] if "isRequired" in output_param else False), ) for output_param in node_json["outputValues"] if output_param["code"] not in node.outputs diff --git a/aixplain/modules/agent/__init__.py b/aixplain/modules/agent/__init__.py index 5ff9ff69..8f79294a 100644 --- a/aixplain/modules/agent/__init__.py +++ b/aixplain/modules/agent/__init__.py @@ -34,10 +34,9 @@ from aixplain.modules.model import Model from aixplain.modules.agent.output_format import OutputFormat from aixplain.modules.agent.tool import Tool -from aixplain.modules.agent.tool.model_tool import ModelTool -from aixplain.modules.agent.tool.pipeline_tool import PipelineTool -from aixplain.modules.agent.tool.python_interpreter_tool import PythonInterpreterTool -from aixplain.modules.agent.tool.custom_python_code_tool import CustomPythonCodeTool +from aixplain.modules.agent.agent_response import AgentResponse +from aixplain.modules.agent.agent_response_data import AgentResponseData +from aixplain.enums import ResponseStatus from aixplain.modules.agent.utils import process_variables from typing import Dict, List, Text, Optional, Union from urllib.parse import urljoin @@ -66,6 +65,7 @@ def __init__( id: Text, name: Text, description: Text, + role: Text, tools: List[Tool] = [], llm_id: Text = "6646261c6eb563165658bbb1", api_key: Optional[Text] = config.TEAM_API_KEY, @@ -81,6 +81,7 @@ def __init__( id (Text): ID of the Agent name (Text): Name of the Agent description (Text): description of the Agent. + role (Text): role of the Agent. tools (List[Tool]): List of tools that the Agent uses. llm_id (Text, optional): large language model. Defaults to GPT-4o (6646261c6eb563165658bbb1). 
supplier (Text): Supplier of the Agent. @@ -90,6 +91,7 @@ def __init__( cost (Dict, optional): model price. Defaults to None. """ super().__init__(id, name, description, api_key, supplier, version, cost=cost) + self.role = role self.additional_info = additional_info self.tools = tools for i, _ in enumerate(tools): @@ -108,8 +110,8 @@ def validate(self) -> None: # validate name assert ( - re.match("^[a-zA-Z0-9 ]*$", self.name) is not None - ), "Agent Creation Error: Agent name must not contain special characters." + re.match(r"^[a-zA-Z0-9 \-\(\)]*$", self.name) is not None + ), "Agent Creation Error: Agent name contains invalid characters. Only alphanumeric characters, spaces, hyphens, and brackets are allowed." try: llm = ModelFactory.get(self.llm_id, api_key=self.api_key) @@ -134,7 +136,7 @@ def run( max_tokens: int = 2048, max_iterations: int = 10, output_format: OutputFormat = OutputFormat.TEXT, - ) -> Dict: + ) -> AgentResponse: """Runs an agent call. Args: @@ -167,19 +169,42 @@ def run( max_iterations=max_iterations, output_format=output_format, ) - if response["status"] == "FAILED": + if response["status"] == ResponseStatus.FAILED: end = time.time() response["elapsed_time"] = end - start return response poll_url = response["url"] end = time.time() - response = self.sync_poll(poll_url, name=name, timeout=timeout, wait_time=wait_time) - return response + result = self.sync_poll(poll_url, name=name, timeout=timeout, wait_time=wait_time) + result_data = result.data + return AgentResponse( + status=ResponseStatus.SUCCESS, + completed=True, + data=AgentResponseData( + input=result_data.get("input"), + output=result_data.get("output"), + session_id=result_data.get("session_id"), + intermediate_steps=result_data.get("intermediate_steps"), + execution_stats=result_data.get("executionStats"), + ), + used_credits=result_data.get("usedCredits", 0.0), + run_time=result_data.get("runTime", end - start), + ) except Exception as e: msg = f"Error in request for {name} - 
{traceback.format_exc()}" logging.error(f"Agent Run: Error in running for {name}: {e}") end = time.time() - return {"status": "FAILED", "error": msg, "elapsed_time": end - start} + return AgentResponse( + status=ResponseStatus.FAILED, + data=AgentResponseData( + input=data, + output=None, + session_id=result_data.get("session_id"), + intermediate_steps=result_data.get("intermediate_steps"), + execution_stats=result_data.get("executionStats"), + ), + error=msg, + ) def run_async( self, @@ -193,7 +218,7 @@ def run_async( max_tokens: int = 2048, max_iterations: int = 10, output_format: OutputFormat = OutputFormat.TEXT, - ) -> Dict: + ) -> AgentResponse: """Runs asynchronously an agent call. Args: @@ -261,23 +286,24 @@ def run_async( payload.update(parameters) payload = json.dumps(payload) - r = _request_with_retry("post", self.url, headers=headers, data=payload) - logging.info(f"Agent Run Async: Start service for {name} - {self.url} - {payload} - {headers}") - - resp = None try: + r = _request_with_retry("post", self.url, headers=headers, data=payload) resp = r.json() - logging.info(f"Result of request for {name} - {r.status_code} - {resp}") - - poll_url = resp["data"] - response = {"status": "IN_PROGRESS", "url": poll_url} - except Exception: - response = {"status": "FAILED"} + poll_url = resp.get("data") + return AgentResponse( + status=ResponseStatus.IN_PROGRESS, + url=poll_url, + data=AgentResponseData(input=input_data), + run_time=0.0, + used_credits=0.0, + ) + except Exception as e: msg = f"Error in request for {name} - {traceback.format_exc()}" - logging.error(f"Agent Run Async: Error in running for {name}: {resp}") - if resp is not None: - response["error"] = msg - return response + logging.error(f"Agent Run Async: Error in running for {name}: {e}") + return AgentResponse( + status=ResponseStatus.FAILED, + error=msg, + ) def to_dict(self) -> Dict: return { @@ -285,6 +311,7 @@ def to_dict(self) -> Dict: "name": self.name, "assets": [tool.to_dict() for tool in 
self.tools], "description": self.description, + "role": self.role, "supplier": self.supplier.value["code"] if isinstance(self.supplier, Supplier) else self.supplier, "version": self.version, "llmId": self.llm_id, diff --git a/aixplain/modules/agent/agent_response.py b/aixplain/modules/agent/agent_response.py new file mode 100644 index 00000000..9ece7aa7 --- /dev/null +++ b/aixplain/modules/agent/agent_response.py @@ -0,0 +1,56 @@ +from aixplain.enums import ResponseStatus +from typing import Any, Dict, Optional, Text, Union, List +from aixplain.modules.agent.agent_response_data import AgentResponseData +from aixplain.modules.model.response import ModelResponse + + +class AgentResponse(ModelResponse): + def __init__( + self, + status: ResponseStatus = ResponseStatus.FAILED, + data: Optional[AgentResponseData] = None, + details: Optional[Union[Dict, List]] = {}, + completed: bool = False, + error_message: Text = "", + used_credits: float = 0.0, + run_time: float = 0.0, + usage: Optional[Dict] = None, + url: Optional[Text] = None, + **kwargs, + ): + + super().__init__( + status=status, + data="", + details=details, + completed=completed, + error_message=error_message, + used_credits=used_credits, + run_time=run_time, + usage=usage, + url=url, + **kwargs, + ) + self.data = data or AgentResponseData() + + def __getitem__(self, key: Text) -> Any: + if key == "data": + return self.data.to_dict() + return super().__getitem__(key) + + def __setitem__(self, key: Text, value: Any) -> None: + if key == "data" and isinstance(value, Dict): + self.data = AgentResponseData.from_dict(value) + elif key == "data" and isinstance(value, AgentResponseData): + self.data = value + else: + super().__setitem__(key, value) + + def to_dict(self) -> Dict[Text, Any]: + base_dict = super().to_dict() + base_dict["data"] = self.data.to_dict() + return base_dict + + def __repr__(self) -> str: + fields = super().__repr__().strip("ModelResponse(").rstrip(")") + return f"AgentResponse({fields})" diff 
--git a/aixplain/modules/agent/agent_response_data.py b/aixplain/modules/agent/agent_response_data.py new file mode 100644 index 00000000..6040be0c --- /dev/null +++ b/aixplain/modules/agent/agent_response_data.py @@ -0,0 +1,56 @@ +from typing import List, Dict, Any, Optional + + +class AgentResponseData: + def __init__( + self, + input: Optional[Any] = None, + output: Optional[Any] = None, + session_id: str = "", + intermediate_steps: Optional[List[Any]] = None, + execution_stats: Optional[Dict[str, Any]] = None, + ): + self.input = input + self.output = output + self.session_id = session_id + self.intermediate_steps = intermediate_steps or [] + self.execution_stats = execution_stats + + @classmethod + def from_dict(cls, data: Dict[str, Any]) -> "AgentResponseData": + return cls( + input=data.get("input"), + output=data.get("output"), + session_id=data.get("session_id", ""), + intermediate_steps=data.get("intermediate_steps", []), + execution_stats=data.get("executionStats"), + ) + + def to_dict(self) -> Dict[str, Any]: + return { + "input": self.input, + "output": self.output, + "session_id": self.session_id, + "intermediate_steps": self.intermediate_steps, + "executionStats": self.execution_stats, + "execution_stats": self.execution_stats, + } + + def __getitem__(self, key): + return getattr(self, key, None) + + def __setitem__(self, key, value): + if hasattr(self, key): + setattr(self, key, value) + else: + raise KeyError(f"{key} is not a valid attribute of {self.__class__.__name__}") + + def __repr__(self) -> str: + return ( + f"{self.__class__.__name__}(" + f"input={self.input}, " + f"output={self.output}, " + f"session_id='{self.session_id}', " + f"intermediate_steps={self.intermediate_steps}, " + f"execution_stats={self.execution_stats})" + ) diff --git a/aixplain/modules/benchmark.py b/aixplain/modules/benchmark.py index 3f77cb07..d76b2e62 100644 --- a/aixplain/modules/benchmark.py +++ b/aixplain/modules/benchmark.py @@ -21,14 +21,12 @@ Benchmark Class """ 
import logging -from typing import List, Text, Dict, Optional +from typing import List, Text from aixplain.utils import config from aixplain.modules import Asset, Dataset, Metric, Model from aixplain.modules.benchmark_job import BenchmarkJob from urllib.parse import urljoin -import pandas as pd -from pathlib import Path -from aixplain.utils.file_utils import _request_with_retry, save_file +from aixplain.utils.file_utils import _request_with_retry class Benchmark(Asset): diff --git a/aixplain/modules/benchmark_job.py b/aixplain/modules/benchmark_job.py index 8fe13a19..29a33aa7 100644 --- a/aixplain/modules/benchmark_job.py +++ b/aixplain/modules/benchmark_job.py @@ -1,7 +1,6 @@ import logging -from typing import List, Text, Dict, Optional +from typing import Text, Dict, Optional from aixplain.utils import config -from aixplain.modules import Asset, Dataset, Metric, Model from urllib.parse import urljoin import pandas as pd from pathlib import Path diff --git a/aixplain/modules/model/__init__.py b/aixplain/modules/model/__init__.py index 104bcb62..05dd06b5 100644 --- a/aixplain/modules/model/__init__.py +++ b/aixplain/modules/model/__init__.py @@ -58,7 +58,7 @@ class Model(Asset): def __init__( self, id: Text, - name: Text, + name: Text = "", description: Text = "", api_key: Text = config.TEAM_API_KEY, supplier: Union[Dict, Text, Supplier, int] = "aiXplain", diff --git a/aixplain/modules/model/response.py b/aixplain/modules/model/response.py index 9cbbe4d8..ac9f8184 100644 --- a/aixplain/modules/model/response.py +++ b/aixplain/modules/model/response.py @@ -92,3 +92,19 @@ def __contains__(self, key: Text) -> bool: return True except KeyError: return False + + def to_dict(self) -> Dict[Text, Any]: + base_dict = { + "status": self.status, + "data": self.data, + "details": self.details, + "completed": self.completed, + "error_message": self.error_message, + "used_credits": self.used_credits, + "run_time": self.run_time, + "usage": self.usage, + "url": self.url, + } + if 
self.additional_fields: + base_dict.update(self.additional_fields) + return base_dict diff --git a/aixplain/modules/model/utility_model.py b/aixplain/modules/model/utility_model.py index d8324c62..3069ee41 100644 --- a/aixplain/modules/model/utility_model.py +++ b/aixplain/modules/model/utility_model.py @@ -20,10 +20,11 @@ """ import logging from aixplain.enums import Function, Supplier, DataType +from aixplain.enums.asset_status import AssetStatus from aixplain.modules.model import Model from aixplain.utils import config from aixplain.utils.file_utils import _request_with_retry -from aixplain.modules.model.utils import parse_code +from aixplain.modules.model.utils import parse_code_decorated from dataclasses import dataclass from typing import Callable, Union, Optional, List, Text, Dict from urllib.parse import urljoin @@ -42,6 +43,45 @@ def validate(self): def to_dict(self): return {"name": self.name, "description": self.description, "type": self.type.value} +# Tool decorator +def utility_tool(name: Text, description: Text, inputs: List[UtilityModelInput] = None, output_examples: Text = "", status = AssetStatus.DRAFT): + """Decorator for utility tool functions + + Args: + name: Name of the utility tool + description: Description of what the utility tool does + inputs: List of input parameters, must be UtilityModelInput objects + output_examples: Examples of expected outputs + status: Asset status + + Raises: + ValueError: If name or description is empty + TypeError: If inputs contains non-UtilityModelInput objects + """ + # Validate name and description + if not name or not name.strip(): + raise ValueError("Utility tool name cannot be empty") + if not description or not description.strip(): + raise ValueError("Utility tool description cannot be empty") + + # Validate inputs + if inputs is not None: + if not isinstance(inputs, list): + raise TypeError("Inputs must be a list of UtilityModelInput objects") + for input_param in inputs: + if not 
isinstance(input_param, UtilityModelInput): + raise TypeError(f"Invalid input parameter: {input_param}. All inputs must be UtilityModelInput objects") + + def decorator(func): + func._is_utility_tool = True # Mark function as utility tool + func._tool_name = name.strip() + func._tool_description = description.strip() + func._tool_inputs = inputs if inputs else [] + func._tool_output_examples = output_examples + func._tool_status = status + return func + return decorator + class UtilityModel(Model): """Ready-to-use Utility Model. @@ -65,8 +105,8 @@ class UtilityModel(Model): def __init__( self, id: Text, - name: Text, - code: Union[Text, Callable], + name: Optional[Text] = None, + code: Union[Text, Callable] = None, description: Optional[Text] = None, inputs: List[UtilityModelInput] = [], output_examples: Text = "", @@ -76,6 +116,7 @@ def __init__( function: Optional[Function] = None, is_subscribed: bool = False, cost: Optional[Dict] = None, + status: AssetStatus = AssetStatus.ONBOARDED,# TODO: change to draft when we have the backend ready **additional_info, ) -> None: """Utility Model Init @@ -113,16 +154,25 @@ def __init__( self.code = code self.inputs = inputs self.output_examples = output_examples + if isinstance(status, str): + try: + status = AssetStatus(status) + except Exception: + status = AssetStatus.DRAFT + self.status = status def validate(self): """Validate the Utility Model.""" description = None + name = None inputs = [] - # check if the model exists and if the code is strring with s3:// + # check if the model exists and if the code is strring with s3:// # if not, parse the code and update the description and inputs and do the validation # if yes, just do the validation on the description and inputs if not (self._model_exists() and str(self.code).startswith("s3://")): - self.code, inputs, description = parse_code(self.code) + self.code, inputs, description, name = parse_code_decorated(self.code) + if self.name is None: + self.name = name if 
self.description is None: self.description = description if len(self.inputs) == 0: @@ -131,7 +181,7 @@ def validate(self): input.validate() else: logging.info("Utility Model Already Exists, skipping code validation") - + assert description is not None or self.description is not None, "Utility Model Error: Model description is required" assert self.name and self.name.strip() != "", "Name is required" assert self.description and self.description.strip() != "", "Description is required" @@ -158,20 +208,21 @@ def to_dict(self): "code": self.code, "function": self.function.value, "outputDescription": self.output_examples, + "status": self.status.value, } def update(self): """Update the Utility Model.""" import warnings import inspect + # Get the current call stack stack = inspect.stack() - if len(stack) > 2 and stack[1].function != 'save': + if len(stack) > 2 and stack[1].function != "save": warnings.warn( - "update() is deprecated and will be removed in a future version. " - "Please use save() instead.", + "update() is deprecated and will be removed in a future version. " "Please use save() instead.", DeprecationWarning, - stacklevel=2 + stacklevel=2, ) self.validate() @@ -213,3 +264,9 @@ def delete(self): message = f"Utility Model Deletion Error: {response}" logging.error(message) raise Exception(f"{message}") + + def deploy(self) -> None: + assert self.status == AssetStatus.DRAFT, "Utility Model must be in draft status to be deployed." + assert self.status != AssetStatus.ONBOARDED, "Utility Model is already deployed." 
+ self.status = AssetStatus.ONBOARDED + self.update() diff --git a/aixplain/modules/model/utils.py b/aixplain/modules/model/utils.py index f3691928..14f232d5 100644 --- a/aixplain/modules/model/utils.py +++ b/aixplain/modules/model/utils.py @@ -90,11 +90,12 @@ def parse_code(code: Union[Text, Callable]) -> Tuple[Text, List, Text]: from aixplain.factories.file_factory import FileFactory from uuid import uuid4 - inputs, description = [], "" + inputs, description, name = [], "", "" if isinstance(code, Callable): str_code = inspect.getsource(code) description = code.__doc__.strip() if code.__doc__ else "" + name = code.__name__ elif os.path.exists(code): with open(code, "r") as f: str_code = f.read() @@ -102,11 +103,27 @@ def parse_code(code: Union[Text, Callable]) -> Tuple[Text, List, Text]: str_code = requests.get(code).text else: str_code = code - + # assert str_code has a main function if "def main(" not in str_code: raise Exception("Utility Model Error: Code must have a main function") - + # get name of the function + name = re.search(r"def\s+([a-zA-Z_][a-zA-Z0-9_]*)\(", str_code).group(1) + + if not description: + # if the description is not provided, get the docstring of the function from string code after defining the function + # the docstring is the first line after the function definition + regex = r'def\s+([a-zA-Z_][a-zA-Z0-9_]*)\s*\((.*?)\).*?(?:"""(.*?)"""|\'\'\'(.*?)\'\'\'|\#\s*(.*?)(?:\n|$)|$)' + match = re.search(regex, str_code, re.DOTALL) + if match: + function_name, params, triple_double_quote_doc, triple_single_quote_doc, single_line_comment = match.groups() + # Use the first non-None docstring found + description = (triple_double_quote_doc or + triple_single_quote_doc or + single_line_comment or "").strip() + else: + raise Exception("Utility Model Error:If the function is not decorated with @utility_tool, the description must be provided in the docstring") + # get parameters of the function f = 
re.findall(r"main\((.*?(?:\s*=\s*[^,)]+)?(?:\s*,\s*.*?(?:\s*=\s*[^,)]+)?)*)\)", str_code) parameters = f[0].split(",") if len(f) > 0 else [] @@ -141,4 +158,167 @@ def parse_code(code: Union[Text, Callable]) -> Tuple[Text, List, Text]: f.write(str_code) code = FileFactory.upload(local_path=local_path, is_temp=True) os.remove(local_path) - return code, inputs, description + return code, inputs, description, name + + +def parse_code_decorated(code: Union[Text, Callable]) -> Tuple[Text, List, Text]: + import inspect + import os + import re + import requests + import validators + from uuid import uuid4 + from aixplain.enums import DataType + from aixplain.modules.model.utility_model import UtilityModelInput + + from typing import Callable + from aixplain.factories.file_factory import FileFactory + + + inputs, description, name = [], "", "" + str_code = "" + + if isinstance(code, Callable) and hasattr(code, '_is_utility_tool'): + str_code = inspect.getsource(code) + # Use the information directly from the decorated callable + description = getattr(code, '_tool_description', None) if hasattr(code, '_tool_description') else code.__doc__.strip() if code.__doc__ else "" + name = getattr(code, '_tool_name', None) if hasattr(code, '_tool_name') else "" + if hasattr(code, '_tool_inputs') and code._tool_inputs != []: + inputs = getattr(code, '_tool_inputs', []) + else: + inputs_sig = inspect.signature(code).parameters + inputs = [] + for input_name, param in inputs_sig.items(): + if param.annotation != inspect.Parameter.empty: + input_type = param.annotation.__name__ + if input_type in ["int", "float"]: + input_type = DataType.NUMBER + elif input_type == "bool": + input_type = DataType.BOOLEAN + elif input_type == "str": + input_type = DataType.TEXT + inputs.append(UtilityModelInput(name=input_name, type=input_type, description=f"The '{input_name}' input is a {input_type}")) + elif isinstance(code, Callable): + # Handle case of non-decorated callable + str_code = 
inspect.getsource(code) + description = code.__doc__.strip() if code.__doc__ else "" + name = code.__name__ + #Try to infer parameters + params_match = re.search(r"def\s+\w+\s*\((.*?)\):",str_code) + parameters = params_match.group(1).split(",") if params_match else [] + + for input in parameters: + if not input: + continue + assert ( + len(input.split(":")) > 1 + ), "Utility Model Error: Input type is required. For instance def main(a: int, b: int) -> int:" + input_name, input_type = input.split(":") + input_name = input_name.strip() + input_type = input_type.split("=")[0].strip() + + if input_type in ["int", "float"]: + input_type = "number" + inputs.append( + UtilityModelInput(name=input_name, type=DataType.NUMBER, description=f"The {input_name} input is a number") + ) + elif input_type == "bool": + input_type = "boolean" + inputs.append( + UtilityModelInput(name=input_name, type=DataType.BOOLEAN, description=f"The {input_name} input is a boolean") + ) + elif input_type == "str": + input_type = "text" + inputs.append( + UtilityModelInput(name=input_name, type=DataType.TEXT, description=f"The {input_name} input is a text") + ) + else: + raise Exception(f"Utility Model Error: Unsupported input type: {input_type}") + elif isinstance(code, str): + # if code is string do the parsing and parameter extraction as before + if os.path.exists(code): + with open(code, "r") as f: + str_code = f.read() + elif validators.url(code): + str_code = requests.get(code).text + else: + str_code = code + + # New regex with capture groups + # regex = r"@utility_tool\s*\((.*?)\)\s*def\s+([a-zA-Z_][a-zA-Z0-9_]*)\s*\((.*?)\)\s*->\s*[a-zA-Z_][a-zA-Z0-9_]*\s*:" + regex = r"@utility_tool\s*\((.*?)\)\s*def\s+([a-zA-Z_][a-zA-Z0-9_]*)\s*\((.*?)\)" + matches = re.findall(regex, str_code, re.DOTALL) + + if not matches: + return parse_code(code) + + tool_match = matches[0] #we expect only 1 match + decorator_params = tool_match[0] + function_name = tool_match[1] + parameters_str = tool_match[2] + + 
# Extract name and description + name_match = re.search(r"name\s*=\s*[\"'](.*?)[\"']", decorator_params) + name = name_match.group(1) if name_match else "" + + description_match = re.search(r"description\s*=\s*[\"'](.*?)[\"']", decorator_params) + description = description_match.group(1) if description_match else "" + # Extract parameters + parameters = [param.strip() for param in parameters_str.split(",")] if parameters_str else [] + + # Process parameters if 'inputs' are not explicitly defined in decorator. + # Process parameters for inputs + if 'inputs' not in decorator_params: #<-- Check here + parameters = [param.strip() for param in parameters_str.split(",")] if parameters_str else [] + for input_str in parameters: + if not input_str: + continue + assert ( + len(input_str.split(":")) > 1 + ), "Utility Model Error: Input type is required. For instance def main(a: int, b: int) -> int:" + input_name, input_type = input_str.split(":") + input_name = input_name.strip() + input_type = input_type.split("=")[0].strip() + + if input_type in ["int", "float"]: + inputs.append( + UtilityModelInput(name=input_name, type=DataType.NUMBER, description=f"The {input_name} input is a number") + ) + elif input_type == "bool": + inputs.append( + UtilityModelInput(name=input_name, type=DataType.BOOLEAN, description=f"The {input_name} input is a boolean") + ) + elif input_type == "str": + inputs.append( + UtilityModelInput(name=input_name, type=DataType.TEXT, description=f"The {input_name} input is a text") + ) + else: + raise Exception(f"Utility Model Error: Unsupported input type: {input_type}") + else: + # try to parse from the decorator inputs + input_matches = re.finditer(r"UtilityModelInput\s*\(\s*name\s*=\s*[\"'](.*?)[\"']\s*,\s*type\s*=\s*DataType\.([A-Z]+)\s*,\s*description\s*=\s*[\"'](.*?)[\"']\s*\)", decorator_params) + for match in input_matches: + input_name = match.group(1) + input_type = match.group(2) + input_description = match.group(3) + input_type = 
DataType(input_type.lower()) + try: + inputs.append( + UtilityModelInput(name=input_name, type=input_type, description=input_description) + ) + except ValueError: + raise Exception(f"Utility Model Error: Unsupported input type: {input_type}") + + # ! removes the decorator from the code for the backend to be able to run the code and renames the function as main + str_code = re.sub(r"(@utility_tool\(.*?\)\s*)?def\s+\w+", "def main", str_code, flags=re.DOTALL)# TODO: this should be corrected on the backend side and updated in later versions + if "utility_tool" in str_code: + raise Exception("Utility Model Error: Code must be decorated with @utility_tool and have a function defined.") + if "def main" not in str_code: + raise Exception("Utility Model Error: Code must have a function defined.") + local_path = str(uuid4()) + with open(local_path, "w") as f: + f.write(str_code) + code = FileFactory.upload(local_path=local_path, is_temp=True) + os.remove(local_path) + + return code, inputs, description, name \ No newline at end of file diff --git a/aixplain/modules/pipeline/designer/base.py b/aixplain/modules/pipeline/designer/base.py index 08d4c8c5..55300467 100644 --- a/aixplain/modules/pipeline/designer/base.py +++ b/aixplain/modules/pipeline/designer/base.py @@ -149,17 +149,13 @@ def __init__( to_param = to_param.code assert from_param in from_node.outputs, ( - "Invalid from param. 
" "Make sure all input params are already linked accordingly" ) fp_instance = from_node.outputs[from_param] from .nodes import Decision - if ( - isinstance(to_node, Decision) - and to_param == to_node.inputs.passthrough.code - ): + if isinstance(to_node, Decision) and to_param == to_node.inputs.passthrough.code: if from_param not in to_node.outputs: to_node.outputs.create_param( from_param, diff --git a/aixplain/modules/pipeline/designer/nodes.py b/aixplain/modules/pipeline/designer/nodes.py index fbe27991..35afe4b9 100644 --- a/aixplain/modules/pipeline/designer/nodes.py +++ b/aixplain/modules/pipeline/designer/nodes.py @@ -79,9 +79,7 @@ def populate_asset(self): if self.function: if self.asset.function.value != self.function: - raise ValueError( - f"Function {self.function} is not supported by asset {self.asset_id}" - ) + raise ValueError(f"Function {self.function} is not supported by asset {self.asset_id}") # Despite function field has been set, we should still dynamically # populate parameters for Utility functions @@ -238,12 +236,7 @@ class Output(Node[OutputInputs, OutputOutputs]): inputs_class: Type[TI] = OutputInputs outputs_class: Type[TO] = OutputOutputs - def __init__( - self, - data_types: Optional[List[DataType]] = None, - pipeline: "DesignerPipeline" = None, - **kwargs - ): + def __init__(self, data_types: Optional[List[DataType]] = None, pipeline: "DesignerPipeline" = None, **kwargs): super().__init__(pipeline=pipeline, **kwargs) self.data_types = data_types or [] @@ -304,14 +297,7 @@ class Route(Serializable): operation: Operation type: RouteType - def __init__( - self, - value: DataType, - path: List[Union[Node, int]], - operation: Operation, - type: RouteType, - **kwargs - ): + def __init__(self, value: DataType, path: List[Union[Node, int]], operation: Operation, type: RouteType, **kwargs): """ Post init method to convert the nodes to node numbers if they are nodes. 
@@ -326,9 +312,7 @@ def __init__( # raise ValueError("Path is not valid, should be a list of nodes") # convert nodes to node numbers if they are nodes - self.path = [ - node.number if isinstance(node, Node) else node for node in self.path - ] + self.path = [node.number if isinstance(node, Node) else node for node in self.path] def serialize(self) -> dict: return { @@ -366,9 +350,7 @@ class Router(Node[RouterInputs, RouterOutputs], LinkableMixin): inputs_class: Type[TI] = RouterInputs outputs_class: Type[TO] = RouterOutputs - def __init__( - self, routes: List[Route], pipeline: "DesignerPipeline" = None, **kwargs - ): + def __init__(self, routes: List[Route], pipeline: "DesignerPipeline" = None, **kwargs): super().__init__(pipeline=pipeline, **kwargs) self.routes = routes @@ -407,9 +389,7 @@ class Decision(Node[DecisionInputs, DecisionOutputs], LinkableMixin): inputs_class: Type[TI] = DecisionInputs outputs_class: Type[TO] = DecisionOutputs - def __init__( - self, routes: List[Route], pipeline: "DesignerPipeline" = None, **kwargs - ): + def __init__(self, routes: List[Route], pipeline: "DesignerPipeline" = None, **kwargs): super().__init__(pipeline=pipeline, **kwargs) self.routes = routes diff --git a/aixplain/modules/pipeline/designer/pipeline.py b/aixplain/modules/pipeline/designer/pipeline.py index 58c46112..bf9c74a6 100644 --- a/aixplain/modules/pipeline/designer/pipeline.py +++ b/aixplain/modules/pipeline/designer/pipeline.py @@ -153,9 +153,7 @@ def special_prompt_validation(self, node: Node): node.inputs.text.is_required = False for match in matches: if match not in node.inputs: - raise ValueError( - f"Param {match} of node {node.label} should be defined and set" - ) + raise ValueError(f"Param {match} of node {node.label} should be defined and set") def validate_params(self): """ @@ -167,9 +165,7 @@ def validate_params(self): self.special_prompt_validation(node) for param in node.inputs: if param.is_required and not self.is_param_set(node, param): - raise 
ValueError( - f"Param {param.code} of node {node.label} is required" - ) + raise ValueError(f"Param {param.code} of node {node.label} is required") def validate(self): """ @@ -195,11 +191,7 @@ def get_link(self, from_node: int, to_node: int) -> Link: :return: the link """ return next( - ( - link - for link in self.links - if link.from_node == from_node and link.to_node == to_node - ), + (link for link in self.links if link.from_node == from_node and link.to_node == to_node), None, ) @@ -245,9 +237,7 @@ def infer_data_type(node): infer_data_type(self) infer_data_type(to_node) - def asset( - self, asset_id: str, *args, asset_class: Type[T] = AssetNode, **kwargs - ) -> T: + def asset(self, asset_id: str, *args, asset_class: Type[T] = AssetNode, **kwargs) -> T: """ Shortcut to create an asset node for the current pipeline. All params will be passed as keyword arguments to the node @@ -258,9 +248,7 @@ def asset( """ return asset_class(asset_id, *args, pipeline=self, **kwargs) - def utility( - self, asset_id: str, *args, asset_class: Type[T] = Utility, **kwargs - ) -> T: + def utility(self, asset_id: str, *args, asset_class: Type[T] = Utility, **kwargs) -> T: """ Shortcut to create an utility nodes for the current pipeline. 
All params will be passed as keyword arguments to the node diff --git a/aixplain/modules/pipeline/generate.py b/aixplain/modules/pipeline/generate.py index eeb36412..e96da334 100644 --- a/aixplain/modules/pipeline/generate.py +++ b/aixplain/modules/pipeline/generate.py @@ -151,9 +151,7 @@ def populate_specs(functions: list): # slugify function name by trimming some special chars and # transforming it to snake case - function_name = ( - function["id"].replace("-", "_").replace("(", "_").replace(")", "_") - ) + function_name = function["id"].replace("-", "_").replace("(", "_").replace(")", "_") base_class = "AssetNode" is_segmentor = function["id"] in SEGMENTOR_FUNCTIONS is_reconstructor = function["id"] in RECONSTRUCTOR_FUNCTIONS @@ -161,9 +159,7 @@ def populate_specs(functions: list): base_class = "BaseSegmentor" elif is_reconstructor: base_class = "BaseReconstructor" - elif "metric" in function_name.split( - "_" - ): # noqa: Advise a better distinguisher please + elif "metric" in function_name.split("_"): # noqa: Advise a better distinguisher please base_class = "BaseMetric" spec = { diff --git a/aixplain/modules/pipeline/pipeline.py b/aixplain/modules/pipeline/pipeline.py index 27091770..b566035e 100644 --- a/aixplain/modules/pipeline/pipeline.py +++ b/aixplain/modules/pipeline/pipeline.py @@ -4,18 +4,7 @@ from typing import Union, Type from aixplain.enums import DataType -from .designer import ( - InputParam, - OutputParam, - Inputs, - Outputs, - TI, - TO, - AssetNode, - BaseReconstructor, - BaseSegmentor, - BaseMetric -) +from .designer import InputParam, OutputParam, Inputs, Outputs, TI, TO, AssetNode, BaseReconstructor, BaseSegmentor, BaseMetric from .default import DefaultPipeline from aixplain.modules import asset @@ -38,13 +27,14 @@ def __init__(self, node=None): class ObjectDetection(AssetNode[ObjectDetectionInputs, ObjectDetectionOutputs]): """ - Object Detection is a computer vision technology that identifies and locates -objects within an image, 
typically by drawing bounding boxes around the -detected objects and classifying them into predefined categories. + Object Detection is a computer vision technology that identifies and locates + objects within an image, typically by drawing bounding boxes around the + detected objects and classifying them into predefined categories. - InputType: video - OutputType: text + InputType: video + OutputType: text """ + function: str = "object-detection" input_type: str = DataType.VIDEO output_type: str = DataType.TEXT @@ -71,12 +61,13 @@ def __init__(self, node=None): class LanguageIdentification(AssetNode[LanguageIdentificationInputs, LanguageIdentificationOutputs]): """ - Detects the language in which a given text is written, aiding in multilingual -platforms or content localization. + Detects the language in which a given text is written, aiding in multilingual + platforms or content localization. - InputType: text - OutputType: text + InputType: text + OutputType: text """ + function: str = "language-identification" input_type: str = DataType.TEXT output_type: str = DataType.TEXT @@ -105,13 +96,14 @@ def __init__(self, node=None): class DepthEstimation(AssetNode[DepthEstimationInputs, DepthEstimationOutputs]): """ - Depth estimation is a computational process that determines the distance of -objects from a viewpoint, typically using visual data from cameras or sensors -to create a three-dimensional understanding of a scene. + Depth estimation is a computational process that determines the distance of + objects from a viewpoint, typically using visual data from cameras or sensors + to create a three-dimensional understanding of a scene. 
- InputType: image - OutputType: text + InputType: image + OutputType: text """ + function: str = "depth-estimation" input_type: str = DataType.IMAGE output_type: str = DataType.TEXT @@ -138,13 +130,14 @@ def __init__(self, node=None): class ScriptExecution(AssetNode[ScriptExecutionInputs, ScriptExecutionOutputs]): """ - Script Execution refers to the process of running a set of programmed -instructions or code within a computing environment, enabling the automated -performance of tasks, calculations, or operations as defined by the script. + Script Execution refers to the process of running a set of programmed + instructions or code within a computing environment, enabling the automated + performance of tasks, calculations, or operations as defined by the script. - InputType: text - OutputType: text + InputType: text + OutputType: text """ + function: str = "script-execution" input_type: str = DataType.TEXT output_type: str = DataType.TEXT @@ -173,14 +166,15 @@ def __init__(self, node=None): class ImageEmbedding(AssetNode[ImageEmbeddingInputs, ImageEmbeddingOutputs]): """ - Image Embedding is a process that transforms an image into a fixed-dimensional -vector representation, capturing its essential features and enabling efficient -comparison, retrieval, and analysis in various machine learning and computer -vision tasks. + Image Embedding is a process that transforms an image into a fixed-dimensional + vector representation, capturing its essential features and enabling efficient + comparison, retrieval, and analysis in various machine learning and computer + vision tasks. 
- InputType: image - OutputType: text + InputType: image + OutputType: text """ + function: str = "image-embedding" input_type: str = DataType.IMAGE output_type: str = DataType.TEXT @@ -209,14 +203,15 @@ def __init__(self, node=None): class ImageToVideoGeneration(AssetNode[ImageToVideoGenerationInputs, ImageToVideoGenerationOutputs]): """ - The Image To Video Generation function transforms a series of static images -into a cohesive, dynamic video sequence, often incorporating transitions, -effects, and synchronization with audio to create a visually engaging -narrative. + The Image To Video Generation function transforms a series of static images + into a cohesive, dynamic video sequence, often incorporating transitions, + effects, and synchronization with audio to create a visually engaging + narrative. - InputType: image - OutputType: video + InputType: image + OutputType: video """ + function: str = "image-to-video-generation" input_type: str = DataType.IMAGE output_type: str = DataType.VIDEO @@ -243,14 +238,15 @@ def __init__(self, node=None): class ImageImpainting(AssetNode[ImageImpaintingInputs, ImageImpaintingOutputs]): """ - Image inpainting is a process that involves filling in missing or damaged parts -of an image in a way that is visually coherent and seamlessly blends with the -surrounding areas, often using advanced algorithms and techniques to restore -the image to its original or intended appearance. + Image inpainting is a process that involves filling in missing or damaged parts + of an image in a way that is visually coherent and seamlessly blends with the + surrounding areas, often using advanced algorithms and techniques to restore + the image to its original or intended appearance. 
- InputType: image - OutputType: image + InputType: image + OutputType: image """ + function: str = "image-impainting" input_type: str = DataType.IMAGE output_type: str = DataType.IMAGE @@ -277,14 +273,15 @@ def __init__(self, node=None): class StyleTransfer(AssetNode[StyleTransferInputs, StyleTransferOutputs]): """ - Style Transfer is a technique in artificial intelligence that applies the -visual style of one image (such as the brushstrokes of a famous painting) to -the content of another image, effectively blending the artistic elements of the -first image with the subject matter of the second. + Style Transfer is a technique in artificial intelligence that applies the + visual style of one image (such as the brushstrokes of a famous painting) to + the content of another image, effectively blending the artistic elements of the + first image with the subject matter of the second. - InputType: image - OutputType: image + InputType: image + OutputType: image """ + function: str = "style-transfer" input_type: str = DataType.IMAGE output_type: str = DataType.IMAGE @@ -313,13 +310,14 @@ def __init__(self, node=None): class MultiClassTextClassification(AssetNode[MultiClassTextClassificationInputs, MultiClassTextClassificationOutputs]): """ - Multi Class Text Classification is a natural language processing task that -involves categorizing a given text into one of several predefined classes or -categories based on its content. + Multi Class Text Classification is a natural language processing task that + involves categorizing a given text into one of several predefined classes or + categories based on its content. 
- InputType: text - OutputType: label + InputType: text + OutputType: label """ + function: str = "multi-class-text-classification" input_type: str = DataType.TEXT output_type: str = DataType.LABEL @@ -348,14 +346,15 @@ def __init__(self, node=None): class PartOfSpeechTagging(AssetNode[PartOfSpeechTaggingInputs, PartOfSpeechTaggingOutputs]): """ - Part of Speech Tagging is a natural language processing task that involves -assigning each word in a sentence its corresponding part of speech, such as -noun, verb, adjective, or adverb, based on its role and context within the -sentence. + Part of Speech Tagging is a natural language processing task that involves + assigning each word in a sentence its corresponding part of speech, such as + noun, verb, adjective, or adverb, based on its role and context within the + sentence. - InputType: text - OutputType: label + InputType: text + OutputType: label """ + function: str = "part-of-speech-tagging" input_type: str = DataType.TEXT output_type: str = DataType.LABEL @@ -382,14 +381,15 @@ def __init__(self, node=None): class MetricAggregation(BaseMetric[MetricAggregationInputs, MetricAggregationOutputs]): """ - Metric Aggregation is a function that computes and summarizes numerical data by -applying statistical operations, such as averaging, summing, or finding the -minimum and maximum values, to provide insights and facilitate analysis of -large datasets. + Metric Aggregation is a function that computes and summarizes numerical data by + applying statistical operations, such as averaging, summing, or finding the + minimum and maximum values, to provide insights and facilitate analysis of + large datasets. 
- InputType: text - OutputType: text + InputType: text + OutputType: text """ + function: str = "metric-aggregation" input_type: str = DataType.TEXT output_type: str = DataType.TEXT @@ -416,14 +416,15 @@ def __init__(self, node=None): class ImageColorization(AssetNode[ImageColorizationInputs, ImageColorizationOutputs]): """ - Image colorization is a process that involves adding color to grayscale images, -transforming them from black-and-white to full-color representations, often -using advanced algorithms and machine learning techniques to predict and apply -the appropriate hues and shades. + Image colorization is a process that involves adding color to grayscale images, + transforming them from black-and-white to full-color representations, often + using advanced algorithms and machine learning techniques to predict and apply + the appropriate hues and shades. - InputType: image - OutputType: image + InputType: image + OutputType: image """ + function: str = "image-colorization" input_type: str = DataType.IMAGE output_type: str = DataType.IMAGE @@ -452,14 +453,15 @@ def __init__(self, node=None): class IntentClassification(AssetNode[IntentClassificationInputs, IntentClassificationOutputs]): """ - Intent Classification is a natural language processing task that involves -analyzing and categorizing user text input to determine the underlying purpose -or goal behind the communication, such as booking a flight, asking for weather -information, or setting a reminder. + Intent Classification is a natural language processing task that involves + analyzing and categorizing user text input to determine the underlying purpose + or goal behind the communication, such as booking a flight, asking for weather + information, or setting a reminder. 
- InputType: text - OutputType: label + InputType: text + OutputType: label """ + function: str = "intent-classification" input_type: str = DataType.TEXT output_type: str = DataType.LABEL @@ -486,14 +488,15 @@ def __init__(self, node=None): class AudioIntentDetection(AssetNode[AudioIntentDetectionInputs, AudioIntentDetectionOutputs]): """ - Audio Intent Detection is a process that involves analyzing audio signals to -identify and interpret the underlying intentions or purposes behind spoken -words, enabling systems to understand and respond appropriately to human -speech. + Audio Intent Detection is a process that involves analyzing audio signals to + identify and interpret the underlying intentions or purposes behind spoken + words, enabling systems to understand and respond appropriately to human + speech. - InputType: audio - OutputType: label + InputType: audio + OutputType: label """ + function: str = "audio-intent-detection" input_type: str = DataType.AUDIO output_type: str = DataType.LABEL @@ -522,13 +525,14 @@ def __init__(self, node=None): class AsrQualityEstimation(AssetNode[AsrQualityEstimationInputs, AsrQualityEstimationOutputs]): """ - ASR Quality Estimation is a process that evaluates the accuracy and reliability -of automatic speech recognition systems by analyzing their performance in -transcribing spoken language into text. + ASR Quality Estimation is a process that evaluates the accuracy and reliability + of automatic speech recognition systems by analyzing their performance in + transcribing spoken language into text. - InputType: text - OutputType: label + InputType: text + OutputType: label """ + function: str = "asr-quality-estimation" input_type: str = DataType.TEXT output_type: str = DataType.LABEL @@ -555,13 +559,14 @@ def __init__(self, node=None): class Search(AssetNode[SearchInputs, SearchOutputs]): """ - An algorithm that identifies and returns data or items that match particular -keywords or conditions from a dataset. 
A fundamental tool for databases and -websites. + An algorithm that identifies and returns data or items that match particular + keywords or conditions from a dataset. A fundamental tool for databases and + websites. - InputType: text - OutputType: text + InputType: text + OutputType: text """ + function: str = "search" input_type: str = DataType.TEXT output_type: str = DataType.TEXT @@ -594,13 +599,14 @@ def __init__(self, node=None): class VisemeGeneration(AssetNode[VisemeGenerationInputs, VisemeGenerationOutputs]): """ - Viseme Generation is the process of creating visual representations of -phonemes, which are the distinct units of sound in speech, to synchronize lip -movements with spoken words in animations or virtual avatars. + Viseme Generation is the process of creating visual representations of + phonemes, which are the distinct units of sound in speech, to synchronize lip + movements with spoken words in animations or virtual avatars. - InputType: text - OutputType: label + InputType: text + OutputType: label """ + function: str = "viseme-generation" input_type: str = DataType.TEXT output_type: str = DataType.LABEL @@ -629,12 +635,13 @@ def __init__(self, node=None): class Ocr(AssetNode[OcrInputs, OcrOutputs]): """ - Converts images of typed, handwritten, or printed text into machine-encoded -text. Used in digitizing printed texts for data retrieval. + Converts images of typed, handwritten, or printed text into machine-encoded + text. Used in digitizing printed texts for data retrieval. 
- InputType: image - OutputType: text + InputType: image + OutputType: text """ + function: str = "ocr" input_type: str = DataType.IMAGE output_type: str = DataType.TEXT @@ -661,14 +668,15 @@ def __init__(self, node=None): class Loglikelihood(AssetNode[LoglikelihoodInputs, LoglikelihoodOutputs]): """ - The Log Likelihood function measures the probability of observing the given -data under a specific statistical model by taking the natural logarithm of the -likelihood function, thereby transforming the product of probabilities into a -sum, which simplifies the process of optimization and parameter estimation. + The Log Likelihood function measures the probability of observing the given + data under a specific statistical model by taking the natural logarithm of the + likelihood function, thereby transforming the product of probabilities into a + sum, which simplifies the process of optimization and parameter estimation. - InputType: text - OutputType: number + InputType: text + OutputType: number """ + function: str = "loglikelihood" input_type: str = DataType.TEXT output_type: str = DataType.NUMBER @@ -697,13 +705,14 @@ def __init__(self, node=None): class VideoEmbedding(AssetNode[VideoEmbeddingInputs, VideoEmbeddingOutputs]): """ - Video Embedding is a process that transforms video content into a fixed- -dimensional vector representation, capturing essential features and patterns to -facilitate tasks such as retrieval, classification, and recommendation. + Video Embedding is a process that transforms video content into a fixed- + dimensional vector representation, capturing essential features and patterns to + facilitate tasks such as retrieval, classification, and recommendation. 
- InputType: video - OutputType: embedding + InputType: video + OutputType: embedding """ + function: str = "video-embedding" input_type: str = DataType.VIDEO output_type: str = DataType.EMBEDDING @@ -732,13 +741,14 @@ def __init__(self, node=None): class TextSegmenation(AssetNode[TextSegmenationInputs, TextSegmenationOutputs]): """ - Text Segmentation is the process of dividing a continuous text into meaningful -units, such as words, sentences, or topics, to facilitate easier analysis and -understanding. + Text Segmentation is the process of dividing a continuous text into meaningful + units, such as words, sentences, or topics, to facilitate easier analysis and + understanding. - InputType: text - OutputType: text + InputType: text + OutputType: text """ + function: str = "text-segmenation" input_type: str = DataType.TEXT output_type: str = DataType.TEXT @@ -765,13 +775,14 @@ def __init__(self, node=None): class ExpressionDetection(AssetNode[ExpressionDetectionInputs, ExpressionDetectionOutputs]): """ - Expression Detection is the process of identifying and analyzing facial -expressions to interpret emotions or intentions using AI and computer vision -techniques. + Expression Detection is the process of identifying and analyzing facial + expressions to interpret emotions or intentions using AI and computer vision + techniques. - InputType: text - OutputType: label + InputType: text + OutputType: label """ + function: str = "expression-detection" input_type: str = DataType.TEXT output_type: str = DataType.LABEL @@ -804,12 +815,13 @@ def __init__(self, node=None): class SpeechClassification(AssetNode[SpeechClassificationInputs, SpeechClassificationOutputs]): """ - Categorizes audio clips based on their content, aiding in content organization -and targeted actions. + Categorizes audio clips based on their content, aiding in content organization + and targeted actions. 
- InputType: audio - OutputType: label + InputType: audio + OutputType: label """ + function: str = "speech-classification" input_type: str = DataType.AUDIO output_type: str = DataType.LABEL @@ -836,13 +848,14 @@ def __init__(self, node=None): class InverseTextNormalization(AssetNode[InverseTextNormalizationInputs, InverseTextNormalizationOutputs]): """ - Inverse Text Normalization is the process of converting spoken or written -language in its normalized form, such as numbers, dates, and abbreviations, -back into their original, more complex or detailed textual representations. + Inverse Text Normalization is the process of converting spoken or written + language in its normalized form, such as numbers, dates, and abbreviations, + back into their original, more complex or detailed textual representations. - InputType: text - OutputType: label + InputType: text + OutputType: label """ + function: str = "inverse-text-normalization" input_type: str = DataType.TEXT output_type: str = DataType.LABEL @@ -869,12 +882,13 @@ def __init__(self, node=None): class ExtractAudioFromVideo(AssetNode[ExtractAudioFromVideoInputs, ExtractAudioFromVideoOutputs]): """ - Isolates and extracts audio tracks from video files, aiding in audio analysis -or transcription tasks. + Isolates and extracts audio tracks from video files, aiding in audio analysis + or transcription tasks. - InputType: video - OutputType: audio + InputType: video + OutputType: audio """ + function: str = "extract-audio-from-video" input_type: str = DataType.VIDEO output_type: str = DataType.AUDIO @@ -903,12 +917,13 @@ def __init__(self, node=None): class ImageCompression(AssetNode[ImageCompressionInputs, ImageCompressionOutputs]): """ - Reduces the size of image files without significantly compromising their visual -quality. Useful for optimizing storage and improving webpage load times. + Reduces the size of image files without significantly compromising their visual + quality. 
Useful for optimizing storage and improving webpage load times. - InputType: image - OutputType: image + InputType: image + OutputType: image """ + function: str = "image-compression" input_type: str = DataType.IMAGE output_type: str = DataType.IMAGE @@ -935,13 +950,14 @@ def __init__(self, node=None): class NoiseRemoval(AssetNode[NoiseRemovalInputs, NoiseRemovalOutputs]): """ - Noise Removal is a process that involves identifying and eliminating unwanted -random variations or disturbances from an audio signal to enhance the clarity -and quality of the underlying information. + Noise Removal is a process that involves identifying and eliminating unwanted + random variations or disturbances from an audio signal to enhance the clarity + and quality of the underlying information. - InputType: audio - OutputType: audio + InputType: audio + OutputType: audio """ + function: str = "noise-removal" input_type: str = DataType.AUDIO output_type: str = DataType.AUDIO @@ -974,12 +990,13 @@ def __init__(self, node=None): class TextSummarization(AssetNode[TextSummarizationInputs, TextSummarizationOutputs]): """ - Extracts the main points from a larger body of text, producing a concise -summary without losing the primary message. + Extracts the main points from a larger body of text, producing a concise + summary without losing the primary message. - InputType: text - OutputType: text + InputType: text + OutputType: text """ + function: str = "text-summarization" input_type: str = DataType.TEXT output_type: str = DataType.TEXT @@ -1012,14 +1029,15 @@ def __init__(self, node=None): class TextGenerationMetric(BaseMetric[TextGenerationMetricInputs, TextGenerationMetricOutputs]): """ - A Text Generation Metric is a quantitative measure used to evaluate the quality -and effectiveness of text produced by natural language processing models, often -assessing aspects such as coherence, relevance, fluency, and adherence to given -prompts or instructions. 
+ A Text Generation Metric is a quantitative measure used to evaluate the quality + and effectiveness of text produced by natural language processing models, often + assessing aspects such as coherence, relevance, fluency, and adherence to given + prompts or instructions. - InputType: text - OutputType: text + InputType: text + OutputType: text """ + function: str = "text-generation-metric" input_type: str = DataType.TEXT output_type: str = DataType.TEXT @@ -1046,14 +1064,15 @@ def __init__(self, node=None): class ImageCaptioning(AssetNode[ImageCaptioningInputs, ImageCaptioningOutputs]): """ - Image Captioning is a process that involves generating a textual description of -an image, typically using machine learning models to analyze the visual content -and produce coherent and contextually relevant sentences that describe the -objects, actions, and scenes depicted in the image. + Image Captioning is a process that involves generating a textual description of + an image, typically using machine learning models to analyze the visual content + and produce coherent and contextually relevant sentences that describe the + objects, actions, and scenes depicted in the image. - InputType: image - OutputType: text + InputType: image + OutputType: text """ + function: str = "image-captioning" input_type: str = DataType.IMAGE output_type: str = DataType.TEXT @@ -1084,13 +1103,14 @@ def __init__(self, node=None): class BenchmarkScoringMt(AssetNode[BenchmarkScoringMtInputs, BenchmarkScoringMtOutputs]): """ - Benchmark Scoring MT is a function designed to evaluate and score machine -translation systems by comparing their output against a set of predefined -benchmarks, thereby assessing their accuracy and performance. + Benchmark Scoring MT is a function designed to evaluate and score machine + translation systems by comparing their output against a set of predefined + benchmarks, thereby assessing their accuracy and performance. 
- InputType: text - OutputType: label + InputType: text + OutputType: label """ + function: str = "benchmark-scoring-mt" input_type: str = DataType.TEXT output_type: str = DataType.LABEL @@ -1125,12 +1145,13 @@ def __init__(self, node=None): class SpeakerDiarizationAudio(BaseSegmentor[SpeakerDiarizationAudioInputs, SpeakerDiarizationAudioOutputs]): """ - Identifies individual speakers and their respective speech segments within an -audio clip. Ideal for multi-speaker recordings or conference calls. + Identifies individual speakers and their respective speech segments within an + audio clip. Ideal for multi-speaker recordings or conference calls. - InputType: audio - OutputType: label + InputType: audio + OutputType: label """ + function: str = "speaker-diarization-audio" input_type: str = DataType.AUDIO output_type: str = DataType.LABEL @@ -1161,13 +1182,14 @@ def __init__(self, node=None): class BenchmarkScoringAsr(AssetNode[BenchmarkScoringAsrInputs, BenchmarkScoringAsrOutputs]): """ - Benchmark Scoring ASR is a function that evaluates and compares the performance -of automatic speech recognition systems by analyzing their accuracy, speed, and -other relevant metrics against a standardized set of benchmarks. + Benchmark Scoring ASR is a function that evaluates and compares the performance + of automatic speech recognition systems by analyzing their accuracy, speed, and + other relevant metrics against a standardized set of benchmarks. 
- InputType: audio - OutputType: label + InputType: audio + OutputType: label """ + function: str = "benchmark-scoring-asr" input_type: str = DataType.AUDIO output_type: str = DataType.LABEL @@ -1198,13 +1220,14 @@ def __init__(self, node=None): class VisualQuestionAnswering(AssetNode[VisualQuestionAnsweringInputs, VisualQuestionAnsweringOutputs]): """ - Visual Question Answering (VQA) is a task in artificial intelligence that -involves analyzing an image and providing accurate, contextually relevant -answers to questions posed about the visual content of that image. + Visual Question Answering (VQA) is a task in artificial intelligence that + involves analyzing an image and providing accurate, contextually relevant + answers to questions posed about the visual content of that image. - InputType: image - OutputType: video + InputType: image + OutputType: video """ + function: str = "visual-question-answering" input_type: str = DataType.IMAGE output_type: str = DataType.VIDEO @@ -1231,13 +1254,14 @@ def __init__(self, node=None): class DocumentImageParsing(AssetNode[DocumentImageParsingInputs, DocumentImageParsingOutputs]): """ - Document Image Parsing is the process of analyzing and converting scanned or -photographed images of documents into structured, machine-readable formats by -identifying and extracting text, layout, and other relevant information. + Document Image Parsing is the process of analyzing and converting scanned or + photographed images of documents into structured, machine-readable formats by + identifying and extracting text, layout, and other relevant information. 
- InputType: image - OutputType: text + InputType: image + OutputType: text """ + function: str = "document-image-parsing" input_type: str = DataType.IMAGE output_type: str = DataType.TEXT @@ -1266,14 +1290,15 @@ def __init__(self, node=None): class MultiLabelTextClassification(AssetNode[MultiLabelTextClassificationInputs, MultiLabelTextClassificationOutputs]): """ - Multi Label Text Classification is a natural language processing task where a -given text is analyzed and assigned multiple relevant labels or categories from -a predefined set, allowing for the text to belong to more than one category -simultaneously. + Multi Label Text Classification is a natural language processing task where a + given text is analyzed and assigned multiple relevant labels or categories from + a predefined set, allowing for the text to belong to more than one category + simultaneously. - InputType: text - OutputType: label + InputType: text + OutputType: label """ + function: str = "multi-label-text-classification" input_type: str = DataType.TEXT output_type: str = DataType.LABEL @@ -1300,12 +1325,13 @@ def __init__(self, node=None): class TextReconstruction(BaseReconstructor[TextReconstructionInputs, TextReconstructionOutputs]): """ - Text Reconstruction is a process that involves piecing together fragmented or -incomplete text data to restore it to its original, coherent form. + Text Reconstruction is a process that involves piecing together fragmented or + incomplete text data to restore it to its original, coherent form. - InputType: text - OutputType: text + InputType: text + OutputType: text """ + function: str = "text-reconstruction" input_type: str = DataType.TEXT output_type: str = DataType.TEXT @@ -1334,12 +1360,13 @@ def __init__(self, node=None): class VideoContentModeration(AssetNode[VideoContentModerationInputs, VideoContentModerationOutputs]): """ - Automatically reviews video content to detect and possibly remove inappropriate -or harmful material. 
Essential for user-generated content platforms. + Automatically reviews video content to detect and possibly remove inappropriate + or harmful material. Essential for user-generated content platforms. - InputType: video - OutputType: label + InputType: video + OutputType: label """ + function: str = "video-content-moderation" input_type: str = DataType.VIDEO output_type: str = DataType.LABEL @@ -1368,13 +1395,14 @@ def __init__(self, node=None): class MultilingualSpeechRecognition(AssetNode[MultilingualSpeechRecognitionInputs, MultilingualSpeechRecognitionOutputs]): """ - Multilingual Speech Recognition is a technology that enables the automatic -transcription of spoken language into text across multiple languages, allowing -for seamless communication and understanding in diverse linguistic contexts. + Multilingual Speech Recognition is a technology that enables the automatic + transcription of spoken language into text across multiple languages, allowing + for seamless communication and understanding in diverse linguistic contexts. - InputType: audio - OutputType: text + InputType: audio + OutputType: text """ + function: str = "multilingual-speech-recognition" input_type: str = DataType.AUDIO output_type: str = DataType.TEXT @@ -1405,12 +1433,13 @@ def __init__(self, node=None): class EntityLinking(AssetNode[EntityLinkingInputs, EntityLinkingOutputs]): """ - Associates identified entities in the text with specific entries in a knowledge -base or database. + Associates identified entities in the text with specific entries in a knowledge + base or database. 
- InputType: text - OutputType: label + InputType: text + OutputType: label """ + function: str = "entity-linking" input_type: str = DataType.TEXT output_type: str = DataType.LABEL @@ -1437,13 +1466,14 @@ def __init__(self, node=None): class AudioReconstruction(BaseReconstructor[AudioReconstructionInputs, AudioReconstructionOutputs]): """ - Audio Reconstruction is the process of restoring or recreating audio signals -from incomplete, damaged, or degraded recordings to achieve a high-quality, -accurate representation of the original sound. + Audio Reconstruction is the process of restoring or recreating audio signals + from incomplete, damaged, or degraded recordings to achieve a high-quality, + accurate representation of the original sound. - InputType: audio - OutputType: audio + InputType: audio + OutputType: audio """ + function: str = "audio-reconstruction" input_type: str = DataType.AUDIO output_type: str = DataType.AUDIO @@ -1470,13 +1500,14 @@ def __init__(self, node=None): class AudioEmotionDetection(AssetNode[AudioEmotionDetectionInputs, AudioEmotionDetectionOutputs]): """ - Audio Emotion Detection is a technology that analyzes vocal characteristics and -patterns in audio recordings to identify and classify the emotional state of -the speaker. + Audio Emotion Detection is a technology that analyzes vocal characteristics and + patterns in audio recordings to identify and classify the emotional state of + the speaker. - InputType: audio - OutputType: label + InputType: audio + OutputType: label """ + function: str = "audio-emotion-detection" input_type: str = DataType.AUDIO output_type: str = DataType.LABEL @@ -1505,12 +1536,13 @@ def __init__(self, node=None): class SplitOnLinebreak(BaseSegmentor[SplitOnLinebreakInputs, SplitOnLinebreakOutputs]): """ - The "Split On Linebreak" function divides a given string into a list of -substrings, using linebreaks (newline characters) as the points of separation. 
+ The "Split On Linebreak" function divides a given string into a list of + substrings, using linebreaks (newline characters) as the points of separation. - InputType: text - OutputType: text + InputType: text + OutputType: text """ + function: str = "split-on-linebreak" input_type: str = DataType.TEXT output_type: str = DataType.TEXT @@ -1537,13 +1569,14 @@ def __init__(self, node=None): class KeywordSpotting(AssetNode[KeywordSpottingInputs, KeywordSpottingOutputs]): """ - Keyword Spotting is a function that enables the detection and identification of -specific words or phrases within a stream of audio, often used in voice- -activated systems to trigger actions or commands based on recognized keywords. + Keyword Spotting is a function that enables the detection and identification of + specific words or phrases within a stream of audio, often used in voice- + activated systems to trigger actions or commands based on recognized keywords. - InputType: audio - OutputType: label + InputType: audio + OutputType: label """ + function: str = "keyword-spotting" input_type: str = DataType.AUDIO output_type: str = DataType.LABEL @@ -1576,12 +1609,13 @@ def __init__(self, node=None): class TextClassification(AssetNode[TextClassificationInputs, TextClassificationOutputs]): """ - Categorizes text into predefined groups or topics, facilitating content -organization and targeted actions. + Categorizes text into predefined groups or topics, facilitating content + organization and targeted actions. 
- InputType: text - OutputType: label + InputType: text + OutputType: label """ + function: str = "text-classification" input_type: str = DataType.TEXT output_type: str = DataType.LABEL @@ -1614,12 +1648,13 @@ def __init__(self, node=None): class OffensiveLanguageIdentification(AssetNode[OffensiveLanguageIdentificationInputs, OffensiveLanguageIdentificationOutputs]): """ - Detects language or phrases that might be considered offensive, aiding in -content moderation and creating respectful user interactions. + Detects language or phrases that might be considered offensive, aiding in + content moderation and creating respectful user interactions. - InputType: text - OutputType: label + InputType: text + OutputType: label """ + function: str = "offensive-language-identification" input_type: str = DataType.TEXT output_type: str = DataType.LABEL @@ -1652,12 +1687,13 @@ def __init__(self, node=None): class SpeechNonSpeechClassification(AssetNode[SpeechNonSpeechClassificationInputs, SpeechNonSpeechClassificationOutputs]): """ - Differentiates between speech and non-speech audio segments. Great for editing -software and transcription services to exclude irrelevant audio. + Differentiates between speech and non-speech audio segments. Great for editing + software and transcription services to exclude irrelevant audio. - InputType: audio - OutputType: label + InputType: audio + OutputType: label """ + function: str = "speech-non-speech-classification" input_type: str = DataType.AUDIO output_type: str = DataType.LABEL @@ -1692,13 +1728,14 @@ def __init__(self, node=None): class NamedEntityRecognition(AssetNode[NamedEntityRecognitionInputs, NamedEntityRecognitionOutputs]): """ - Identifies and classifies named entities (e.g., persons, organizations, -locations) within text. Useful for information extraction, content tagging, and -search enhancements. + Identifies and classifies named entities (e.g., persons, organizations, + locations) within text. 
Useful for information extraction, content tagging, and + search enhancements. - InputType: text - OutputType: label + InputType: text + OutputType: label """ + function: str = "named-entity-recognition" input_type: str = DataType.TEXT output_type: str = DataType.LABEL @@ -1727,13 +1764,14 @@ def __init__(self, node=None): class ImageManipulation(AssetNode[ImageManipulationInputs, ImageManipulationOutputs]): """ - Image Manipulation refers to the process of altering or enhancing digital -images using various techniques and tools to achieve desired visual effects, -correct imperfections, or transform the image's appearance. + Image Manipulation refers to the process of altering or enhancing digital + images using various techniques and tools to achieve desired visual effects, + correct imperfections, or transform the image's appearance. - InputType: image - OutputType: image + InputType: image + OutputType: image """ + function: str = "image-manipulation" input_type: str = DataType.IMAGE output_type: str = DataType.IMAGE @@ -1760,13 +1798,14 @@ def __init__(self, node=None): class SplitOnSilence(AssetNode[SplitOnSilenceInputs, SplitOnSilenceOutputs]): """ - The "Split On Silence" function divides an audio recording into separate -segments based on periods of silence, allowing for easier editing and analysis -of individual sections. + The "Split On Silence" function divides an audio recording into separate + segments based on periods of silence, allowing for easier editing and analysis + of individual sections. 
- InputType: audio - OutputType: audio + InputType: audio + OutputType: audio """ + function: str = "split-on-silence" input_type: str = DataType.AUDIO output_type: str = DataType.AUDIO @@ -1795,13 +1834,14 @@ def __init__(self, node=None): class TextToVideoGeneration(AssetNode[TextToVideoGenerationInputs, TextToVideoGenerationOutputs]): """ - Text To Video Generation is a process that converts written descriptions or -scripts into dynamic, visual video content using advanced algorithms and -artificial intelligence. + Text To Video Generation is a process that converts written descriptions or + scripts into dynamic, visual video content using advanced algorithms and + artificial intelligence. - InputType: text - OutputType: video + InputType: text + OutputType: video """ + function: str = "text-to-video-generation" input_type: str = DataType.TEXT output_type: str = DataType.VIDEO @@ -1828,14 +1868,15 @@ def __init__(self, node=None): class DocumentInformationExtraction(AssetNode[DocumentInformationExtractionInputs, DocumentInformationExtractionOutputs]): """ - Document Information Extraction is the process of automatically identifying, -extracting, and structuring relevant data from unstructured or semi-structured -documents, such as invoices, receipts, contracts, and forms, to facilitate -easier data management and analysis. + Document Information Extraction is the process of automatically identifying, + extracting, and structuring relevant data from unstructured or semi-structured + documents, such as invoices, receipts, contracts, and forms, to facilitate + easier data management and analysis. 
- InputType: image - OutputType: text + InputType: image + OutputType: text """ + function: str = "document-information-extraction" input_type: str = DataType.IMAGE output_type: str = DataType.TEXT @@ -1862,12 +1903,13 @@ def __init__(self, node=None): class VideoGeneration(AssetNode[VideoGenerationInputs, VideoGenerationOutputs]): """ - Produces video content based on specific inputs or datasets. Can be used for -simulations, animations, or even deepfake detection. + Produces video content based on specific inputs or datasets. Can be used for + simulations, animations, or even deepfake detection. - InputType: text - OutputType: video + InputType: text + OutputType: video """ + function: str = "video-generation" input_type: str = DataType.TEXT output_type: str = DataType.VIDEO @@ -1894,12 +1936,13 @@ def __init__(self, node=None): class TextToImageGeneration(AssetNode[TextToImageGenerationInputs, TextToImageGenerationOutputs]): """ - Creates a visual representation based on textual input, turning descriptions -into pictorial forms. Used in creative processes and content generation. + Creates a visual representation based on textual input, turning descriptions + into pictorial forms. Used in creative processes and content generation. 
- InputType: text - OutputType: image + InputType: text + OutputType: image """ + function: str = "text-to-image-generation" input_type: str = DataType.TEXT output_type: str = DataType.IMAGE @@ -1928,16 +1971,19 @@ def __init__(self, node=None): self.data = self.create_param(code="data", data_type=DataType.TEXT) -class ReferencelessTextGenerationMetric(BaseMetric[ReferencelessTextGenerationMetricInputs, ReferencelessTextGenerationMetricOutputs]): +class ReferencelessTextGenerationMetric( + BaseMetric[ReferencelessTextGenerationMetricInputs, ReferencelessTextGenerationMetricOutputs] +): """ - The Referenceless Text Generation Metric is a method for evaluating the quality -of generated text without requiring a reference text for comparison, often -leveraging models or algorithms to assess coherence, relevance, and fluency -based on intrinsic properties of the text itself. + The Referenceless Text Generation Metric is a method for evaluating the quality + of generated text without requiring a reference text for comparison, often + leveraging models or algorithms to assess coherence, relevance, and fluency + based on intrinsic properties of the text itself. - InputType: text - OutputType: text + InputType: text + OutputType: text """ + function: str = "referenceless-text-generation-metric" input_type: str = DataType.TEXT output_type: str = DataType.TEXT @@ -1966,14 +2012,15 @@ def __init__(self, node=None): class OtherMultipurpose(AssetNode[OtherMultipurposeInputs, OtherMultipurposeOutputs]): """ - The "Other (Multipurpose)" function serves as a versatile category designed to -accommodate a wide range of tasks and activities that do not fit neatly into -predefined classifications, offering flexibility and adaptability for various -needs. 
+ The "Other (Multipurpose)" function serves as a versatile category designed to + accommodate a wide range of tasks and activities that do not fit neatly into + predefined classifications, offering flexibility and adaptability for various + needs. - InputType: text - OutputType: text + InputType: text + OutputType: text """ + function: str = "other-(multipurpose)" input_type: str = DataType.TEXT output_type: str = DataType.TEXT @@ -2002,12 +2049,13 @@ def __init__(self, node=None): class ImageLabelDetection(AssetNode[ImageLabelDetectionInputs, ImageLabelDetectionOutputs]): """ - Identifies objects, themes, or topics within images, useful for image -categorization, search, and recommendation systems. + Identifies objects, themes, or topics within images, useful for image + categorization, search, and recommendation systems. - InputType: image - OutputType: label + InputType: image + OutputType: label """ + function: str = "image-label-detection" input_type: str = DataType.IMAGE output_type: str = DataType.LABEL @@ -2040,12 +2088,13 @@ def __init__(self, node=None): class SpeakerDiarizationVideo(AssetNode[SpeakerDiarizationVideoInputs, SpeakerDiarizationVideoOutputs]): """ - Segments a video based on different speakers, identifying when each individual -speaks. Useful for transcriptions and understanding multi-person conversations. + Segments a video based on different speakers, identifying when each individual + speaks. Useful for transcriptions and understanding multi-person conversations. - InputType: video - OutputType: label + InputType: video + OutputType: label """ + function: str = "speaker-diarization-video" input_type: str = DataType.VIDEO output_type: str = DataType.LABEL @@ -2082,12 +2131,13 @@ def __init__(self, node=None): class AudioTranscriptImprovement(AssetNode[AudioTranscriptImprovementInputs, AudioTranscriptImprovementOutputs]): """ - Refines and corrects transcriptions generated from audio data, improving -readability and accuracy. 
+ Refines and corrects transcriptions generated from audio data, improving + readability and accuracy. - InputType: audio - OutputType: text + InputType: audio + OutputType: text """ + function: str = "audio-transcript-improvement" input_type: str = DataType.AUDIO output_type: str = DataType.TEXT @@ -2116,12 +2166,13 @@ def __init__(self, node=None): class DialectDetection(AssetNode[DialectDetectionInputs, DialectDetectionOutputs]): """ - Identifies specific dialects within a language, aiding in localized content -creation or user experience personalization. + Identifies specific dialects within a language, aiding in localized content + creation or user experience personalization. - InputType: audio - OutputType: text + InputType: audio + OutputType: text """ + function: str = "dialect-detection" input_type: str = DataType.AUDIO output_type: str = DataType.TEXT @@ -2154,12 +2205,13 @@ def __init__(self, node=None): class SentimentAnalysis(AssetNode[SentimentAnalysisInputs, SentimentAnalysisOutputs]): """ - Determines the sentiment or emotion (e.g., positive, negative, neutral) of a -piece of text, aiding in understanding user feedback or market sentiment. + Determines the sentiment or emotion (e.g., positive, negative, neutral) of a + piece of text, aiding in understanding user feedback or market sentiment. - InputType: text - OutputType: label + InputType: text + OutputType: label """ + function: str = "sentiment-analysis" input_type: str = DataType.TEXT output_type: str = DataType.LABEL @@ -2192,13 +2244,14 @@ def __init__(self, node=None): class SpeechEmbedding(AssetNode[SpeechEmbeddingInputs, SpeechEmbeddingOutputs]): """ - Transforms spoken content into a fixed-size vector in a high-dimensional space -that captures the content's essence. Facilitates tasks like speech recognition -and speaker verification. + Transforms spoken content into a fixed-size vector in a high-dimensional space + that captures the content's essence. 
Facilitates tasks like speech recognition + and speaker verification. - InputType: audio - OutputType: text + InputType: audio + OutputType: text """ + function: str = "speech-embedding" input_type: str = DataType.AUDIO output_type: str = DataType.TEXT @@ -2231,13 +2284,14 @@ def __init__(self, node=None): class TextGenerationMetricDefault(BaseMetric[TextGenerationMetricDefaultInputs, TextGenerationMetricDefaultOutputs]): """ - The "Text Generation Metric Default" function provides a standard set of -evaluation metrics for assessing the quality and performance of text generation -models. + The "Text Generation Metric Default" function provides a standard set of + evaluation metrics for assessing the quality and performance of text generation + models. - InputType: text - OutputType: text + InputType: text + OutputType: text """ + function: str = "text-generation-metric-default" input_type: str = DataType.TEXT output_type: str = DataType.TEXT @@ -2270,14 +2324,15 @@ def __init__(self, node=None): class AudioGenerationMetric(BaseMetric[AudioGenerationMetricInputs, AudioGenerationMetricOutputs]): """ - The Audio Generation Metric is a quantitative measure used to evaluate the -quality, accuracy, and overall performance of audio generated by artificial -intelligence systems, often considering factors such as fidelity, -intelligibility, and similarity to human-produced audio. + The Audio Generation Metric is a quantitative measure used to evaluate the + quality, accuracy, and overall performance of audio generated by artificial + intelligence systems, often considering factors such as fidelity, + intelligibility, and similarity to human-produced audio. 
- InputType: text - OutputType: text + InputType: text + OutputType: text """ + function: str = "audio-generation-metric" input_type: str = DataType.TEXT output_type: str = DataType.TEXT @@ -2304,12 +2359,13 @@ def __init__(self, node=None): class AudioLanguageIdentification(AssetNode[AudioLanguageIdentificationInputs, AudioLanguageIdentificationOutputs]): """ - Audio Language Identification is a process that involves analyzing an audio -recording to determine the language being spoken. + Audio Language Identification is a process that involves analyzing an audio + recording to determine the language being spoken. - InputType: audio - OutputType: label + InputType: audio + OutputType: label """ + function: str = "audio-language-identification" input_type: str = DataType.AUDIO output_type: str = DataType.LABEL @@ -2338,12 +2394,13 @@ def __init__(self, node=None): class VideoLabelDetection(AssetNode[VideoLabelDetectionInputs, VideoLabelDetectionOutputs]): """ - Identifies and tags objects, scenes, or activities within a video. Useful for -content indexing and recommendation systems. + Identifies and tags objects, scenes, or activities within a video. Useful for + content indexing and recommendation systems. - InputType: video - OutputType: label + InputType: video + OutputType: label """ + function: str = "video-label-detection" input_type: str = DataType.VIDEO output_type: str = DataType.LABEL @@ -2376,12 +2433,13 @@ def __init__(self, node=None): class TopicClassification(AssetNode[TopicClassificationInputs, TopicClassificationOutputs]): """ - Assigns categories or topics to a piece of text based on its content, -facilitating content organization and retrieval. + Assigns categories or topics to a piece of text based on its content, + facilitating content organization and retrieval. 
- InputType: text - OutputType: label + InputType: text + OutputType: label """ + function: str = "topic-classification" input_type: str = DataType.TEXT output_type: str = DataType.LABEL @@ -2410,15 +2468,18 @@ def __init__(self, node=None): self.data = self.create_param(code="data", data_type=DataType.TEXT) -class ReferencelessTextGenerationMetricDefault(BaseMetric[ReferencelessTextGenerationMetricDefaultInputs, ReferencelessTextGenerationMetricDefaultOutputs]): +class ReferencelessTextGenerationMetricDefault( + BaseMetric[ReferencelessTextGenerationMetricDefaultInputs, ReferencelessTextGenerationMetricDefaultOutputs] +): """ - The Referenceless Text Generation Metric Default is a function designed to -evaluate the quality of generated text without relying on reference texts for -comparison. + The Referenceless Text Generation Metric Default is a function designed to + evaluate the quality of generated text without relying on reference texts for + comparison. - InputType: text - OutputType: text + InputType: text + OutputType: text """ + function: str = "referenceless-text-generation-metric-default" input_type: str = DataType.TEXT output_type: str = DataType.TEXT @@ -2447,12 +2508,13 @@ def __init__(self, node=None): class ImageContentModeration(AssetNode[ImageContentModerationInputs, ImageContentModerationOutputs]): """ - Detects and filters out inappropriate or harmful images, essential for -platforms with user-generated visual content. + Detects and filters out inappropriate or harmful images, essential for + platforms with user-generated visual content. 
- InputType: image - OutputType: label + InputType: image + OutputType: label """ + function: str = "image-content-moderation" input_type: str = DataType.IMAGE output_type: str = DataType.LABEL @@ -2479,13 +2541,14 @@ def __init__(self, node=None): class AsrAgeClassification(AssetNode[AsrAgeClassificationInputs, AsrAgeClassificationOutputs]): """ - The ASR Age Classification function is designed to analyze audio recordings of -speech to determine the speaker's age group by leveraging automatic speech -recognition (ASR) technology and machine learning algorithms. + The ASR Age Classification function is designed to analyze audio recordings of + speech to determine the speaker's age group by leveraging automatic speech + recognition (ASR) technology and machine learning algorithms. - InputType: audio - OutputType: label + InputType: audio + OutputType: label """ + function: str = "asr-age-classification" input_type: str = DataType.AUDIO output_type: str = DataType.LABEL @@ -2512,12 +2575,13 @@ def __init__(self, node=None): class AsrGenderClassification(AssetNode[AsrGenderClassificationInputs, AsrGenderClassificationOutputs]): """ - The ASR Gender Classification function analyzes audio recordings to determine -and classify the speaker's gender based on their voice characteristics. + The ASR Gender Classification function analyzes audio recordings to determine + and classify the speaker's gender based on their voice characteristics. - InputType: audio - OutputType: label + InputType: audio + OutputType: label """ + function: str = "asr-gender-classification" input_type: str = DataType.AUDIO output_type: str = DataType.LABEL @@ -2546,13 +2610,14 @@ def __init__(self, node=None): class BaseModel(AssetNode[BaseModelInputs, BaseModelOutputs]): """ - The Base-Model function serves as a foundational framework designed to provide -essential features and capabilities upon which more specialized or advanced -models can be built and customized. 
+ The Base-Model function serves as a foundational framework designed to provide + essential features and capabilities upon which more specialized or advanced + models can be built and customized. - InputType: text - OutputType: text + InputType: text + OutputType: text """ + function: str = "base-model" input_type: str = DataType.TEXT output_type: str = DataType.TEXT @@ -2579,12 +2644,13 @@ def __init__(self, node=None): class LanguageIdentificationAudio(AssetNode[LanguageIdentificationAudioInputs, LanguageIdentificationAudioOutputs]): """ - The Language Identification Audio function analyzes audio input to determine -and identify the language being spoken. + The Language Identification Audio function analyzes audio input to determine + and identify the language being spoken. - InputType: audio - OutputType: label + InputType: audio + OutputType: label """ + function: str = "language-identification-audio" input_type: str = DataType.AUDIO output_type: str = DataType.LABEL @@ -2611,13 +2677,14 @@ def __init__(self, node=None): class MultiClassImageClassification(AssetNode[MultiClassImageClassificationInputs, MultiClassImageClassificationOutputs]): """ - Multi Class Image Classification is a machine learning task where an algorithm -is trained to categorize images into one of several predefined classes or -categories based on their visual content. + Multi Class Image Classification is a machine learning task where an algorithm + is trained to categorize images into one of several predefined classes or + categories based on their visual content. 
- InputType: image - OutputType: label + InputType: image + OutputType: label """ + function: str = "multi-class-image-classification" input_type: str = DataType.IMAGE output_type: str = DataType.LABEL @@ -2644,13 +2711,14 @@ def __init__(self, node=None): class SemanticSegmentation(AssetNode[SemanticSegmentationInputs, SemanticSegmentationOutputs]): """ - Semantic segmentation is a computer vision process that involves classifying -each pixel in an image into a predefined category, effectively partitioning the -image into meaningful segments based on the objects or regions they represent. + Semantic segmentation is a computer vision process that involves classifying + each pixel in an image into a predefined category, effectively partitioning the + image into meaningful segments based on the objects or regions they represent. - InputType: image - OutputType: label + InputType: image + OutputType: label """ + function: str = "semantic-segmentation" input_type: str = DataType.IMAGE output_type: str = DataType.LABEL @@ -2677,14 +2745,15 @@ def __init__(self, node=None): class InstanceSegmentation(AssetNode[InstanceSegmentationInputs, InstanceSegmentationOutputs]): """ - Instance segmentation is a computer vision task that involves detecting and -delineating each distinct object within an image, assigning a unique label and -precise boundary to every individual instance of objects, even if they belong -to the same category. + Instance segmentation is a computer vision task that involves detecting and + delineating each distinct object within an image, assigning a unique label and + precise boundary to every individual instance of objects, even if they belong + to the same category. 
- InputType: image - OutputType: label + InputType: image + OutputType: label """ + function: str = "instance-segmentation" input_type: str = DataType.IMAGE output_type: str = DataType.LABEL @@ -2717,12 +2786,13 @@ def __init__(self, node=None): class EmotionDetection(AssetNode[EmotionDetectionInputs, EmotionDetectionOutputs]): """ - Identifies human emotions from text or audio, enhancing user experience in -chatbots or customer feedback analysis. + Identifies human emotions from text or audio, enhancing user experience in + chatbots or customer feedback analysis. - InputType: text - OutputType: label + InputType: text + OutputType: label """ + function: str = "emotion-detection" input_type: str = DataType.TEXT output_type: str = DataType.LABEL @@ -2755,13 +2825,14 @@ def __init__(self, node=None): class TextSpamDetection(AssetNode[TextSpamDetectionInputs, TextSpamDetectionOutputs]): """ - Identifies and filters out unwanted or irrelevant text content, ideal for -moderating user-generated content or ensuring quality in communication -platforms. + Identifies and filters out unwanted or irrelevant text content, ideal for + moderating user-generated content or ensuring quality in communication + platforms. - InputType: text - OutputType: label + InputType: text + OutputType: label """ + function: str = "text-spam-detection" input_type: str = DataType.TEXT output_type: str = DataType.LABEL @@ -2796,12 +2867,13 @@ def __init__(self, node=None): class TextDenormalization(AssetNode[TextDenormalizationInputs, TextDenormalizationOutputs]): """ - Converts standardized or normalized text into its original, often more -readable, form. Useful in natural language generation tasks. + Converts standardized or normalized text into its original, often more + readable, form. Useful in natural language generation tasks. 
- InputType: text - OutputType: label + InputType: text + OutputType: label """ + function: str = "text-denormalization" input_type: str = DataType.TEXT output_type: str = DataType.LABEL @@ -2830,15 +2902,18 @@ def __init__(self, node=None): self.data = self.create_param(code="data", data_type=DataType.TEXT) -class ReferencelessAudioGenerationMetric(BaseMetric[ReferencelessAudioGenerationMetricInputs, ReferencelessAudioGenerationMetricOutputs]): +class ReferencelessAudioGenerationMetric( + BaseMetric[ReferencelessAudioGenerationMetricInputs, ReferencelessAudioGenerationMetricOutputs] +): """ - The Referenceless Audio Generation Metric is a tool designed to evaluate the -quality of generated audio content without the need for a reference or original -audio sample for comparison. + The Referenceless Audio Generation Metric is a tool designed to evaluate the + quality of generated audio content without the need for a reference or original + audio sample for comparison. - InputType: text - OutputType: text + InputType: text + OutputType: text """ + function: str = "referenceless-audio-generation-metric" input_type: str = DataType.TEXT output_type: str = DataType.TEXT @@ -2875,12 +2950,13 @@ def __init__(self, node=None): class AudioForcedAlignment(AssetNode[AudioForcedAlignmentInputs, AudioForcedAlignmentOutputs]): """ - Synchronizes phonetic and phonological text with the corresponding segments in -an audio file. Useful in linguistic research and detailed transcription tasks. + Synchronizes phonetic and phonological text with the corresponding segments in + an audio file. Useful in linguistic research and detailed transcription tasks. 
- InputType: audio - OutputType: audio + InputType: audio + OutputType: audio """ + function: str = "audio-forced-alignment" input_type: str = DataType.AUDIO output_type: str = DataType.AUDIO @@ -2917,12 +2993,13 @@ def __init__(self, node=None): class VideoForcedAlignment(AssetNode[VideoForcedAlignmentInputs, VideoForcedAlignmentOutputs]): """ - Aligns the transcription of spoken content in a video with its corresponding -timecodes, facilitating subtitle creation. + Aligns the transcription of spoken content in a video with its corresponding + timecodes, facilitating subtitle creation. - InputType: video - OutputType: video + InputType: video + OutputType: video """ + function: str = "video-forced-alignment" input_type: str = DataType.VIDEO output_type: str = DataType.VIDEO @@ -2957,12 +3034,13 @@ def __init__(self, node=None): class ClassificationMetric(BaseMetric[ClassificationMetricInputs, ClassificationMetricOutputs]): """ - A Classification Metric is a quantitative measure used to evaluate the quality -and effectiveness of classification models. + A Classification Metric is a quantitative measure used to evaluate the quality + and effectiveness of classification models. - InputType: text - OutputType: text + InputType: text + OutputType: text """ + function: str = "classification-metric" input_type: str = DataType.TEXT output_type: str = DataType.TEXT @@ -2989,15 +3067,16 @@ def __init__(self, node=None): class AutoMaskGeneration(AssetNode[AutoMaskGenerationInputs, AutoMaskGenerationOutputs]): """ - Auto-mask generation refers to the automated process of creating masks in image -processing or computer vision, typically for segmentation tasks. A mask is a -binary or multi-class image that labels different parts of an image, usually -separating the foreground (objects of interest) from the background, or -identifying specific object classes in an image. 
+ Auto-mask generation refers to the automated process of creating masks in image + processing or computer vision, typically for segmentation tasks. A mask is a + binary or multi-class image that labels different parts of an image, usually + separating the foreground (objects of interest) from the background, or + identifying specific object classes in an image. - InputType: image - OutputType: label + InputType: image + OutputType: label """ + function: str = "auto-mask-generation" input_type: str = DataType.IMAGE output_type: str = DataType.LABEL @@ -3030,14 +3109,15 @@ def __init__(self, node=None): class TextEmbedding(AssetNode[TextEmbeddingInputs, TextEmbeddingOutputs]): """ - Text embedding is a process that converts text into numerical vectors, -capturing the semantic meaning and contextual relationships of words or -phrases, enabling machines to understand and analyze natural language more -effectively. + Text embedding is a process that converts text into numerical vectors, + capturing the semantic meaning and contextual relationships of words or + phrases, enabling machines to understand and analyze natural language more + effectively. - InputType: text - OutputType: text + InputType: text + OutputType: text """ + function: str = "text-embedding" input_type: str = DataType.TEXT output_type: str = DataType.TEXT @@ -3066,13 +3146,14 @@ def __init__(self, node=None): class FactChecking(AssetNode[FactCheckingInputs, FactCheckingOutputs]): """ - Fact Checking is the process of verifying the accuracy and truthfulness of -information, statements, or claims by cross-referencing with reliable sources -and evidence. + Fact Checking is the process of verifying the accuracy and truthfulness of + information, statements, or claims by cross-referencing with reliable sources + and evidence. 
- InputType: text - OutputType: label + InputType: text + OutputType: label """ + function: str = "fact-checking" input_type: str = DataType.TEXT output_type: str = DataType.LABEL @@ -3101,12 +3182,13 @@ def __init__(self, node=None): class TextToAudio(AssetNode[TextToAudioInputs, TextToAudioOutputs]): """ - The Text to Audio function converts written text into spoken words, allowing -users to listen to the content instead of reading it. + The Text to Audio function converts written text into spoken words, allowing + users to listen to the content instead of reading it. - InputType: text - OutputType: audio + InputType: text + OutputType: audio """ + function: str = "text-to-audio" input_type: str = DataType.TEXT output_type: str = DataType.AUDIO @@ -3139,12 +3221,13 @@ def __init__(self, node=None): class FillTextMask(AssetNode[FillTextMaskInputs, FillTextMaskOutputs]): """ - Completes missing parts of a text based on the context, ideal for content -generation or data augmentation tasks. + Completes missing parts of a text based on the context, ideal for content + generation or data augmentation tasks. - InputType: text - OutputType: text + InputType: text + OutputType: text """ + function: str = "fill-text-mask" input_type: str = DataType.TEXT output_type: str = DataType.TEXT @@ -3183,13 +3266,14 @@ def __init__(self, node=None): class VoiceCloning(AssetNode[VoiceCloningInputs, VoiceCloningOutputs]): """ - Replicates a person's voice based on a sample, allowing for the generation of -speech in that person's tone and style. Used cautiously due to ethical -considerations. + Replicates a person's voice based on a sample, allowing for the generation of + speech in that person's tone and style. Used cautiously due to ethical + considerations. 
- InputType: text - OutputType: audio + InputType: text + OutputType: audio """ + function: str = "voice-cloning" input_type: str = DataType.TEXT output_type: str = DataType.AUDIO @@ -3222,12 +3306,13 @@ def __init__(self, node=None): class Diacritization(AssetNode[DiacritizationInputs, DiacritizationOutputs]): """ - Adds diacritical marks to text, essential for languages where meaning can -change based on diacritics. + Adds diacritical marks to text, essential for languages where meaning can + change based on diacritics. - InputType: text - OutputType: text + InputType: text + OutputType: text """ + function: str = "diacritization" input_type: str = DataType.TEXT output_type: str = DataType.TEXT @@ -3264,13 +3349,14 @@ def __init__(self, node=None): class SpeechTranslation(AssetNode[SpeechTranslationInputs, SpeechTranslationOutputs]): """ - Speech Translation is a technology that converts spoken language in real-time -from one language to another, enabling seamless communication between speakers -of different languages. + Speech Translation is a technology that converts spoken language in real-time + from one language to another, enabling seamless communication between speakers + of different languages. - InputType: audio - OutputType: text + InputType: audio + OutputType: text """ + function: str = "speech-translation" input_type: str = DataType.AUDIO output_type: str = DataType.TEXT @@ -3309,12 +3395,13 @@ def __init__(self, node=None): class SpeechSynthesis(AssetNode[SpeechSynthesisInputs, SpeechSynthesisOutputs]): """ - Generates human-like speech from written text. Ideal for text-to-speech -applications, audiobooks, and voice assistants. + Generates human-like speech from written text. Ideal for text-to-speech + applications, audiobooks, and voice assistants. 
- InputType: text - OutputType: audio + InputType: text + OutputType: audio """ + function: str = "speech-synthesis" input_type: str = DataType.TEXT output_type: str = DataType.AUDIO @@ -3347,12 +3434,13 @@ def __init__(self, node=None): class TextContentModeration(AssetNode[TextContentModerationInputs, TextContentModerationOutputs]): """ - Scans and identifies potentially harmful, offensive, or inappropriate textual -content, ensuring safer user environments. + Scans and identifies potentially harmful, offensive, or inappropriate textual + content, ensuring safer user environments. - InputType: text - OutputType: label + InputType: text + OutputType: label """ + function: str = "text-content-moderation" input_type: str = DataType.TEXT output_type: str = DataType.LABEL @@ -3387,12 +3475,13 @@ def __init__(self, node=None): class SubtitlingTranslation(AssetNode[SubtitlingTranslationInputs, SubtitlingTranslationOutputs]): """ - Converts the text of subtitles from one language to another, ensuring context -and cultural nuances are maintained. Essential for global content distribution. + Converts the text of subtitles from one language to another, ensuring context + and cultural nuances are maintained. Essential for global content distribution. - InputType: text - OutputType: text + InputType: text + OutputType: text """ + function: str = "subtitling-translation" input_type: str = DataType.TEXT output_type: str = DataType.TEXT @@ -3427,12 +3516,13 @@ def __init__(self, node=None): class AudioTranscriptAnalysis(AssetNode[AudioTranscriptAnalysisInputs, AudioTranscriptAnalysisOutputs]): """ - Analyzes transcribed audio data for insights, patterns, or specific information -extraction. + Analyzes transcribed audio data for insights, patterns, or specific information + extraction. 
- InputType: audio - OutputType: text + InputType: audio + OutputType: text """ + function: str = "audio-transcript-analysis" input_type: str = DataType.AUDIO output_type: str = DataType.TEXT @@ -3467,13 +3557,14 @@ def __init__(self, node=None): class TextGeneration(AssetNode[TextGenerationInputs, TextGenerationOutputs]): """ - Creates coherent and contextually relevant textual content based on prompts or -certain parameters. Useful for chatbots, content creation, and data -augmentation. + Creates coherent and contextually relevant textual content based on prompts or + certain parameters. Useful for chatbots, content creation, and data + augmentation. - InputType: text - OutputType: text + InputType: text + OutputType: text """ + function: str = "text-generation" input_type: str = DataType.TEXT output_type: str = DataType.TEXT @@ -3504,13 +3595,14 @@ def __init__(self, node=None): class TextNormalization(AssetNode[TextNormalizationInputs, TextNormalizationOutputs]): """ - Converts unstructured or non-standard textual data into a more readable and -uniform format, dealing with abbreviations, numerals, and other non-standard -words. + Converts unstructured or non-standard textual data into a more readable and + uniform format, dealing with abbreviations, numerals, and other non-standard + words. - InputType: text - OutputType: label + InputType: text + OutputType: label """ + function: str = "text-normalization" input_type: str = DataType.TEXT output_type: str = DataType.LABEL @@ -3547,12 +3639,13 @@ def __init__(self, node=None): class VoiceActivityDetection(BaseSegmentor[VoiceActivityDetectionInputs, VoiceActivityDetectionOutputs]): """ - Determines when a person is speaking in an audio clip. It's an essential -preprocessing step for other audio-related tasks. + Determines when a person is speaking in an audio clip. It's an essential + preprocessing step for other audio-related tasks. 
- InputType: audio - OutputType: audio + InputType: audio + OutputType: audio """ + function: str = "voice-activity-detection" input_type: str = DataType.AUDIO output_type: str = DataType.AUDIO @@ -3587,13 +3680,14 @@ def __init__(self, node=None): class VideoUnderstanding(AssetNode[VideoUnderstandingInputs, VideoUnderstandingOutputs]): """ - Video Understanding is the process of analyzing and interpreting video content -to extract meaningful information, such as identifying objects, actions, -events, and contextual relationships within the footage. + Video Understanding is the process of analyzing and interpreting video content + to extract meaningful information, such as identifying objects, actions, + events, and contextual relationships within the footage. - InputType: video - OutputType: text + InputType: video + OutputType: text """ + function: str = "video-understanding" input_type: str = DataType.VIDEO output_type: str = DataType.TEXT @@ -3634,12 +3728,13 @@ def __init__(self, node=None): class Translation(AssetNode[TranslationInputs, TranslationOutputs]): """ - Converts text from one language to another while maintaining the original -message's essence and context. Crucial for global communication. + Converts text from one language to another while maintaining the original + message's essence and context. Crucial for global communication. - InputType: text - OutputType: text + InputType: text + OutputType: text """ + function: str = "translation" input_type: str = DataType.TEXT output_type: str = DataType.TEXT @@ -3674,12 +3769,13 @@ def __init__(self, node=None): class SpeechRecognition(AssetNode[SpeechRecognitionInputs, SpeechRecognitionOutputs]): """ - Converts spoken language into written text. Useful for transcription services, -voice assistants, and applications requiring voice-to-text capabilities. + Converts spoken language into written text. 
Useful for transcription services, + voice assistants, and applications requiring voice-to-text capabilities. - InputType: audio - OutputType: text + InputType: audio + OutputType: text """ + function: str = "speech-recognition" input_type: str = DataType.AUDIO output_type: str = DataType.TEXT @@ -3716,12 +3812,13 @@ def __init__(self, node=None): class Subtitling(AssetNode[SubtitlingInputs, SubtitlingOutputs]): """ - Generates accurate subtitles for videos, enhancing accessibility for diverse -audiences. + Generates accurate subtitles for videos, enhancing accessibility for diverse + audiences. - InputType: audio - OutputType: text + InputType: audio + OutputType: text """ + function: str = "subtitling" input_type: str = DataType.AUDIO output_type: str = DataType.TEXT @@ -3730,800 +3827,817 @@ class Subtitling(AssetNode[SubtitlingInputs, SubtitlingOutputs]): outputs_class: Type[TO] = SubtitlingOutputs - class Pipeline(DefaultPipeline): - def object_detection(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> ObjectDetection: """ - Object Detection is a computer vision technology that identifies and locates -objects within an image, typically by drawing bounding boxes around the -detected objects and classifying them into predefined categories. + Object Detection is a computer vision technology that identifies and locates + objects within an image, typically by drawing bounding boxes around the + detected objects and classifying them into predefined categories. """ return ObjectDetection(*args, asset_id=asset_id, pipeline=self, **kwargs) def language_identification(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> LanguageIdentification: """ - Detects the language in which a given text is written, aiding in multilingual -platforms or content localization. + Detects the language in which a given text is written, aiding in multilingual + platforms or content localization. 
""" return LanguageIdentification(*args, asset_id=asset_id, pipeline=self, **kwargs) def depth_estimation(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> DepthEstimation: """ - Depth estimation is a computational process that determines the distance of -objects from a viewpoint, typically using visual data from cameras or sensors -to create a three-dimensional understanding of a scene. + Depth estimation is a computational process that determines the distance of + objects from a viewpoint, typically using visual data from cameras or sensors + to create a three-dimensional understanding of a scene. """ return DepthEstimation(*args, asset_id=asset_id, pipeline=self, **kwargs) def script_execution(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> ScriptExecution: """ - Script Execution refers to the process of running a set of programmed -instructions or code within a computing environment, enabling the automated -performance of tasks, calculations, or operations as defined by the script. + Script Execution refers to the process of running a set of programmed + instructions or code within a computing environment, enabling the automated + performance of tasks, calculations, or operations as defined by the script. """ return ScriptExecution(*args, asset_id=asset_id, pipeline=self, **kwargs) def image_embedding(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> ImageEmbedding: """ - Image Embedding is a process that transforms an image into a fixed-dimensional -vector representation, capturing its essential features and enabling efficient -comparison, retrieval, and analysis in various machine learning and computer -vision tasks. + Image Embedding is a process that transforms an image into a fixed-dimensional + vector representation, capturing its essential features and enabling efficient + comparison, retrieval, and analysis in various machine learning and computer + vision tasks. 
""" return ImageEmbedding(*args, asset_id=asset_id, pipeline=self, **kwargs) def image_to_video_generation(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> ImageToVideoGeneration: """ - The Image To Video Generation function transforms a series of static images -into a cohesive, dynamic video sequence, often incorporating transitions, -effects, and synchronization with audio to create a visually engaging -narrative. + The Image To Video Generation function transforms a series of static images + into a cohesive, dynamic video sequence, often incorporating transitions, + effects, and synchronization with audio to create a visually engaging + narrative. """ return ImageToVideoGeneration(*args, asset_id=asset_id, pipeline=self, **kwargs) def image_impainting(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> ImageImpainting: """ - Image inpainting is a process that involves filling in missing or damaged parts -of an image in a way that is visually coherent and seamlessly blends with the -surrounding areas, often using advanced algorithms and techniques to restore -the image to its original or intended appearance. + Image inpainting is a process that involves filling in missing or damaged parts + of an image in a way that is visually coherent and seamlessly blends with the + surrounding areas, often using advanced algorithms and techniques to restore + the image to its original or intended appearance. """ return ImageImpainting(*args, asset_id=asset_id, pipeline=self, **kwargs) def style_transfer(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> StyleTransfer: """ - Style Transfer is a technique in artificial intelligence that applies the -visual style of one image (such as the brushstrokes of a famous painting) to -the content of another image, effectively blending the artistic elements of the -first image with the subject matter of the second. 
+ Style Transfer is a technique in artificial intelligence that applies the + visual style of one image (such as the brushstrokes of a famous painting) to + the content of another image, effectively blending the artistic elements of the + first image with the subject matter of the second. """ return StyleTransfer(*args, asset_id=asset_id, pipeline=self, **kwargs) - def multi_class_text_classification(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> MultiClassTextClassification: + def multi_class_text_classification( + self, asset_id: Union[str, asset.Asset], *args, **kwargs + ) -> MultiClassTextClassification: """ - Multi Class Text Classification is a natural language processing task that -involves categorizing a given text into one of several predefined classes or -categories based on its content. + Multi Class Text Classification is a natural language processing task that + involves categorizing a given text into one of several predefined classes or + categories based on its content. """ return MultiClassTextClassification(*args, asset_id=asset_id, pipeline=self, **kwargs) def part_of_speech_tagging(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> PartOfSpeechTagging: """ - Part of Speech Tagging is a natural language processing task that involves -assigning each word in a sentence its corresponding part of speech, such as -noun, verb, adjective, or adverb, based on its role and context within the -sentence. + Part of Speech Tagging is a natural language processing task that involves + assigning each word in a sentence its corresponding part of speech, such as + noun, verb, adjective, or adverb, based on its role and context within the + sentence. 
""" return PartOfSpeechTagging(*args, asset_id=asset_id, pipeline=self, **kwargs) def metric_aggregation(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> MetricAggregation: """ - Metric Aggregation is a function that computes and summarizes numerical data by -applying statistical operations, such as averaging, summing, or finding the -minimum and maximum values, to provide insights and facilitate analysis of -large datasets. + Metric Aggregation is a function that computes and summarizes numerical data by + applying statistical operations, such as averaging, summing, or finding the + minimum and maximum values, to provide insights and facilitate analysis of + large datasets. """ return MetricAggregation(*args, asset_id=asset_id, pipeline=self, **kwargs) def image_colorization(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> ImageColorization: """ - Image colorization is a process that involves adding color to grayscale images, -transforming them from black-and-white to full-color representations, often -using advanced algorithms and machine learning techniques to predict and apply -the appropriate hues and shades. + Image colorization is a process that involves adding color to grayscale images, + transforming them from black-and-white to full-color representations, often + using advanced algorithms and machine learning techniques to predict and apply + the appropriate hues and shades. """ return ImageColorization(*args, asset_id=asset_id, pipeline=self, **kwargs) def intent_classification(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> IntentClassification: """ - Intent Classification is a natural language processing task that involves -analyzing and categorizing user text input to determine the underlying purpose -or goal behind the communication, such as booking a flight, asking for weather -information, or setting a reminder. 
+ Intent Classification is a natural language processing task that involves + analyzing and categorizing user text input to determine the underlying purpose + or goal behind the communication, such as booking a flight, asking for weather + information, or setting a reminder. """ return IntentClassification(*args, asset_id=asset_id, pipeline=self, **kwargs) def audio_intent_detection(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> AudioIntentDetection: """ - Audio Intent Detection is a process that involves analyzing audio signals to -identify and interpret the underlying intentions or purposes behind spoken -words, enabling systems to understand and respond appropriately to human -speech. + Audio Intent Detection is a process that involves analyzing audio signals to + identify and interpret the underlying intentions or purposes behind spoken + words, enabling systems to understand and respond appropriately to human + speech. """ return AudioIntentDetection(*args, asset_id=asset_id, pipeline=self, **kwargs) def asr_quality_estimation(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> AsrQualityEstimation: """ - ASR Quality Estimation is a process that evaluates the accuracy and reliability -of automatic speech recognition systems by analyzing their performance in -transcribing spoken language into text. + ASR Quality Estimation is a process that evaluates the accuracy and reliability + of automatic speech recognition systems by analyzing their performance in + transcribing spoken language into text. """ return AsrQualityEstimation(*args, asset_id=asset_id, pipeline=self, **kwargs) def search(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> Search: """ - An algorithm that identifies and returns data or items that match particular -keywords or conditions from a dataset. A fundamental tool for databases and -websites. 
+ An algorithm that identifies and returns data or items that match particular + keywords or conditions from a dataset. A fundamental tool for databases and + websites. """ return Search(*args, asset_id=asset_id, pipeline=self, **kwargs) def viseme_generation(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> VisemeGeneration: """ - Viseme Generation is the process of creating visual representations of -phonemes, which are the distinct units of sound in speech, to synchronize lip -movements with spoken words in animations or virtual avatars. + Viseme Generation is the process of creating visual representations of + phonemes, which are the distinct units of sound in speech, to synchronize lip + movements with spoken words in animations or virtual avatars. """ return VisemeGeneration(*args, asset_id=asset_id, pipeline=self, **kwargs) def ocr(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> Ocr: """ - Converts images of typed, handwritten, or printed text into machine-encoded -text. Used in digitizing printed texts for data retrieval. + Converts images of typed, handwritten, or printed text into machine-encoded + text. Used in digitizing printed texts for data retrieval. """ return Ocr(*args, asset_id=asset_id, pipeline=self, **kwargs) def loglikelihood(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> Loglikelihood: """ - The Log Likelihood function measures the probability of observing the given -data under a specific statistical model by taking the natural logarithm of the -likelihood function, thereby transforming the product of probabilities into a -sum, which simplifies the process of optimization and parameter estimation. 
+ The Log Likelihood function measures the probability of observing the given + data under a specific statistical model by taking the natural logarithm of the + likelihood function, thereby transforming the product of probabilities into a + sum, which simplifies the process of optimization and parameter estimation. """ return Loglikelihood(*args, asset_id=asset_id, pipeline=self, **kwargs) def video_embedding(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> VideoEmbedding: """ - Video Embedding is a process that transforms video content into a fixed- -dimensional vector representation, capturing essential features and patterns to -facilitate tasks such as retrieval, classification, and recommendation. + Video Embedding is a process that transforms video content into a fixed- + dimensional vector representation, capturing essential features and patterns to + facilitate tasks such as retrieval, classification, and recommendation. """ return VideoEmbedding(*args, asset_id=asset_id, pipeline=self, **kwargs) def text_segmenation(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> TextSegmenation: """ - Text Segmentation is the process of dividing a continuous text into meaningful -units, such as words, sentences, or topics, to facilitate easier analysis and -understanding. + Text Segmentation is the process of dividing a continuous text into meaningful + units, such as words, sentences, or topics, to facilitate easier analysis and + understanding. """ return TextSegmenation(*args, asset_id=asset_id, pipeline=self, **kwargs) def expression_detection(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> ExpressionDetection: """ - Expression Detection is the process of identifying and analyzing facial -expressions to interpret emotions or intentions using AI and computer vision -techniques. 
+ Expression Detection is the process of identifying and analyzing facial + expressions to interpret emotions or intentions using AI and computer vision + techniques. """ return ExpressionDetection(*args, asset_id=asset_id, pipeline=self, **kwargs) def speech_classification(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> SpeechClassification: """ - Categorizes audio clips based on their content, aiding in content organization -and targeted actions. + Categorizes audio clips based on their content, aiding in content organization + and targeted actions. """ return SpeechClassification(*args, asset_id=asset_id, pipeline=self, **kwargs) def inverse_text_normalization(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> InverseTextNormalization: """ - Inverse Text Normalization is the process of converting spoken or written -language in its normalized form, such as numbers, dates, and abbreviations, -back into their original, more complex or detailed textual representations. + Inverse Text Normalization is the process of converting spoken or written + language in its normalized form, such as numbers, dates, and abbreviations, + back into their original, more complex or detailed textual representations. """ return InverseTextNormalization(*args, asset_id=asset_id, pipeline=self, **kwargs) def extract_audio_from_video(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> ExtractAudioFromVideo: """ - Isolates and extracts audio tracks from video files, aiding in audio analysis -or transcription tasks. + Isolates and extracts audio tracks from video files, aiding in audio analysis + or transcription tasks. """ return ExtractAudioFromVideo(*args, asset_id=asset_id, pipeline=self, **kwargs) def image_compression(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> ImageCompression: """ - Reduces the size of image files without significantly compromising their visual -quality. Useful for optimizing storage and improving webpage load times. 
+ Reduces the size of image files without significantly compromising their visual + quality. Useful for optimizing storage and improving webpage load times. """ return ImageCompression(*args, asset_id=asset_id, pipeline=self, **kwargs) def noise_removal(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> NoiseRemoval: """ - Noise Removal is a process that involves identifying and eliminating unwanted -random variations or disturbances from an audio signal to enhance the clarity -and quality of the underlying information. + Noise Removal is a process that involves identifying and eliminating unwanted + random variations or disturbances from an audio signal to enhance the clarity + and quality of the underlying information. """ return NoiseRemoval(*args, asset_id=asset_id, pipeline=self, **kwargs) def text_summarization(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> TextSummarization: """ - Extracts the main points from a larger body of text, producing a concise -summary without losing the primary message. + Extracts the main points from a larger body of text, producing a concise + summary without losing the primary message. """ return TextSummarization(*args, asset_id=asset_id, pipeline=self, **kwargs) def text_generation_metric(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> TextGenerationMetric: """ - A Text Generation Metric is a quantitative measure used to evaluate the quality -and effectiveness of text produced by natural language processing models, often -assessing aspects such as coherence, relevance, fluency, and adherence to given -prompts or instructions. + A Text Generation Metric is a quantitative measure used to evaluate the quality + and effectiveness of text produced by natural language processing models, often + assessing aspects such as coherence, relevance, fluency, and adherence to given + prompts or instructions. 
""" return TextGenerationMetric(*args, asset_id=asset_id, pipeline=self, **kwargs) def image_captioning(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> ImageCaptioning: """ - Image Captioning is a process that involves generating a textual description of -an image, typically using machine learning models to analyze the visual content -and produce coherent and contextually relevant sentences that describe the -objects, actions, and scenes depicted in the image. + Image Captioning is a process that involves generating a textual description of + an image, typically using machine learning models to analyze the visual content + and produce coherent and contextually relevant sentences that describe the + objects, actions, and scenes depicted in the image. """ return ImageCaptioning(*args, asset_id=asset_id, pipeline=self, **kwargs) def benchmark_scoring_mt(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> BenchmarkScoringMt: """ - Benchmark Scoring MT is a function designed to evaluate and score machine -translation systems by comparing their output against a set of predefined -benchmarks, thereby assessing their accuracy and performance. + Benchmark Scoring MT is a function designed to evaluate and score machine + translation systems by comparing their output against a set of predefined + benchmarks, thereby assessing their accuracy and performance. """ return BenchmarkScoringMt(*args, asset_id=asset_id, pipeline=self, **kwargs) def speaker_diarization_audio(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> SpeakerDiarizationAudio: """ - Identifies individual speakers and their respective speech segments within an -audio clip. Ideal for multi-speaker recordings or conference calls. + Identifies individual speakers and their respective speech segments within an + audio clip. Ideal for multi-speaker recordings or conference calls. 
""" return SpeakerDiarizationAudio(*args, asset_id=asset_id, pipeline=self, **kwargs) def benchmark_scoring_asr(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> BenchmarkScoringAsr: """ - Benchmark Scoring ASR is a function that evaluates and compares the performance -of automatic speech recognition systems by analyzing their accuracy, speed, and -other relevant metrics against a standardized set of benchmarks. + Benchmark Scoring ASR is a function that evaluates and compares the performance + of automatic speech recognition systems by analyzing their accuracy, speed, and + other relevant metrics against a standardized set of benchmarks. """ return BenchmarkScoringAsr(*args, asset_id=asset_id, pipeline=self, **kwargs) def visual_question_answering(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> VisualQuestionAnswering: """ - Visual Question Answering (VQA) is a task in artificial intelligence that -involves analyzing an image and providing accurate, contextually relevant -answers to questions posed about the visual content of that image. + Visual Question Answering (VQA) is a task in artificial intelligence that + involves analyzing an image and providing accurate, contextually relevant + answers to questions posed about the visual content of that image. """ return VisualQuestionAnswering(*args, asset_id=asset_id, pipeline=self, **kwargs) def document_image_parsing(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> DocumentImageParsing: """ - Document Image Parsing is the process of analyzing and converting scanned or -photographed images of documents into structured, machine-readable formats by -identifying and extracting text, layout, and other relevant information. + Document Image Parsing is the process of analyzing and converting scanned or + photographed images of documents into structured, machine-readable formats by + identifying and extracting text, layout, and other relevant information. 
""" return DocumentImageParsing(*args, asset_id=asset_id, pipeline=self, **kwargs) - def multi_label_text_classification(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> MultiLabelTextClassification: + def multi_label_text_classification( + self, asset_id: Union[str, asset.Asset], *args, **kwargs + ) -> MultiLabelTextClassification: """ - Multi Label Text Classification is a natural language processing task where a -given text is analyzed and assigned multiple relevant labels or categories from -a predefined set, allowing for the text to belong to more than one category -simultaneously. + Multi Label Text Classification is a natural language processing task where a + given text is analyzed and assigned multiple relevant labels or categories from + a predefined set, allowing for the text to belong to more than one category + simultaneously. """ return MultiLabelTextClassification(*args, asset_id=asset_id, pipeline=self, **kwargs) def text_reconstruction(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> TextReconstruction: """ - Text Reconstruction is a process that involves piecing together fragmented or -incomplete text data to restore it to its original, coherent form. + Text Reconstruction is a process that involves piecing together fragmented or + incomplete text data to restore it to its original, coherent form. """ return TextReconstruction(*args, asset_id=asset_id, pipeline=self, **kwargs) def video_content_moderation(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> VideoContentModeration: """ - Automatically reviews video content to detect and possibly remove inappropriate -or harmful material. Essential for user-generated content platforms. + Automatically reviews video content to detect and possibly remove inappropriate + or harmful material. Essential for user-generated content platforms. 
""" return VideoContentModeration(*args, asset_id=asset_id, pipeline=self, **kwargs) - def multilingual_speech_recognition(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> MultilingualSpeechRecognition: + def multilingual_speech_recognition( + self, asset_id: Union[str, asset.Asset], *args, **kwargs + ) -> MultilingualSpeechRecognition: """ - Multilingual Speech Recognition is a technology that enables the automatic -transcription of spoken language into text across multiple languages, allowing -for seamless communication and understanding in diverse linguistic contexts. + Multilingual Speech Recognition is a technology that enables the automatic + transcription of spoken language into text across multiple languages, allowing + for seamless communication and understanding in diverse linguistic contexts. """ return MultilingualSpeechRecognition(*args, asset_id=asset_id, pipeline=self, **kwargs) def entity_linking(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> EntityLinking: """ - Associates identified entities in the text with specific entries in a knowledge -base or database. + Associates identified entities in the text with specific entries in a knowledge + base or database. """ return EntityLinking(*args, asset_id=asset_id, pipeline=self, **kwargs) def audio_reconstruction(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> AudioReconstruction: """ - Audio Reconstruction is the process of restoring or recreating audio signals -from incomplete, damaged, or degraded recordings to achieve a high-quality, -accurate representation of the original sound. + Audio Reconstruction is the process of restoring or recreating audio signals + from incomplete, damaged, or degraded recordings to achieve a high-quality, + accurate representation of the original sound. 
""" return AudioReconstruction(*args, asset_id=asset_id, pipeline=self, **kwargs) def audio_emotion_detection(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> AudioEmotionDetection: """ - Audio Emotion Detection is a technology that analyzes vocal characteristics and -patterns in audio recordings to identify and classify the emotional state of -the speaker. + Audio Emotion Detection is a technology that analyzes vocal characteristics and + patterns in audio recordings to identify and classify the emotional state of + the speaker. """ return AudioEmotionDetection(*args, asset_id=asset_id, pipeline=self, **kwargs) def split_on_linebreak(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> SplitOnLinebreak: """ - The "Split On Linebreak" function divides a given string into a list of -substrings, using linebreaks (newline characters) as the points of separation. + The "Split On Linebreak" function divides a given string into a list of + substrings, using linebreaks (newline characters) as the points of separation. """ return SplitOnLinebreak(*args, asset_id=asset_id, pipeline=self, **kwargs) def keyword_spotting(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> KeywordSpotting: """ - Keyword Spotting is a function that enables the detection and identification of -specific words or phrases within a stream of audio, often used in voice- -activated systems to trigger actions or commands based on recognized keywords. + Keyword Spotting is a function that enables the detection and identification of + specific words or phrases within a stream of audio, often used in voice- + activated systems to trigger actions or commands based on recognized keywords. """ return KeywordSpotting(*args, asset_id=asset_id, pipeline=self, **kwargs) def text_classification(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> TextClassification: """ - Categorizes text into predefined groups or topics, facilitating content -organization and targeted actions. 
+ Categorizes text into predefined groups or topics, facilitating content + organization and targeted actions. """ return TextClassification(*args, asset_id=asset_id, pipeline=self, **kwargs) - def offensive_language_identification(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> OffensiveLanguageIdentification: + def offensive_language_identification( + self, asset_id: Union[str, asset.Asset], *args, **kwargs + ) -> OffensiveLanguageIdentification: """ - Detects language or phrases that might be considered offensive, aiding in -content moderation and creating respectful user interactions. + Detects language or phrases that might be considered offensive, aiding in + content moderation and creating respectful user interactions. """ return OffensiveLanguageIdentification(*args, asset_id=asset_id, pipeline=self, **kwargs) - def speech_non_speech_classification(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> SpeechNonSpeechClassification: + def speech_non_speech_classification( + self, asset_id: Union[str, asset.Asset], *args, **kwargs + ) -> SpeechNonSpeechClassification: """ - Differentiates between speech and non-speech audio segments. Great for editing -software and transcription services to exclude irrelevant audio. + Differentiates between speech and non-speech audio segments. Great for editing + software and transcription services to exclude irrelevant audio. """ return SpeechNonSpeechClassification(*args, asset_id=asset_id, pipeline=self, **kwargs) def named_entity_recognition(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> NamedEntityRecognition: """ - Identifies and classifies named entities (e.g., persons, organizations, -locations) within text. Useful for information extraction, content tagging, and -search enhancements. + Identifies and classifies named entities (e.g., persons, organizations, + locations) within text. Useful for information extraction, content tagging, and + search enhancements. 
""" return NamedEntityRecognition(*args, asset_id=asset_id, pipeline=self, **kwargs) def image_manipulation(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> ImageManipulation: """ - Image Manipulation refers to the process of altering or enhancing digital -images using various techniques and tools to achieve desired visual effects, -correct imperfections, or transform the image's appearance. + Image Manipulation refers to the process of altering or enhancing digital + images using various techniques and tools to achieve desired visual effects, + correct imperfections, or transform the image's appearance. """ return ImageManipulation(*args, asset_id=asset_id, pipeline=self, **kwargs) def split_on_silence(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> SplitOnSilence: """ - The "Split On Silence" function divides an audio recording into separate -segments based on periods of silence, allowing for easier editing and analysis -of individual sections. + The "Split On Silence" function divides an audio recording into separate + segments based on periods of silence, allowing for easier editing and analysis + of individual sections. """ return SplitOnSilence(*args, asset_id=asset_id, pipeline=self, **kwargs) def text_to_video_generation(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> TextToVideoGeneration: """ - Text To Video Generation is a process that converts written descriptions or -scripts into dynamic, visual video content using advanced algorithms and -artificial intelligence. + Text To Video Generation is a process that converts written descriptions or + scripts into dynamic, visual video content using advanced algorithms and + artificial intelligence. 
""" return TextToVideoGeneration(*args, asset_id=asset_id, pipeline=self, **kwargs) - def document_information_extraction(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> DocumentInformationExtraction: + def document_information_extraction( + self, asset_id: Union[str, asset.Asset], *args, **kwargs + ) -> DocumentInformationExtraction: """ - Document Information Extraction is the process of automatically identifying, -extracting, and structuring relevant data from unstructured or semi-structured -documents, such as invoices, receipts, contracts, and forms, to facilitate -easier data management and analysis. + Document Information Extraction is the process of automatically identifying, + extracting, and structuring relevant data from unstructured or semi-structured + documents, such as invoices, receipts, contracts, and forms, to facilitate + easier data management and analysis. """ return DocumentInformationExtraction(*args, asset_id=asset_id, pipeline=self, **kwargs) def video_generation(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> VideoGeneration: """ - Produces video content based on specific inputs or datasets. Can be used for -simulations, animations, or even deepfake detection. + Produces video content based on specific inputs or datasets. Can be used for + simulations, animations, or even deepfake detection. """ return VideoGeneration(*args, asset_id=asset_id, pipeline=self, **kwargs) def text_to_image_generation(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> TextToImageGeneration: """ - Creates a visual representation based on textual input, turning descriptions -into pictorial forms. Used in creative processes and content generation. + Creates a visual representation based on textual input, turning descriptions + into pictorial forms. Used in creative processes and content generation. 
""" return TextToImageGeneration(*args, asset_id=asset_id, pipeline=self, **kwargs) - def referenceless_text_generation_metric(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> ReferencelessTextGenerationMetric: + def referenceless_text_generation_metric( + self, asset_id: Union[str, asset.Asset], *args, **kwargs + ) -> ReferencelessTextGenerationMetric: """ - The Referenceless Text Generation Metric is a method for evaluating the quality -of generated text without requiring a reference text for comparison, often -leveraging models or algorithms to assess coherence, relevance, and fluency -based on intrinsic properties of the text itself. + The Referenceless Text Generation Metric is a method for evaluating the quality + of generated text without requiring a reference text for comparison, often + leveraging models or algorithms to assess coherence, relevance, and fluency + based on intrinsic properties of the text itself. """ return ReferencelessTextGenerationMetric(*args, asset_id=asset_id, pipeline=self, **kwargs) def other__multipurpose_(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> OtherMultipurpose: """ - The "Other (Multipurpose)" function serves as a versatile category designed to -accommodate a wide range of tasks and activities that do not fit neatly into -predefined classifications, offering flexibility and adaptability for various -needs. + The "Other (Multipurpose)" function serves as a versatile category designed to + accommodate a wide range of tasks and activities that do not fit neatly into + predefined classifications, offering flexibility and adaptability for various + needs. """ return OtherMultipurpose(*args, asset_id=asset_id, pipeline=self, **kwargs) def image_label_detection(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> ImageLabelDetection: """ - Identifies objects, themes, or topics within images, useful for image -categorization, search, and recommendation systems. 
+ Identifies objects, themes, or topics within images, useful for image + categorization, search, and recommendation systems. """ return ImageLabelDetection(*args, asset_id=asset_id, pipeline=self, **kwargs) def speaker_diarization_video(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> SpeakerDiarizationVideo: """ - Segments a video based on different speakers, identifying when each individual -speaks. Useful for transcriptions and understanding multi-person conversations. + Segments a video based on different speakers, identifying when each individual + speaks. Useful for transcriptions and understanding multi-person conversations. """ return SpeakerDiarizationVideo(*args, asset_id=asset_id, pipeline=self, **kwargs) def audio_transcript_improvement(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> AudioTranscriptImprovement: """ - Refines and corrects transcriptions generated from audio data, improving -readability and accuracy. + Refines and corrects transcriptions generated from audio data, improving + readability and accuracy. """ return AudioTranscriptImprovement(*args, asset_id=asset_id, pipeline=self, **kwargs) def dialect_detection(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> DialectDetection: """ - Identifies specific dialects within a language, aiding in localized content -creation or user experience personalization. + Identifies specific dialects within a language, aiding in localized content + creation or user experience personalization. """ return DialectDetection(*args, asset_id=asset_id, pipeline=self, **kwargs) def sentiment_analysis(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> SentimentAnalysis: """ - Determines the sentiment or emotion (e.g., positive, negative, neutral) of a -piece of text, aiding in understanding user feedback or market sentiment. 
+ Determines the sentiment or emotion (e.g., positive, negative, neutral) of a + piece of text, aiding in understanding user feedback or market sentiment. """ return SentimentAnalysis(*args, asset_id=asset_id, pipeline=self, **kwargs) def speech_embedding(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> SpeechEmbedding: """ - Transforms spoken content into a fixed-size vector in a high-dimensional space -that captures the content's essence. Facilitates tasks like speech recognition -and speaker verification. + Transforms spoken content into a fixed-size vector in a high-dimensional space + that captures the content's essence. Facilitates tasks like speech recognition + and speaker verification. """ return SpeechEmbedding(*args, asset_id=asset_id, pipeline=self, **kwargs) def text_generation_metric_default(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> TextGenerationMetricDefault: """ - The "Text Generation Metric Default" function provides a standard set of -evaluation metrics for assessing the quality and performance of text generation -models. + The "Text Generation Metric Default" function provides a standard set of + evaluation metrics for assessing the quality and performance of text generation + models. """ return TextGenerationMetricDefault(*args, asset_id=asset_id, pipeline=self, **kwargs) def audio_generation_metric(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> AudioGenerationMetric: """ - The Audio Generation Metric is a quantitative measure used to evaluate the -quality, accuracy, and overall performance of audio generated by artificial -intelligence systems, often considering factors such as fidelity, -intelligibility, and similarity to human-produced audio. 
+ The Audio Generation Metric is a quantitative measure used to evaluate the + quality, accuracy, and overall performance of audio generated by artificial + intelligence systems, often considering factors such as fidelity, + intelligibility, and similarity to human-produced audio. """ return AudioGenerationMetric(*args, asset_id=asset_id, pipeline=self, **kwargs) def audio_language_identification(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> AudioLanguageIdentification: """ - Audio Language Identification is a process that involves analyzing an audio -recording to determine the language being spoken. + Audio Language Identification is a process that involves analyzing an audio + recording to determine the language being spoken. """ return AudioLanguageIdentification(*args, asset_id=asset_id, pipeline=self, **kwargs) def video_label_detection(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> VideoLabelDetection: """ - Identifies and tags objects, scenes, or activities within a video. Useful for -content indexing and recommendation systems. + Identifies and tags objects, scenes, or activities within a video. Useful for + content indexing and recommendation systems. """ return VideoLabelDetection(*args, asset_id=asset_id, pipeline=self, **kwargs) def topic_classification(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> TopicClassification: """ - Assigns categories or topics to a piece of text based on its content, -facilitating content organization and retrieval. + Assigns categories or topics to a piece of text based on its content, + facilitating content organization and retrieval. 
""" return TopicClassification(*args, asset_id=asset_id, pipeline=self, **kwargs) - def referenceless_text_generation_metric_default(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> ReferencelessTextGenerationMetricDefault: + def referenceless_text_generation_metric_default( + self, asset_id: Union[str, asset.Asset], *args, **kwargs + ) -> ReferencelessTextGenerationMetricDefault: """ - The Referenceless Text Generation Metric Default is a function designed to -evaluate the quality of generated text without relying on reference texts for -comparison. + The Referenceless Text Generation Metric Default is a function designed to + evaluate the quality of generated text without relying on reference texts for + comparison. """ return ReferencelessTextGenerationMetricDefault(*args, asset_id=asset_id, pipeline=self, **kwargs) def image_content_moderation(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> ImageContentModeration: """ - Detects and filters out inappropriate or harmful images, essential for -platforms with user-generated visual content. + Detects and filters out inappropriate or harmful images, essential for + platforms with user-generated visual content. """ return ImageContentModeration(*args, asset_id=asset_id, pipeline=self, **kwargs) def asr_age_classification(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> AsrAgeClassification: """ - The ASR Age Classification function is designed to analyze audio recordings of -speech to determine the speaker's age group by leveraging automatic speech -recognition (ASR) technology and machine learning algorithms. + The ASR Age Classification function is designed to analyze audio recordings of + speech to determine the speaker's age group by leveraging automatic speech + recognition (ASR) technology and machine learning algorithms. 
""" return AsrAgeClassification(*args, asset_id=asset_id, pipeline=self, **kwargs) def asr_gender_classification(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> AsrGenderClassification: """ - The ASR Gender Classification function analyzes audio recordings to determine -and classify the speaker's gender based on their voice characteristics. + The ASR Gender Classification function analyzes audio recordings to determine + and classify the speaker's gender based on their voice characteristics. """ return AsrGenderClassification(*args, asset_id=asset_id, pipeline=self, **kwargs) def base_model(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> BaseModel: """ - The Base-Model function serves as a foundational framework designed to provide -essential features and capabilities upon which more specialized or advanced -models can be built and customized. + The Base-Model function serves as a foundational framework designed to provide + essential features and capabilities upon which more specialized or advanced + models can be built and customized. """ return BaseModel(*args, asset_id=asset_id, pipeline=self, **kwargs) def language_identification_audio(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> LanguageIdentificationAudio: """ - The Language Identification Audio function analyzes audio input to determine -and identify the language being spoken. + The Language Identification Audio function analyzes audio input to determine + and identify the language being spoken. 
""" return LanguageIdentificationAudio(*args, asset_id=asset_id, pipeline=self, **kwargs) - def multi_class_image_classification(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> MultiClassImageClassification: + def multi_class_image_classification( + self, asset_id: Union[str, asset.Asset], *args, **kwargs + ) -> MultiClassImageClassification: """ - Multi Class Image Classification is a machine learning task where an algorithm -is trained to categorize images into one of several predefined classes or -categories based on their visual content. + Multi Class Image Classification is a machine learning task where an algorithm + is trained to categorize images into one of several predefined classes or + categories based on their visual content. """ return MultiClassImageClassification(*args, asset_id=asset_id, pipeline=self, **kwargs) def semantic_segmentation(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> SemanticSegmentation: """ - Semantic segmentation is a computer vision process that involves classifying -each pixel in an image into a predefined category, effectively partitioning the -image into meaningful segments based on the objects or regions they represent. + Semantic segmentation is a computer vision process that involves classifying + each pixel in an image into a predefined category, effectively partitioning the + image into meaningful segments based on the objects or regions they represent. """ return SemanticSegmentation(*args, asset_id=asset_id, pipeline=self, **kwargs) def instance_segmentation(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> InstanceSegmentation: """ - Instance segmentation is a computer vision task that involves detecting and -delineating each distinct object within an image, assigning a unique label and -precise boundary to every individual instance of objects, even if they belong -to the same category. 
+ Instance segmentation is a computer vision task that involves detecting and + delineating each distinct object within an image, assigning a unique label and + precise boundary to every individual instance of objects, even if they belong + to the same category. """ return InstanceSegmentation(*args, asset_id=asset_id, pipeline=self, **kwargs) def emotion_detection(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> EmotionDetection: """ - Identifies human emotions from text or audio, enhancing user experience in -chatbots or customer feedback analysis. + Identifies human emotions from text or audio, enhancing user experience in + chatbots or customer feedback analysis. """ return EmotionDetection(*args, asset_id=asset_id, pipeline=self, **kwargs) def text_spam_detection(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> TextSpamDetection: """ - Identifies and filters out unwanted or irrelevant text content, ideal for -moderating user-generated content or ensuring quality in communication -platforms. + Identifies and filters out unwanted or irrelevant text content, ideal for + moderating user-generated content or ensuring quality in communication + platforms. """ return TextSpamDetection(*args, asset_id=asset_id, pipeline=self, **kwargs) def text_denormalization(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> TextDenormalization: """ - Converts standardized or normalized text into its original, often more -readable, form. Useful in natural language generation tasks. + Converts standardized or normalized text into its original, often more + readable, form. Useful in natural language generation tasks. 
""" return TextDenormalization(*args, asset_id=asset_id, pipeline=self, **kwargs) - def referenceless_audio_generation_metric(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> ReferencelessAudioGenerationMetric: + def referenceless_audio_generation_metric( + self, asset_id: Union[str, asset.Asset], *args, **kwargs + ) -> ReferencelessAudioGenerationMetric: """ - The Referenceless Audio Generation Metric is a tool designed to evaluate the -quality of generated audio content without the need for a reference or original -audio sample for comparison. + The Referenceless Audio Generation Metric is a tool designed to evaluate the + quality of generated audio content without the need for a reference or original + audio sample for comparison. """ return ReferencelessAudioGenerationMetric(*args, asset_id=asset_id, pipeline=self, **kwargs) def audio_forced_alignment(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> AudioForcedAlignment: """ - Synchronizes phonetic and phonological text with the corresponding segments in -an audio file. Useful in linguistic research and detailed transcription tasks. + Synchronizes phonetic and phonological text with the corresponding segments in + an audio file. Useful in linguistic research and detailed transcription tasks. """ return AudioForcedAlignment(*args, asset_id=asset_id, pipeline=self, **kwargs) def video_forced_alignment(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> VideoForcedAlignment: """ - Aligns the transcription of spoken content in a video with its corresponding -timecodes, facilitating subtitle creation. + Aligns the transcription of spoken content in a video with its corresponding + timecodes, facilitating subtitle creation. 
""" return VideoForcedAlignment(*args, asset_id=asset_id, pipeline=self, **kwargs) def classification_metric(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> ClassificationMetric: """ - A Classification Metric is a quantitative measure used to evaluate the quality -and effectiveness of classification models. + A Classification Metric is a quantitative measure used to evaluate the quality + and effectiveness of classification models. """ return ClassificationMetric(*args, asset_id=asset_id, pipeline=self, **kwargs) def auto_mask_generation(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> AutoMaskGeneration: """ - Auto-mask generation refers to the automated process of creating masks in image -processing or computer vision, typically for segmentation tasks. A mask is a -binary or multi-class image that labels different parts of an image, usually -separating the foreground (objects of interest) from the background, or -identifying specific object classes in an image. + Auto-mask generation refers to the automated process of creating masks in image + processing or computer vision, typically for segmentation tasks. A mask is a + binary or multi-class image that labels different parts of an image, usually + separating the foreground (objects of interest) from the background, or + identifying specific object classes in an image. """ return AutoMaskGeneration(*args, asset_id=asset_id, pipeline=self, **kwargs) def text_embedding(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> TextEmbedding: """ - Text embedding is a process that converts text into numerical vectors, -capturing the semantic meaning and contextual relationships of words or -phrases, enabling machines to understand and analyze natural language more -effectively. 
+ Text embedding is a process that converts text into numerical vectors, + capturing the semantic meaning and contextual relationships of words or + phrases, enabling machines to understand and analyze natural language more + effectively. """ return TextEmbedding(*args, asset_id=asset_id, pipeline=self, **kwargs) def fact_checking(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> FactChecking: """ - Fact Checking is the process of verifying the accuracy and truthfulness of -information, statements, or claims by cross-referencing with reliable sources -and evidence. + Fact Checking is the process of verifying the accuracy and truthfulness of + information, statements, or claims by cross-referencing with reliable sources + and evidence. """ return FactChecking(*args, asset_id=asset_id, pipeline=self, **kwargs) def text_to_audio(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> TextToAudio: """ - The Text to Audio function converts written text into spoken words, allowing -users to listen to the content instead of reading it. + The Text to Audio function converts written text into spoken words, allowing + users to listen to the content instead of reading it. """ return TextToAudio(*args, asset_id=asset_id, pipeline=self, **kwargs) def fill_text_mask(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> FillTextMask: """ - Completes missing parts of a text based on the context, ideal for content -generation or data augmentation tasks. + Completes missing parts of a text based on the context, ideal for content + generation or data augmentation tasks. """ return FillTextMask(*args, asset_id=asset_id, pipeline=self, **kwargs) def voice_cloning(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> VoiceCloning: """ - Replicates a person's voice based on a sample, allowing for the generation of -speech in that person's tone and style. Used cautiously due to ethical -considerations. 
+ Replicates a person's voice based on a sample, allowing for the generation of + speech in that person's tone and style. Used cautiously due to ethical + considerations. """ return VoiceCloning(*args, asset_id=asset_id, pipeline=self, **kwargs) def diacritization(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> Diacritization: """ - Adds diacritical marks to text, essential for languages where meaning can -change based on diacritics. + Adds diacritical marks to text, essential for languages where meaning can + change based on diacritics. """ return Diacritization(*args, asset_id=asset_id, pipeline=self, **kwargs) def speech_translation(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> SpeechTranslation: """ - Speech Translation is a technology that converts spoken language in real-time -from one language to another, enabling seamless communication between speakers -of different languages. + Speech Translation is a technology that converts spoken language in real-time + from one language to another, enabling seamless communication between speakers + of different languages. """ return SpeechTranslation(*args, asset_id=asset_id, pipeline=self, **kwargs) def speech_synthesis(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> SpeechSynthesis: """ - Generates human-like speech from written text. Ideal for text-to-speech -applications, audiobooks, and voice assistants. + Generates human-like speech from written text. Ideal for text-to-speech + applications, audiobooks, and voice assistants. """ return SpeechSynthesis(*args, asset_id=asset_id, pipeline=self, **kwargs) def text_content_moderation(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> TextContentModeration: """ - Scans and identifies potentially harmful, offensive, or inappropriate textual -content, ensuring safer user environments. + Scans and identifies potentially harmful, offensive, or inappropriate textual + content, ensuring safer user environments. 
""" return TextContentModeration(*args, asset_id=asset_id, pipeline=self, **kwargs) def subtitling_translation(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> SubtitlingTranslation: """ - Converts the text of subtitles from one language to another, ensuring context -and cultural nuances are maintained. Essential for global content distribution. + Converts the text of subtitles from one language to another, ensuring context + and cultural nuances are maintained. Essential for global content distribution. """ return SubtitlingTranslation(*args, asset_id=asset_id, pipeline=self, **kwargs) def audio_transcript_analysis(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> AudioTranscriptAnalysis: """ - Analyzes transcribed audio data for insights, patterns, or specific information -extraction. + Analyzes transcribed audio data for insights, patterns, or specific information + extraction. """ return AudioTranscriptAnalysis(*args, asset_id=asset_id, pipeline=self, **kwargs) def text_generation(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> TextGeneration: """ - Creates coherent and contextually relevant textual content based on prompts or -certain parameters. Useful for chatbots, content creation, and data -augmentation. + Creates coherent and contextually relevant textual content based on prompts or + certain parameters. Useful for chatbots, content creation, and data + augmentation. """ return TextGeneration(*args, asset_id=asset_id, pipeline=self, **kwargs) def text_normalization(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> TextNormalization: """ - Converts unstructured or non-standard textual data into a more readable and -uniform format, dealing with abbreviations, numerals, and other non-standard -words. + Converts unstructured or non-standard textual data into a more readable and + uniform format, dealing with abbreviations, numerals, and other non-standard + words. 
""" return TextNormalization(*args, asset_id=asset_id, pipeline=self, **kwargs) def voice_activity_detection(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> VoiceActivityDetection: """ - Determines when a person is speaking in an audio clip. It's an essential -preprocessing step for other audio-related tasks. + Determines when a person is speaking in an audio clip. It's an essential + preprocessing step for other audio-related tasks. """ return VoiceActivityDetection(*args, asset_id=asset_id, pipeline=self, **kwargs) def video_understanding(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> VideoUnderstanding: """ - Video Understanding is the process of analyzing and interpreting video content -to extract meaningful information, such as identifying objects, actions, -events, and contextual relationships within the footage. + Video Understanding is the process of analyzing and interpreting video content + to extract meaningful information, such as identifying objects, actions, + events, and contextual relationships within the footage. """ return VideoUnderstanding(*args, asset_id=asset_id, pipeline=self, **kwargs) def translation(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> Translation: """ - Converts text from one language to another while maintaining the original -message's essence and context. Crucial for global communication. + Converts text from one language to another while maintaining the original + message's essence and context. Crucial for global communication. """ return Translation(*args, asset_id=asset_id, pipeline=self, **kwargs) def speech_recognition(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> SpeechRecognition: """ - Converts spoken language into written text. Useful for transcription services, -voice assistants, and applications requiring voice-to-text capabilities. + Converts spoken language into written text. 
Useful for transcription services, + voice assistants, and applications requiring voice-to-text capabilities. """ return SpeechRecognition(*args, asset_id=asset_id, pipeline=self, **kwargs) def subtitling(self, asset_id: Union[str, asset.Asset], *args, **kwargs) -> Subtitling: """ - Generates accurate subtitles for videos, enhancing accessibility for diverse -audiences. + Generates accurate subtitles for videos, enhancing accessibility for diverse + audiences. """ return Subtitling(*args, asset_id=asset_id, pipeline=self, **kwargs) - diff --git a/aixplain/modules/team_agent/__init__.py b/aixplain/modules/team_agent/__init__.py index b7094348..3a0c2513 100644 --- a/aixplain/modules/team_agent/__init__.py +++ b/aixplain/modules/team_agent/__init__.py @@ -297,8 +297,8 @@ def validate(self) -> None: # validate name assert ( - re.match("^[a-zA-Z0-9 ]*$", self.name) is not None - ), "Team Agent Creation Error: Team name must not contain special characters." + re.match(r"^[a-zA-Z0-9 \-\(\)]*$", self.name) is not None + ), "Team Agent Creation Error: Team name contains invalid characters. Only alphanumeric characters, spaces, hyphens, and brackets are allowed." try: llm = ModelFactory.get(self.llm_id) @@ -313,14 +313,14 @@ def update(self) -> None: """Update the Team Agent.""" import warnings import inspect + # Get the current call stack stack = inspect.stack() - if len(stack) > 2 and stack[1].function != 'save': + if len(stack) > 2 and stack[1].function != "save": warnings.warn( - "update() is deprecated and will be removed in a future version. " - "Please use save() instead.", + "update() is deprecated and will be removed in a future version. 
" "Please use save() instead.", DeprecationWarning, - stacklevel=2 + stacklevel=2, ) from aixplain.factories.team_agent_factory.utils import build_team_agent diff --git a/tests/functional/agent/agent_functional_test.py b/tests/functional/agent/agent_functional_test.py index 314a56b2..d00169f4 100644 --- a/tests/functional/agent/agent_functional_test.py +++ b/tests/functional/agent/agent_functional_test.py @@ -74,7 +74,11 @@ def test_end2end(run_input_map, delete_agents_and_team_agents): tools.append(AgentFactory.create_pipeline_tool(pipeline=tool["pipeline_id"], description=tool["description"])) agent = AgentFactory.create( - name=run_input_map["agent_name"], description=run_input_map["agent_name"], llm_id=run_input_map["llm_id"], tools=tools + name=run_input_map["agent_name"], + description=run_input_map["agent_name"], + role=run_input_map["agent_name"], + llm_id=run_input_map["llm_id"], + tools=tools, ) assert agent is not None assert agent.status == AssetStatus.DRAFT @@ -104,6 +108,7 @@ def test_python_interpreter_tool(delete_agents_and_team_agents): agent = AgentFactory.create( name="Python Developer", description="A Python developer agent. If you get an error from a tool, try to fix it.", + role="A Python developer agent. If you get an error from a tool, try to fix it.", tools=[tool], ) assert agent is not None @@ -130,6 +135,7 @@ def test_custom_code_tool(delete_agents_and_team_agents): agent = AgentFactory.create( name="Add Numbers Agent", description="Add two numbers. Do not directly answer. Use the tool to add the numbers.", + role="Add two numbers. Do not directly answer. 
Use the tool to add the numbers.", tools=[tool], ) assert agent is not None @@ -168,7 +174,11 @@ def test_update_draft_agent(run_input_map, delete_agents_and_team_agents): tools.append(AgentFactory.create_pipeline_tool(pipeline=tool["pipeline_id"], description=tool["description"])) agent = AgentFactory.create( - name=run_input_map["agent_name"], description=run_input_map["agent_name"], llm_id=run_input_map["llm_id"], tools=tools + name=run_input_map["agent_name"], + description=run_input_map["agent_name"], + role=run_input_map["agent_name"], + llm_id=run_input_map["llm_id"], + tools=tools, ) agent_name = str(uuid4()).replace("-", "") @@ -187,6 +197,7 @@ def test_fail_non_existent_llm(delete_agents_and_team_agents): AgentFactory.create( name="Test Agent", description="Test description", + role="Test Agent Role", llm_id="non_existent_llm", tools=[AgentFactory.create_model_tool(function=Function.TRANSLATION)], ) @@ -198,6 +209,7 @@ def test_delete_agent_in_use(delete_agents_and_team_agents): agent = AgentFactory.create( name="Test Agent", description="Test description", + role="Test Agent Role", tools=[AgentFactory.create_model_tool(function=Function.TRANSLATION)], ) TeamAgentFactory.create( @@ -216,7 +228,10 @@ def test_update_tools_of_agent(run_input_map, delete_agents_and_team_agents): assert delete_agents_and_team_agents agent = AgentFactory.create( - name=run_input_map["agent_name"], description=run_input_map["agent_name"], llm_id=run_input_map["llm_id"] + name=run_input_map["agent_name"], + description=run_input_map["agent_name"], + role=run_input_map["agent_name"], + llm_id=run_input_map["llm_id"], ) assert agent is not None assert agent.status == AssetStatus.DRAFT diff --git a/tests/functional/general_assets/asset_functional_test.py b/tests/functional/general_assets/asset_functional_test.py index a826ad19..effc2539 100644 --- a/tests/functional/general_assets/asset_functional_test.py +++ b/tests/functional/general_assets/asset_functional_test.py @@ -70,6 
+70,13 @@ def test_model_supplier(): assert model.supplier.value in [desired_supplier.value for desired_supplier in desired_suppliers] +def test_model_ids(): + model_ids = ["674728f51ed8e18fd8a1383f", "674728f51ed8e18fd8a1383c"] + models = ModelFactory.list(model_ids=model_ids)["results"] + assert len(models) == 2 + assert sorted([model.id for model in models]) == sorted(model_ids) + + def test_model_sort(): function = Function.TRANSLATION src_language = Language.Portuguese diff --git a/tests/functional/model/run_utility_model_test.py b/tests/functional/model/run_utility_model_test.py index ce0b7579..b9ef5465 100644 --- a/tests/functional/model/run_utility_model_test.py +++ b/tests/functional/model/run_utility_model_test.py @@ -1,35 +1,147 @@ from aixplain.factories import ModelFactory -from aixplain.modules.model.utility_model import UtilityModelInput +from aixplain.modules.model.utility_model import UtilityModelInput, utility_tool from aixplain.enums import DataType - def test_run_utility_model(): - inputs = [ - UtilityModelInput(name="inputA", description="input A is the only input", type=DataType.TEXT), - ] - - output_description = "An example is 'test'" - - utility_model = ModelFactory.create_utility_model( - name="test_script", - description="This is a test script", - inputs=inputs, - code="def main(inputA: str):\n\treturn inputA", - output_examples=output_description, + utility_model = None + try: + inputs = [ + UtilityModelInput(name="inputA", description="input A is the only input", type=DataType.TEXT), + ] + + output_description = "An example is 'test'" + + utility_model = ModelFactory.create_utility_model( + name="test_script", + description="This is a test script", + inputs=inputs, + code="def main(inputA: str):\n\treturn inputA", + output_examples=output_description, + ) + + assert utility_model.id is not None + assert utility_model.inputs == inputs + assert utility_model.output_examples == output_description + + response = 
utility_model.run(data={"inputA": "test"}) + assert response.status == "SUCCESS" + assert response.data == "test" + + utility_model.code = "def main(inputA: str):\n\treturn 5" + utility_model.save() + response = utility_model.run(data={"inputA": "test"}) + assert response.status == "SUCCESS" + assert str(response.data) == "5" + finally: + if utility_model: + utility_model.delete() + +def test_utility_model_with_decorator(): + utility_model = None + try: + @utility_tool( + name="add_numbers_test name", + description="Adds two numbers together.", + inputs=[ + UtilityModelInput(name="num1", type=DataType.NUMBER, description="The first number."), + UtilityModelInput(name="num2", type=DataType.NUMBER, description="The second number.") + ], + ) + def add_numbers(num1: int, num2: int) -> int: + return num1 + num2 + + utility_model = ModelFactory.create_utility_model(code=add_numbers) + + assert utility_model.id is not None + assert len(utility_model.inputs) == 2 + assert utility_model.inputs[0].name == "num1" + assert utility_model.inputs[1].name == "num2" + + response = utility_model.run(data={"num1": 1, "num2": 2}) + assert response.status == "SUCCESS" + assert response.data == str(3) + finally: + if utility_model: + utility_model.delete() + +def test_utility_model_string_concatenation(): + utility_model = None + try: + @utility_tool( + name="concatenate_strings", + description="Concatenates two strings.", + inputs=[ + UtilityModelInput(name="str1", type=DataType.TEXT, description="The first string."), + UtilityModelInput(name="str2", type=DataType.TEXT, description="The second string."), + ] + ) + def concatenate_strings(str1: str, str2: str) -> str: + """Concatenates two strings and returns the result.""" + return str1 + str2 + + utility_model = ModelFactory.create_utility_model( + name="Concatenate Strings Test", + code=concatenate_strings, + ) + + assert utility_model.id is not None + assert len(utility_model.inputs) == 2 + assert utility_model.inputs[0].type == 
DataType.TEXT + assert utility_model.inputs[1].type == DataType.TEXT + + response = utility_model.run(data={"str1": "Hello", "str2": "World"}) + assert response.status == "SUCCESS" + assert response.data == "HelloWorld" + finally: + if utility_model: + utility_model.delete() + +def test_utility_model_code_as_string(): + utility_model = None + try: + code = f""" + @utility_tool( + name="multiply_numbers", + description="Multiply two numbers.", ) + def multiply_numbers(int1: int, int2: int) -> int: + \"\"\"Multiply two numbers and returns the result.\"\"\" + return int1 * int2 + """ + utility_model = ModelFactory.create_utility_model( + name="Multiply Numbers Test", + code=code + ) + + assert utility_model.id is not None + assert len(utility_model.inputs) == 2 - assert utility_model.id is not None - assert utility_model.inputs == inputs - assert utility_model.output_examples == output_description + response = utility_model.run(data={"int1": 2, "int2": 3}) + assert response.status == "SUCCESS" + assert response.data == str(6) + finally: + if utility_model: + utility_model.delete() - response = utility_model.run(data={"inputA": "test"}) - assert response.status == "SUCCESS" - assert response.data == "test" +def test_utility_model_simple_function(): + utility_model = None + try: + def test_string(input: str): + """test string""" + return input + + utility_model = ModelFactory.create_utility_model( + name="String Model Test", + code=test_string, + ) - utility_model.code = "def main(inputA: str):\n\treturn 5" - utility_model.update() - response = utility_model.run(data={"inputA": "test"}) - assert response.status == "SUCCESS" - assert str(response.data) == "5" + assert utility_model.id is not None + assert len(utility_model.inputs) == 1 + assert utility_model.inputs[0].type == DataType.TEXT - utility_model.delete() + response = utility_model.run(data={"input": "Hello World"}) + assert response.status == "SUCCESS" + assert response.data == "Hello World" + finally: + if 
utility_model: + utility_model.delete() diff --git a/tests/functional/pipelines/run_test.py b/tests/functional/pipelines/run_test.py index 7a1138bf..bc6304af 100644 --- a/tests/functional/pipelines/run_test.py +++ b/tests/functional/pipelines/run_test.py @@ -51,9 +51,7 @@ def test_get_pipeline(): def test_run_single_str(batchmode: bool, version: str): pipeline = PipelineFactory.list(query="SingleNodePipeline")["results"][0] - response = pipeline.run( - data="Translate this thing", batch_mode=batchmode, **{"version": version} - ) + response = pipeline.run(data="Translate this thing", batch_mode=batchmode, **{"version": version}) assert response["status"] == "SUCCESS" @@ -171,9 +169,7 @@ def test_run_multipipe_with_datasets(batchmode: bool, version: str): @pytest.mark.parametrize("version", ["2.0", "3.0"]) def test_run_segment_reconstruct(version: str): - pipeline = PipelineFactory.list( - query="Segmentation/Reconstruction Functional Test - DO NOT DELETE" - )["results"][0] + pipeline = PipelineFactory.list(query="Segmentation/Reconstruction Functional Test - DO NOT DELETE")["results"][0] response = pipeline.run( "https://aixplain-platform-assets.s3.amazonaws.com/samples/en/CPAC1x2.wav", **{"version": version}, @@ -191,9 +187,7 @@ def test_run_translation_metric(version: str): reference_id = dataset.target_data["pt"][0].id - pipeline = PipelineFactory.list( - query="Translation Metric Functional Test - DO NOT DELETE" - )["results"][0] + pipeline = PipelineFactory.list(query="Translation Metric Functional Test - DO NOT DELETE")["results"][0] response = pipeline.run( data={"TextInput": reference_id, "ReferenceInput": reference_id}, data_asset={"TextInput": data_asset_id, "ReferenceInput": data_asset_id}, @@ -208,9 +202,7 @@ def test_run_translation_metric(version: str): @pytest.mark.parametrize("version", ["2.0", "3.0"]) def test_run_metric(version: str): - pipeline = PipelineFactory.list(query="ASR Metric Functional Test - DO NOT DELETE")[ - "results" - ][0] + 
pipeline = PipelineFactory.list(query="ASR Metric Functional Test - DO NOT DELETE")["results"][0] response = pipeline.run( { "AudioInput": "https://aixplain-platform-assets.s3.amazonaws.com/samples/en/CPAC1x2.wav", @@ -277,9 +269,7 @@ def test_run_decision(input_data: str, output_data: str, version: str): @pytest.mark.parametrize("version", ["3.0"]) def test_run_script(version: str): - pipeline = PipelineFactory.list(query="Script Functional Test - DO NOT DELETE")[ - "results" - ][0] + pipeline = PipelineFactory.list(query="Script Functional Test - DO NOT DELETE")["results"][0] response = pipeline.run( "https://aixplain-platform-assets.s3.amazonaws.com/samples/en/CPAC1x2.wav", **{"version": version}, @@ -292,9 +282,7 @@ def test_run_script(version: str): @pytest.mark.parametrize("version", ["2.0", "3.0"]) def test_run_text_reconstruction(version: str): - pipeline = PipelineFactory.list(query="Text Reconstruction - DO NOT DELETE")[ - "results" - ][0] + pipeline = PipelineFactory.list(query="Text Reconstruction - DO NOT DELETE")["results"][0] response = pipeline.run("Segment A\nSegment B\nSegment C", **{"version": version}) assert response["status"] == "SUCCESS" @@ -311,9 +299,7 @@ def test_run_text_reconstruction(version: str): @pytest.mark.parametrize("version", ["3.0"]) def test_run_diarization(version: str): - pipeline = PipelineFactory.list( - query="Diarization ASR Functional Test - DO NOT DELETE" - )["results"][0] + pipeline = PipelineFactory.list(query="Diarization ASR Functional Test - DO NOT DELETE")["results"][0] response = pipeline.run( "https://aixplain-platform-assets.s3.amazonaws.com/samples/en/CPAC1x2.wav", **{"version": version}, diff --git a/tests/functional/team_agent/team_agent_functional_test.py b/tests/functional/team_agent/team_agent_functional_test.py index a402f324..58d60327 100644 --- a/tests/functional/team_agent/team_agent_functional_test.py +++ b/tests/functional/team_agent/team_agent_functional_test.py @@ -76,7 +76,11 @@ def 
test_end2end(run_input_map, delete_agents_and_team_agents): tools.append(AgentFactory.create_pipeline_tool(pipeline=tool["pipeline_id"], description=tool["description"])) agent = AgentFactory.create( - name=agent["agent_name"], description=agent["agent_name"], llm_id=agent["llm_id"], tools=tools + name=agent["agent_name"], + description=agent["agent_name"], + role=agent["agent_name"], + llm_id=agent["llm_id"], + tools=tools, ) agent.deploy() agents.append(agent) @@ -133,7 +137,11 @@ def test_draft_team_agent_update(run_input_map): tools.append(AgentFactory.create_pipeline_tool(pipeline=tool["pipeline_id"], description=tool["description"])) agent = AgentFactory.create( - name=agent["agent_name"], description=agent["agent_name"], llm_id=agent["llm_id"], tools=tools + name=agent["agent_name"], + description=agent["agent_name"], + role=agent["agent_name"], + llm_id=agent["llm_id"], + tools=tools, ) agents.append(agent) @@ -158,6 +166,7 @@ def test_fail_non_existent_llm(): AgentFactory.create( name="Test Agent", description="", + role="", llm_id="non_existent_llm", tools=[AgentFactory.create_model_tool(function=Function.TRANSLATION)], ) @@ -186,7 +195,11 @@ def test_add_remove_agents_from_team_agent(run_input_map, delete_agents_and_team tools.append(AgentFactory.create_pipeline_tool(pipeline=tool["pipeline_id"], description=tool["description"])) agent = AgentFactory.create( - name=agent["agent_name"], description=agent["agent_name"], llm_id=agent["llm_id"], tools=tools + name=agent["agent_name"], + description=agent["agent_name"], + role=agent["agent_name"], + llm_id=agent["llm_id"], + tools=tools, ) agents.append(agent) @@ -204,6 +217,7 @@ def test_add_remove_agents_from_team_agent(run_input_map, delete_agents_and_team new_agent = AgentFactory.create( name="New Agent", description="Agent added to team", + role="Agent added to team", llm_id=run_input_map["llm_id"], ) team_agent.agents.append(new_agent) diff --git a/tests/unit/agent_test.py b/tests/unit/agent_test.py 
index 10997a75..8a01bfc2 100644 --- a/tests/unit/agent_test.py +++ b/tests/unit/agent_test.py @@ -5,39 +5,45 @@ from aixplain.modules.agent import OutputFormat from aixplain.utils import config from aixplain.factories import AgentFactory -from aixplain.modules.agent import PipelineTool, ModelTool, PythonInterpreterTool, CustomPythonCodeTool +from aixplain.modules.agent.tool.pipeline_tool import PipelineTool +from aixplain.modules.agent.tool.model_tool import ModelTool +from aixplain.modules.agent.tool.python_interpreter_tool import PythonInterpreterTool +from aixplain.modules.agent.tool.custom_python_code_tool import CustomPythonCodeTool from aixplain.modules.agent.utils import process_variables from urllib.parse import urljoin from unittest.mock import patch from aixplain.enums.function import Function +from aixplain.modules.agent.agent_response import AgentResponse +from aixplain.modules.agent.agent_response_data import AgentResponseData def test_fail_no_data_query(): - agent = Agent("123", "Test Agent", "Sample Description") + agent = Agent("123", "Test Agent(-)", "Sample Description", "Test Agent Role") with pytest.raises(Exception) as exc_info: agent.run_async() assert str(exc_info.value) == "Either 'data' or 'query' must be provided." def test_fail_query_must_be_provided(): - agent = Agent("123", "Test Agent", "Sample Description") + agent = Agent("123", "Test Agent", "Sample Description", "Test Agent Role") with pytest.raises(Exception) as exc_info: agent.run_async(data={}) assert str(exc_info.value) == "When providing a dictionary, 'query' must be provided." 
def test_fail_query_as_text_when_content_not_empty(): - agent = Agent("123", "Test Agent", "Sample Description") + agent = Agent("123", "Test Agent", "Sample Description", "Test Agent Role") with pytest.raises(Exception) as exc_info: agent.run_async( data={"query": "https://aixplain-platform-assets.s3.amazonaws.com/samples/en/CPAC1x2.wav"}, content=["https://aixplain-platform-assets.s3.amazonaws.com/samples/en/CPAC1x2.wav"], ) + assert str(exc_info.value) == "When providing 'content', query must be text." def test_fail_content_exceed_maximum(): - agent = Agent("123", "Test Agent", "Sample Description") + agent = Agent("123", "Test Agent", "Sample Description", "Test Agent Role") with pytest.raises(Exception) as exc_info: agent.run_async( data={"query": "Transcribe the audios:"}, @@ -52,14 +58,14 @@ def test_fail_content_exceed_maximum(): def test_fail_key_not_found(): - agent = Agent("123", "Test Agent", "Sample Description") + agent = Agent("123", "Test Agent", "Sample Description", "Test Agent Role") with pytest.raises(Exception) as exc_info: agent.run_async(data={"query": "Translate the text: {{input1}}"}, content={"input2": "Hello, how are you?"}) assert str(exc_info.value) == "Key 'input2' not found in query." 
def test_success_query_content(): - agent = Agent("123", "Test Agent", "Sample Description") + agent = Agent("123", "Test Agent(-)", "Sample Description", "Test Agent Role") with requests_mock.Mocker() as mock: url = agent.url headers = {"x-api-key": config.TEAM_API_KEY, "Content-Type": "application/json"} @@ -67,7 +73,9 @@ def test_success_query_content(): mock.post(url, headers=headers, json=ref_response) response = agent.run_async(data={"query": "Translate the text: {{input1}}"}, content={"input1": "Hello, how are you?"}) + assert isinstance(response, AgentResponse) assert response["status"] == ref_response["status"] + assert isinstance(response.data, AgentResponseData) assert response["url"] == ref_response["data"] @@ -76,6 +84,7 @@ def test_invalid_pipelinetool(): AgentFactory.create( name="Test", description="Test Description", + role="Test Role", tools=[PipelineTool(pipeline="309851793", description="Test")], llm_id="6646261c6eb563165658bbb1", ) @@ -90,14 +99,17 @@ def test_invalid_modeltool(): def test_invalid_llm_id(): with pytest.raises(Exception) as exc_info: - AgentFactory.create(name="Test", description="", tools=[], llm_id="123") + AgentFactory.create(name="Test", description="", role="", tools=[], llm_id="123") assert str(exc_info.value) == "Large Language Model with ID '123' not found." def test_invalid_agent_name(): with pytest.raises(Exception) as exc_info: - AgentFactory.create(name="[Test]", description="", tools=[], llm_id="6646261c6eb563165658bbb1") - assert str(exc_info.value) == "Agent Creation Error: Agent name must not contain special characters." + AgentFactory.create(name="[Test]", description="", role="", tools=[], llm_id="6646261c6eb563165658bbb1") + assert ( + str(exc_info.value) + == "Agent Creation Error: Agent name contains invalid characters. Only alphanumeric characters, spaces, hyphens, and brackets are allowed." 
+ ) def test_create_agent(): @@ -117,8 +129,9 @@ def test_create_agent(): ref_response = { "id": "123", - "name": "Test Agent", + "name": "Test Agent(-)", "description": "Test Agent Description", + "role": "Test Agent Role", "teamId": "123", "version": "1.0", "status": "draft", @@ -162,8 +175,9 @@ def test_create_agent(): mock.get(url, headers=headers, json=model_ref_response) agent = AgentFactory.create( - name="Test Agent", + name="Test Agent(-)", description="Test Agent Description", + role="Test Agent Role", llm_id="6646261c6eb563165658bbb1", tools=[ AgentFactory.create_model_tool( @@ -178,6 +192,7 @@ def test_create_agent(): assert agent.name == ref_response["name"] assert agent.description == ref_response["description"] + assert agent.role == ref_response["role"] assert agent.llm_id == ref_response["llmId"] assert agent.tools[0].function.value == ref_response["assets"][0]["function"] assert agent.tools[0].description == ref_response["assets"][0]["description"] @@ -193,8 +208,9 @@ def test_create_agent(): def test_to_dict(): agent = Agent( id="", - name="Test Agent", + name="Test Agent(-)", description="Test Agent Description", + role="Test Agent Role", llm_id="6646261c6eb563165658bbb1", tools=[AgentFactory.create_model_tool(function="text-generation")], api_key="test_api_key", @@ -203,8 +219,9 @@ def test_to_dict(): agent_json = agent.to_dict() assert agent_json["id"] == "" - assert agent_json["name"] == "Test Agent" + assert agent_json["name"] == "Test Agent(-)" assert agent_json["description"] == "Test Agent Description" + assert agent_json["role"] == "Test Agent Role" assert agent_json["llmId"] == "6646261c6eb563165658bbb1" assert agent_json["assets"][0]["function"] == "text-generation" assert agent_json["assets"][0]["type"] == "model" @@ -214,8 +231,9 @@ def test_to_dict(): def test_update_success(): agent = Agent( id="123", - name="Test Agent", + name="Test Agent(-)", description="Test Agent Description", + role="Test Agent Role", 
llm_id="6646261c6eb563165658bbb1", tools=[AgentFactory.create_model_tool(function="text-generation")], ) @@ -225,8 +243,9 @@ def test_update_success(): headers = {"x-api-key": config.TEAM_API_KEY, "Content-Type": "application/json"} ref_response = { "id": "123", - "name": "Test Agent", + "name": "Test Agent(-)", "description": "Test Agent Description", + "role": "Test Agent Role", "teamId": "123", "version": "1.0", "status": "onboarded", @@ -267,6 +286,7 @@ def test_update_success(): assert agent.id == ref_response["id"] assert agent.name == ref_response["name"] assert agent.description == ref_response["description"] + assert agent.role == ref_response["role"] assert agent.llm_id == ref_response["llmId"] assert agent.tools[0].function.value == ref_response["assets"][0]["function"] @@ -274,8 +294,9 @@ def test_update_success(): def test_save_success(): agent = Agent( id="123", - name="Test Agent", + name="Test Agent(-)", description="Test Agent Description", + role="Test Agent Role", llm_id="6646261c6eb563165658bbb1", tools=[AgentFactory.create_model_tool(function="text-generation")], ) @@ -285,8 +306,9 @@ def test_save_success(): headers = {"x-api-key": config.TEAM_API_KEY, "Content-Type": "application/json"} ref_response = { "id": "123", - "name": "Test Agent", + "name": "Test Agent(-)", "description": "Test Agent Description", + "role": "Test Agent Role", "teamId": "123", "version": "1.0", "status": "onboarded", @@ -332,12 +354,13 @@ def test_save_success(): assert agent.id == ref_response["id"] assert agent.name == ref_response["name"] assert agent.description == ref_response["description"] + assert agent.role == ref_response["role"] assert agent.llm_id == ref_response["llmId"] assert agent.tools[0].function.value == ref_response["assets"][0]["function"] def test_run_success(): - agent = Agent("123", "Test Agent", "Sample Description") + agent = Agent("123", "Test Agent(-)", "Sample Description", "Test Agent Role") url = urljoin(config.BACKEND_URL, 
f"sdk/agents/{agent.id}/run") agent.url = url with requests_mock.Mocker() as mock: @@ -349,12 +372,13 @@ def test_run_success(): response = agent.run_async( data={"query": "Hello, how are you?"}, max_iterations=10, output_format=OutputFormat.MARKDOWN ) + assert isinstance(response, AgentResponse) assert response["status"] == "IN_PROGRESS" assert response["url"] == ref_response["data"] def test_run_variable_error(): - agent = Agent("123", "Test Agent", "Translate the input data into {target_language}") + agent = Agent("123", "Test Agent", "Translate the input data into {target_language}", "Test Agent Role") with pytest.raises(Exception) as exc_info: agent.run_async(data={"query": "Hello, how are you?"}, output_format=OutputFormat.MARKDOWN) assert ( @@ -383,7 +407,14 @@ def test_agent_api_key_propagation(): """Test that the api_key is properly propagated to tools when creating an agent""" custom_api_key = "custom_test_key" tool = AgentFactory.create_model_tool(function="text-generation") - agent = Agent(id="123", name="Test Agent", description="Test Description", tools=[tool], api_key=custom_api_key) + agent = Agent( + id="123", + name="Test Agent", + description="Test Description", + role="Test Agent Role", + tools=[tool], + api_key=custom_api_key, + ) # Check that the agent has the correct api_key assert agent.api_key == custom_api_key @@ -394,7 +425,7 @@ def test_agent_api_key_propagation(): def test_agent_default_api_key(): """Test that the default api_key is used when none is provided""" tool = AgentFactory.create_model_tool(function="text-generation") - agent = Agent(id="123", name="Test Agent", description="Test Description", tools=[tool]) + agent = Agent(id="123", name="Test Agent", description="Test Description", role="Test Agent Role", tools=[tool]) # Check that the agent has the default api_key assert agent.api_key == config.TEAM_API_KEY @@ -413,7 +444,9 @@ def test_agent_multiple_tools_api_key(): ), ] - agent = Agent(id="123", name="Test Agent", 
description="Test Description", tools=tools, api_key=custom_api_key) + agent = Agent( + id="123", name="Test Agent", description="Test Description", role="Test Agent Role", tools=tools, api_key=custom_api_key + ) # Check that all tools received the agent's api_key for tool in agent.tools: @@ -423,7 +456,7 @@ def test_agent_multiple_tools_api_key(): def test_agent_api_key_in_requests(): """Test that the api_key is properly used in API requests""" custom_api_key = "custom_test_key" - agent = Agent(id="123", name="Test Agent", description="Test Description", api_key=custom_api_key) + agent = Agent(id="123", name="Test Agent", description="Test Description", role="Test Agent Role", api_key=custom_api_key) with requests_mock.Mocker() as mock: url = agent.url @@ -438,3 +471,38 @@ def test_agent_api_key_in_requests(): assert mock.last_request.headers["x-api-key"] == custom_api_key assert response["status"] == "IN_PROGRESS" assert response["url"] == "test_url" + + +def test_agent_response(): + from aixplain.modules.agent.agent_response import AgentResponse, AgentResponseData + + response = AgentResponse( + data=AgentResponseData( + input="input", output="output", intermediate_steps=[], execution_stats={}, session_id="session_id" + ), + status="SUCCESS", + url="test_url", + details={"details": "test_details"}, + ) + # test getters + assert response["data"]["input"] == "input" + assert response.data.input == "input" + assert response["data"]["output"] == "output" + assert response.data.output == "output" + assert response["data"]["intermediate_steps"] == [] + assert response.data.intermediate_steps == [] + assert response["data"]["execution_stats"] == {} + assert response.data.execution_stats == {} + assert response["data"]["session_id"] == "session_id" + assert response.data.session_id == "session_id" + assert response["status"] == "SUCCESS" + assert response.status == "SUCCESS" + assert response["url"] == "test_url" + assert response["details"] == {"details": 
"test_details"} + # test setters + response["status"] = "FAILED" + assert response.status == "FAILED" + response.data["input"] = "new_input" + assert response.data.input == "new_input" + response.data.output = "new_output" + assert response["data"]["output"] == "new_output" diff --git a/tests/unit/designer_unit_test.py b/tests/unit/designer_unit_test.py index 57276a20..c8a21260 100644 --- a/tests/unit/designer_unit_test.py +++ b/tests/unit/designer_unit_test.py @@ -22,6 +22,7 @@ from aixplain.modules.pipeline.designer.pipeline import DesignerPipeline from aixplain.modules.pipeline.designer.base import find_prompt_params + def test_create_node(): pipeline = DesignerPipeline() @@ -602,9 +603,7 @@ def test_param_proxy_special_prompt_handling(): asset_node = Mock(spec=AssetNode, asset=Mock(function="text-generation")) param_proxy = ParamProxy(asset_node) - with patch( - "aixplain.modules.pipeline.designer.base.find_prompt_params" - ) as mock_find_prompt_params: + with patch("aixplain.modules.pipeline.designer.base.find_prompt_params") as mock_find_prompt_params: mock_find_prompt_params.return_value = [] param_proxy.special_prompt_handling("prompt", "hello {{foo}}") mock_find_prompt_params.assert_called_once_with("hello {{foo}}") @@ -725,17 +724,11 @@ def test_pipeline_special_prompt_validation(): assert asset_node.inputs.text.is_required is True mock_is_param_set.reset_mock() mock_is_param_set.return_value = True - with patch( - "aixplain.modules.pipeline.designer.pipeline.find_prompt_params" - ) as mock_find_prompt_params: + with patch("aixplain.modules.pipeline.designer.pipeline.find_prompt_params") as mock_find_prompt_params: mock_find_prompt_params.return_value = [] pipeline.special_prompt_validation(asset_node) - mock_is_param_set.assert_called_once_with( - asset_node, asset_node.inputs.prompt - ) - mock_find_prompt_params.assert_called_once_with( - asset_node.inputs.prompt.value - ) + mock_is_param_set.assert_called_once_with(asset_node, asset_node.inputs.prompt) 
+ mock_find_prompt_params.assert_called_once_with(asset_node.inputs.prompt.value) assert asset_node.inputs.text.is_required is True mock_is_param_set.reset_mock() @@ -750,12 +743,8 @@ def test_pipeline_special_prompt_validation(): ): pipeline.special_prompt_validation(asset_node) - mock_is_param_set.assert_called_once_with( - asset_node, asset_node.inputs.prompt - ) - mock_find_prompt_params.assert_called_once_with( - asset_node.inputs.prompt.value - ) + mock_is_param_set.assert_called_once_with(asset_node, asset_node.inputs.prompt) + mock_find_prompt_params.assert_called_once_with(asset_node.inputs.prompt.value) assert asset_node.inputs.text.is_required is False mock_is_param_set.reset_mock() @@ -766,12 +755,8 @@ def test_pipeline_special_prompt_validation(): asset_node.inputs.__contains__ = Mock(return_value=True) pipeline.special_prompt_validation(asset_node) - mock_is_param_set.assert_called_once_with( - asset_node, asset_node.inputs.prompt - ) - mock_find_prompt_params.assert_called_once_with( - asset_node.inputs.prompt.value - ) + mock_is_param_set.assert_called_once_with(asset_node, asset_node.inputs.prompt) + mock_find_prompt_params.assert_called_once_with(asset_node.inputs.prompt.value) assert asset_node.inputs.text.is_required is False diff --git a/tests/unit/model_test.py b/tests/unit/model_test.py index 1426b7d9..548d4e98 100644 --- a/tests/unit/model_test.py +++ b/tests/unit/model_test.py @@ -190,6 +190,60 @@ def test_get_assets_from_page_error(): assert "Listing Models Error: Failed to retrieve models" in str(excinfo.value) +def test_get_model_from_ids(): + from aixplain.factories.model_factory.utils import get_model_from_ids + + with requests_mock.Mocker() as mock: + model_ids = ["test-model-id-1", "test-model-id-2"] + url = urljoin(config.BACKEND_URL, f"sdk/models?ids={','.join(model_ids)}") + headers = {"Authorization": f"Token {config.AIXPLAIN_API_KEY}", "Content-Type": "application/json"} + + ref_response = { + "items": [ + { + "id": 
"test-model-id-1", + "name": "Test Model 1", + "description": "Test Description 1", + "function": {"id": "text-generation"}, + "supplier": {"id": "aiXplain"}, + "pricing": {"id": "free"}, + "version": {"id": "1.0.0"}, + }, + { + "id": "test-model-id-2", + "name": "Test Model 2", + "description": "Test Description 2", + "function": {"id": "text-generation"}, + "supplier": {"id": "aiXplain"}, + "pricing": {"id": "free"}, + "version": {"id": "1.0.0"}, + }, + ] + } + mock.get(url, headers=headers, json=ref_response) + models = get_model_from_ids(model_ids) + + assert len(models) == 2 + assert models[0].id == "test-model-id-1" + assert models[1].id == "test-model-id-2" + + +def test_list_models_error(): + model_ids = ["test-model-id-1", "test-model-id-2"] + + with pytest.raises(Exception) as excinfo: + ModelFactory.list(model_ids=model_ids, function=Function.TEXT_GENERATION, api_key=config.AIXPLAIN_API_KEY) + + assert ( + str(excinfo.value) + == "Cannot filter by function, suppliers, source languages, target languages, is finetunable, ownership, sort by when using model ids" + ) + + with pytest.raises(Exception) as excinfo: + ModelFactory.list(model_ids=model_ids, page_size=1, api_key=config.AIXPLAIN_API_KEY) + assert str(excinfo.value) == "Page size must be greater than the number of model ids" + + def test_run_sync(): model_id = "test-model-id" base_url = config.MODELS_RUN_URL diff --git a/tests/unit/team_agent_test.py b/tests/unit/team_agent_test.py index e6901cec..97a8c3fa 100644 --- a/tests/unit/team_agent_test.py +++ b/tests/unit/team_agent_test.py @@ -2,7 +2,7 @@ import requests_mock from aixplain.enums.asset_status import AssetStatus from aixplain.modules import Agent, TeamAgent -from aixplain.modules.agent import ModelTool +from aixplain.modules.agent.tool.model_tool import ModelTool from aixplain.factories import TeamAgentFactory from aixplain.factories import AgentFactory from aixplain.utils import config @@ -10,7 +10,7 @@ def test_fail_no_data_query(): - 
team_agent = TeamAgent("123", "Test Team Agent") + team_agent = TeamAgent("123", "Test Team Agent(-)") with pytest.raises(Exception) as exc_info: team_agent.run_async() assert str(exc_info.value) == "Either 'data' or 'query' must be provided." @@ -72,7 +72,7 @@ def test_sucess_query_content(): def test_fail_number_agents(): with pytest.raises(Exception) as exc_info: - TeamAgentFactory.create(name="Test Team Agent", agents=[]) + TeamAgentFactory.create(name="Test Team Agent(-)", agents=[]) assert str(exc_info.value) == "TeamAgent Onboarding Error: At least one agent must be provided." @@ -80,12 +80,13 @@ def test_fail_number_agents(): def test_to_dict(): team_agent = TeamAgent( id="123", - name="Test Team Agent", + name="Test Team Agent(-)", agents=[ Agent( id="", - name="Test Agent", + name="Test Agent(-)", description="Test Agent Description", + role="Test Agent Role", llm_id="6646261c6eb563165658bbb1", tools=[ModelTool(function="text-generation")], ) @@ -97,7 +98,7 @@ def test_to_dict(): team_agent_dict = team_agent.to_dict() assert team_agent_dict["id"] == "123" - assert team_agent_dict["name"] == "Test Team Agent" + assert team_agent_dict["name"] == "Test Team Agent(-)" assert team_agent_dict["description"] == "Test Team Agent Description" assert team_agent_dict["llmId"] == "6646261c6eb563165658bbb1" assert team_agent_dict["supervisorId"] == "6646261c6eb563165658bbb1" @@ -130,8 +131,9 @@ def test_create_team_agent(): url = urljoin(config.BACKEND_URL, "sdk/agents") ref_response = { "id": "123", - "name": "Test Agent", + "name": "Test Agent(-)", "description": "Test Agent Description", + "role": "Test Agent Role", "teamId": "123", "version": "1.0", "status": "draft", @@ -150,8 +152,9 @@ def test_create_team_agent(): mock.post(url, headers=headers, json=ref_response) agent = AgentFactory.create( - name="Test Agent", + name="Test Agent(-)", description="Test Agent Description", + role="Test Agent Role", llm_id="6646261c6eb563165658bbb1", 
tools=[ModelTool(model="6646261c6eb563165658bbb1")], ) @@ -164,7 +167,7 @@ def test_create_team_agent(): url = urljoin(config.BACKEND_URL, "sdk/agent-communities") team_ref_response = { "id": "team_agent_123", - "name": "TEST Multi agent", + "name": "TEST Multi agent(-)", "status": "draft", "teamId": 645, "description": "TEST Multi agent", @@ -180,7 +183,7 @@ def test_create_team_agent(): mock.post(url, headers=headers, json=team_ref_response) team_agent = TeamAgentFactory.create( - name="TEST Multi agent", + name="TEST Multi agent(-)", description="TEST Multi agent", use_mentalist_and_inspector=True, llm_id="6646261c6eb563165658bbb1", @@ -198,7 +201,7 @@ def test_create_team_agent(): url = urljoin(config.BACKEND_URL, f"sdk/agent-communities/{team_agent.id}") team_ref_response = { "id": "team_agent_123", - "name": "TEST Multi agent", + "name": "TEST Multi agent(-)", "status": "onboarded", "teamId": 645, "description": "TEST Multi agent", diff --git a/tests/unit/utility_test.py b/tests/unit/utility_test.py index cd901ea0..305c6a52 100644 --- a/tests/unit/utility_test.py +++ b/tests/unit/utility_test.py @@ -4,6 +4,7 @@ from urllib.parse import urljoin from aixplain.utils import config from aixplain.enums import DataType, Function +from aixplain.enums.asset_status import AssetStatus from aixplain.modules.model.utility_model import UtilityModel, UtilityModelInput from aixplain.modules.model.utils import parse_code from unittest.mock import patch @@ -17,16 +18,14 @@ def test_utility_model(): utility_model = ModelFactory.create_utility_model( name="utility_model_test", description="utility_model_test", - code="def main(originCode: str)", + code='def main(input_string:str):\n """\n Get driving directions from start_location to end_location\n """\n return f"This is the output for input: {input_string}"\n', output_examples="output_description", ) assert utility_model.id == "123" assert utility_model.name == "utility_model_test" assert utility_model.description == 
"utility_model_test" assert utility_model.code == "utility_model_test" - assert utility_model.inputs == [ - UtilityModelInput(name="originCode", description="The originCode input is a text", type=DataType.TEXT) - ] + assert utility_model.inputs == [UtilityModelInput(name="input_string", description="The input_string input is a text", type=DataType.TEXT)] assert utility_model.output_examples == "output_description" @@ -36,16 +35,17 @@ def test_utility_model_with_invalid_name(): with patch( "aixplain.modules.model.utils.parse_code", return_value=( - "def main(originCode: str)", + 'def main(input_string:str):\n """\n Get driving directions from start_location to end_location\n """\n return f"This is the output for input: {input_string}"\n', [UtilityModelInput(name="originCode", description="originCode", type=DataType.TEXT)], "utility_model_test", + "utility_model_test", ), ): with pytest.raises(Exception) as exc_info: ModelFactory.create_utility_model( name="", description="utility_model_test", - code="def main(originCode: str)", + code='def main(input_string:str):\n """\n Get driving directions from start_location to end_location\n """\n return f"This is the output for input: {input_string}"\n', inputs=[], output_examples="output_description", ) @@ -58,9 +58,10 @@ def test_utility_model_to_dict(): with patch( "aixplain.modules.model.utils.parse_code", return_value=( - "def main(originCode: str)", + 'def main(input_string:str):\n """\n Get driving directions from start_location to end_location\n """\n return f"This is the output for input: {input_string}"\n', [UtilityModelInput(name="originCode", description="originCode", type=DataType.TEXT)], "utility_model_test", + "utility_model_test", ), ): utility_model = UtilityModel( @@ -80,95 +81,103 @@ def test_utility_model_to_dict(): "code": "utility_model_test", "function": "utilities", "outputDescription": "output_description", + "status": AssetStatus.ONBOARDED.value, } def test_update_utility_model(): with 
requests_mock.Mocker() as mock: - with patch("aixplain.factories.file_factory.FileFactory.to_link", return_value="def main(originCode: str)"): - with patch("aixplain.factories.file_factory.FileFactory.upload", return_value="def main(originCode: str)"): + with patch("aixplain.factories.file_factory.FileFactory.to_link", return_value='def main(input_string:str):\n """\n Get driving directions from start_location to end_location\n """\n return f"This is the output for input: {input_string}"\n'): + with patch("aixplain.factories.file_factory.FileFactory.upload", return_value='def main(input_string:str):\n """\n Get driving directions from start_location to end_location\n """\n return f"This is the output for input: {input_string}"\n'): with patch( "aixplain.modules.model.utils.parse_code", return_value=( - "def main(originCode: str)", + 'def main(input_string:str):\n """\n Get driving directions from start_location to end_location\n """\n return f"This is the output for input: {input_string}"\n', [UtilityModelInput(name="originCode", description="originCode", type=DataType.TEXT)], "utility_model_test", + "utility_model_test", ), ): # Mock both the model existence check and update endpoints model_id = "123" mock.get(urljoin(config.BACKEND_URL, f"sdk/models/{model_id}"), status_code=200) mock.put(urljoin(config.BACKEND_URL, f"sdk/utilities/{model_id}"), json={"id": model_id}) - + utility_model = UtilityModel( id=model_id, name="utility_model_test", description="utility_model_test", - code="def main(originCode: str)", + code='def main(input_string:str):\n """\n Get driving directions from start_location to end_location\n """\n return f"This is the output for input: {input_string}"\n', output_examples="output_description", inputs=[UtilityModelInput(name="originCode", description="originCode", type=DataType.TEXT)], function=Function.UTILITIES, api_key=config.TEAM_API_KEY, ) - - with pytest.warns(DeprecationWarning, match="update\(\) is deprecated and will be removed in a 
future version. Please use save\(\) instead."): + + with pytest.warns( + DeprecationWarning, + match="update\(\) is deprecated and will be removed in a future version. Please use save\(\) instead.", + ): utility_model.description = "updated_description" utility_model.update() assert utility_model.id == model_id assert utility_model.description == "updated_description" + def test_save_utility_model(): with requests_mock.Mocker() as mock: - with patch("aixplain.factories.file_factory.FileFactory.to_link", return_value="def main(originCode: str)"): - with patch("aixplain.factories.file_factory.FileFactory.upload", return_value="def main(originCode: str)"): + with patch("aixplain.factories.file_factory.FileFactory.to_link", return_value='def main(input_string:str):\n """\n Get driving directions from start_location to end_location\n """\n return f"This is the output for input: {input_string}"\n'): + with patch("aixplain.factories.file_factory.FileFactory.upload", return_value='def main(input_string:str):\n """\n Get driving directions from start_location to end_location\n """\n return f"This is the output for input: {input_string}"\n'): with patch( "aixplain.modules.model.utils.parse_code", return_value=( - "def main(originCode: str)", + 'def main(input_string:str):\n """\n Get driving directions from start_location to end_location\n """\n return f"This is the output for input: {input_string}"\n', [UtilityModelInput(name="originCode", description="originCode", type=DataType.TEXT)], "utility_model_test", + "utility_model_test", ), ): # Mock both the model existence check and the update endpoint model_id = "123" mock.get(urljoin(config.BACKEND_URL, f"sdk/models/{model_id}"), status_code=200) mock.put(urljoin(config.BACKEND_URL, f"sdk/utilities/{model_id}"), json={"id": model_id}) - + utility_model = UtilityModel( id=model_id, name="utility_model_test", description="utility_model_test", - code="def main(originCode: str)", + code='def main(input_string:str):\n """\n Get 
driving directions from start_location to end_location\n """\n return f"This is the output for input: {input_string}"\n', output_examples="output_description", inputs=[UtilityModelInput(name="originCode", description="originCode", type=DataType.TEXT)], function=Function.UTILITIES, api_key=config.TEAM_API_KEY, ) - + import warnings + # it should not trigger any warning with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") # Trigger all warnings utility_model.description = "updated_description" utility_model.save() - + assert len(w) == 0 - + assert utility_model.id == model_id assert utility_model.description == "updated_description" def test_delete_utility_model(): with requests_mock.Mocker() as mock: - with patch("aixplain.factories.file_factory.FileFactory.to_link", return_value="def main(originCode: str)"): - with patch("aixplain.factories.file_factory.FileFactory.upload", return_value="def main(originCode: str)"): + with patch("aixplain.factories.file_factory.FileFactory.to_link", return_value='def main(input_string:str):\n """\n Get driving directions from start_location to end_location\n """\n return f"This is the output for input: {input_string}"\n'): + with patch("aixplain.factories.file_factory.FileFactory.upload", return_value='def main(input_string:str):\n """\n Get driving directions from start_location to end_location\n """\n return f"This is the output for input: {input_string}"\n'): mock.delete(urljoin(config.BACKEND_URL, "sdk/utilities/123"), status_code=200, json={"id": "123"}) utility_model = UtilityModel( id="123", name="utility_model_test", description="utility_model_test", - code="def main(originCode: str)", + code='def main(input_string:str):\n """\n Get driving directions from start_location to end_location\n """\n return f"This is the output for input: {input_string}"\n', output_examples="output_description", inputs=[UtilityModelInput(name="originCode", description="originCode", type=DataType.TEXT)], 
function=Function.UTILITIES, @@ -183,13 +192,13 @@ def test_parse_code(): with patch("aixplain.factories.file_factory.FileFactory.to_link", return_value="code_link"): with patch("aixplain.factories.file_factory.FileFactory.upload", return_value="code_link"): code = "def main(originCode: str) -> str:\n return originCode" - code_link, inputs, description = parse_code(code) + code_link, inputs, description, name = parse_code(code) assert inputs == [ UtilityModelInput(name="originCode", description="The originCode input is a text", type=DataType.TEXT) ] assert description == "" assert code_link == "code_link" - + assert name == "main" # Code is a function def main(a: int, b: int): """ @@ -200,13 +209,14 @@ def main(a: int, b: int): with patch("aixplain.factories.file_factory.FileFactory.to_link", return_value="code_link"): with patch("aixplain.factories.file_factory.FileFactory.upload", return_value="code_link"): code = main - code_link, inputs, description = parse_code(code) + code_link, inputs, description, name = parse_code(code) assert inputs == [ UtilityModelInput(name="a", description="The a input is a number", type=DataType.NUMBER), UtilityModelInput(name="b", description="The b input is a number", type=DataType.NUMBER), ] assert description == "This function adds two numbers" assert code_link == "code_link" + assert name == "main" # Code must have a main function code = "def wrong_function_name(originCode: str) -> str:\n return originCode" @@ -228,10 +238,11 @@ def main(originCode): parse_code(code) assert str(exc_info.value) == "Utility Model Error: Unsupported input type: list" + def test_validate_new_model(): """Test validation for a new model""" - with patch("aixplain.factories.file_factory.FileFactory.to_link", return_value="def main(originCode: str)"): - with patch("aixplain.factories.file_factory.FileFactory.upload", return_value="def main(originCode: str)"): + with patch("aixplain.factories.file_factory.FileFactory.to_link", return_value='def 
main(input_string:str):\n """\n Get driving directions from start_location to end_location\n """\n return f"This is the output for input: {input_string}"\n'): + with patch("aixplain.factories.file_factory.FileFactory.upload", return_value='def main(input_string:str):\n """\n Get driving directions from start_location to end_location\n """\n return f"This is the output for input: {input_string}"\n'): # Test with valid inputs utility_model = UtilityModel( id="", # Empty ID for new model @@ -243,7 +254,7 @@ def test_validate_new_model(): api_key=config.TEAM_API_KEY, ) utility_model.validate() # Should not raise any exception - + # Test with empty name utility_model.name = "" with pytest.raises(Exception) as exc_info: @@ -265,6 +276,7 @@ def test_validate_new_model(): assert str(exc_info.value) == "Utility Model Error: Code must have a main function" + def test_validate_existing_model(): """Test validation for an existing model with S3 code""" with requests_mock.Mocker() as mock: @@ -284,6 +296,7 @@ def test_validate_existing_model(): ) utility_model.validate() # Should not raise any exception + def test_model_exists_success(): """Test _model_exists when model exists""" with requests_mock.Mocker() as mock: @@ -295,13 +308,14 @@ def test_model_exists_success(): id=model_id, name="utility_model_test", description="utility_model_test", - code="def main(originCode: str)", + code='def main(input_string:str):\n """\n Get driving directions from start_location to end_location\n """\n return f"This is the output for input: {input_string}"\n', output_examples="output_description", function=Function.UTILITIES, api_key=config.TEAM_API_KEY, ) assert utility_model._model_exists() is True + def test_model_exists_failure(): """Test _model_exists when model doesn't exist""" with requests_mock.Mocker() as mock: @@ -313,7 +327,7 @@ def test_model_exists_failure(): id=model_id, name="utility_model_test", description="utility_model_test", - code="def main(originCode: str)", + code='def 
main(input_string:str):\n """\n Get driving directions from start_location to end_location\n """\n return f"This is the output for input: {input_string}"\n', output_examples="output_description", function=Function.UTILITIES, api_key=config.TEAM_API_KEY, @@ -321,13 +335,14 @@ def test_model_exists_failure(): with pytest.raises(Exception): utility_model._model_exists() + def test_model_exists_empty_id(): """Test _model_exists with empty ID""" utility_model = UtilityModel( id="", # Empty ID name="utility_model_test", description="utility_model_test", - code="def main(originCode: str)", + code='def main(input_string:str):\n """\n Get driving directions from start_location to end_location\n """\n return f"This is the output for input: {input_string}"\n', output_examples="output_description", function=Function.UTILITIES, api_key=config.TEAM_API_KEY, diff --git a/tests/unit/utility_tool_decorator_test.py b/tests/unit/utility_tool_decorator_test.py new file mode 100644 index 00000000..f9c87f02 --- /dev/null +++ b/tests/unit/utility_tool_decorator_test.py @@ -0,0 +1,76 @@ +import pytest +from aixplain.enums import DataType +from aixplain.enums.asset_status import AssetStatus +from aixplain.modules.model.utility_model import utility_tool, UtilityModelInput + +def test_utility_tool_basic_decoration(): + """Test basic decoration with minimal parameters""" + @utility_tool( + name="test_function", + description="Test function description" + ) + def test_func(input_text: str) -> str: + return input_text + + assert hasattr(test_func, '_is_utility_tool') + assert test_func._is_utility_tool is True + assert test_func._tool_name == "test_function" + assert test_func._tool_description == "Test function description" + assert test_func._tool_inputs == [] + assert test_func._tool_output_examples == "" + assert test_func._tool_status == AssetStatus.DRAFT + +def test_utility_tool_with_all_parameters(): + """Test decoration with all optional parameters""" + inputs = [ + 
UtilityModelInput(name="text_input", type=DataType.TEXT, description="A text input"), + UtilityModelInput(name="num_input", type=DataType.NUMBER, description="A number input") + ] + + @utility_tool( + name="full_test_function", + description="Full test function description", + inputs=inputs, + output_examples="Example output: Hello World", + status=AssetStatus.ONBOARDED + ) + def test_func(text_input: str, num_input: int) -> str: + return f"{text_input} {num_input}" + + assert test_func._is_utility_tool is True + assert test_func._tool_name == "full_test_function" + assert test_func._tool_description == "Full test function description" + assert len(test_func._tool_inputs) == 2 + assert test_func._tool_inputs == inputs + assert test_func._tool_output_examples == "Example output: Hello World" + assert test_func._tool_status == AssetStatus.ONBOARDED + +def test_utility_tool_function_still_callable(): + """Test that decorated function remains callable""" + @utility_tool( + name="callable_test", + description="Test function callable" + ) + def test_func(x: int, y: int) -> int: + return x + y + + assert test_func(2, 3) == 5 + assert test_func._is_utility_tool is True + +def test_utility_tool_invalid_inputs(): + """Test validation of invalid inputs""" + with pytest.raises(ValueError): + @utility_tool( + name="", # Empty name should raise error + description="Test description" + ) + def test_func(): + pass + + with pytest.raises(ValueError): + @utility_tool( + name="test_name", + description="" # Empty description should raise error + ) + def test_func(): + pass \ No newline at end of file