From c7c1cf5231926d2c8944bf275e48d786be57c446 Mon Sep 17 00:00:00 2001
From: ahmetgunduz
Date: Wed, 22 Oct 2025 14:59:00 +0000
Subject: [PATCH] Fix issubclass() TypeError and make temperature/top_p optional

- Fix issubclass() TypeError in Agent.run_async when expected_output is not
  a class. Add an isinstance(expected_output, type) check before calling
  issubclass() to prevent a TypeError when expected_output is a tuple or
  other non-class value.
  Fixes test_run_normalizes_expected_output_tuple_to_list_in_execution_params

- Fix the instructions fallback in AgentFactory.create. When instructions is
  None, fall back to description instead of converting None to the string
  'None'.
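For the issubclass() fix above, a minimal sketch of the new guard (the
ExpectedOutput model and the sample values are hypothetical, assuming
pydantic v2; they are not part of this patch):

    from pydantic import BaseModel

    class ExpectedOutput(BaseModel):  # hypothetical example model
        answer: str

    for value in (ExpectedOutput, ("text", "json"), "plain text"):
        # isinstance(value, type) filters out non-classes before issubclass(),
        # which raises TypeError when its first argument is not a class.
        if value is not None and isinstance(value, type) and issubclass(value, BaseModel):
            print("schema:", value.model_json_schema())
        else:
            print("passed through unchanged:", value)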
" + "Use 'workflow_tasks' instead.", DeprecationWarning, stacklevel=2, ) @@ -149,7 +160,7 @@ def create( "name": name, "assets": [build_tool_payload(tool) for tool in tools], "description": to_literal_text(description), - "instructions": to_literal_text(instructions) or description, + "instructions": to_literal_text(instructions) if instructions is not None else description, "supplier": supplier, "version": version, "llmId": llm_id, @@ -232,6 +243,17 @@ def create_workflow_task( expected_output: Text, dependencies: Optional[List[Text]] = None, ) -> WorkflowTask: + """Create a new workflow task for an agent. + + Args: + name (Text): Name of the task + description (Text): Description of what the task does + expected_output (Text): Expected output format or content + dependencies (Optional[List[Text]], optional): List of task names this task depends on. Defaults to None. + + Returns: + WorkflowTask: Created workflow task object + """ dependencies = [] if dependencies is None else list(dependencies) return WorkflowTask( name=name, @@ -242,6 +264,11 @@ def create_workflow_task( @classmethod def create_task(cls, *args, **kwargs): + """Create a workflow task (deprecated - use create_workflow_task instead). + + .. deprecated:: + Use :meth:`create_workflow_task` instead. + """ warnings.warn( "The 'create_task' method is deprecated and will be removed in a future version. " "Use 'create_workflow_task' instead.", @@ -355,7 +382,7 @@ def create_sql_tool( tables: Optional[List[Text]] = None, enable_commit: bool = False, ) -> SQLTool: - """Create a new SQL tool + """Create a new SQL tool. Args: name (Text): name of the tool @@ -365,6 +392,7 @@ def create_sql_tool( schema (Optional[Text], optional): database schema description tables (Optional[List[Text]], optional): table names to work with (optional) enable_commit (bool, optional): enable to modify the database (optional) + Returns: SQLTool: created SQLTool @@ -407,7 +435,9 @@ def create_sql_tool( # Already the correct type, no conversion needed pass else: - raise SQLToolError(f"Source type must be either a string or DatabaseSourceType enum, got {type(source_type)}") + raise SQLToolError( + f"Source type must be either a string or DatabaseSourceType enum, got {type(source_type)}" + ) database_path = None # Final database path to pass to SQLTool diff --git a/aixplain/modules/agent/__init__.py b/aixplain/modules/agent/__init__.py index 33d0a7fb..11e176d3 100644 --- a/aixplain/modules/agent/__init__.py +++ b/aixplain/modules/agent/__init__.py @@ -325,6 +325,7 @@ def run( output_format (OutputFormat, optional): response format. If not provided, uses the format set during initialization. expected_output (Union[BaseModel, Text, dict], optional): expected output. Defaults to None. trace_request (bool, optional): return the request id for tracing the request. Defaults to False. + Returns: Dict: parsed output from model """ @@ -427,6 +428,7 @@ def run_async( output_format (ResponseFormat, optional): response format. Defaults to TEXT. evolve (Union[Dict[str, Any], EvolveParam, None], optional): evolve the agent configuration. Can be a dictionary, EvolveParam instance, or None. trace_request (bool, optional): return the request id for tracing the request. Defaults to False. 
---
 aixplain/factories/agent_factory/__init__.py | 46 +++++++++++++++----
 aixplain/modules/agent/__init__.py           |  4 +-
 aixplain/modules/model/llm_model.py          | 47 ++++++++++++--------
 3 files changed, 69 insertions(+), 28 deletions(-)

diff --git a/aixplain/factories/agent_factory/__init__.py b/aixplain/factories/agent_factory/__init__.py
index 6f3aed51..8d9cc129 100644
--- a/aixplain/factories/agent_factory/__init__.py
+++ b/aixplain/factories/agent_factory/__init__.py
@@ -1,7 +1,4 @@
-__author__ = "lucaspavanelli"
-
-"""
-Copyright 2024 The aiXplain SDK authors
+"""Copyright 2024 The aiXplain SDK authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -21,6 +18,8 @@
 Agent Factory Class
 """
 
+__author__ = "lucaspavanelli"
+
 import json
 import logging
 import warnings
@@ -50,9 +49,18 @@
 def to_literal_text(x):
+    """Convert value to literal text, escaping braces for string formatting.
+
+    Args:
+        x: Value to convert (dict, list, or any other type)
+
+    Returns:
+        str: Escaped string representation
+    """
     s = json.dumps(x, ensure_ascii=False, indent=2) if isinstance(x, (dict, list)) else str(x)
     return s.replace("{", "{{").replace("}", "}}")
 
+
 class AgentFactory:
     """Factory class for creating and managing agents in the aiXplain system.
 
@@ -94,9 +102,11 @@ def create(
             api_key (Text, optional): team/user API key. Defaults to config.TEAM_API_KEY.
             supplier (Union[Dict, Text, Supplier, int], optional): owner of the agent. Defaults to "aiXplain".
             version (Optional[Text], optional): version of the agent. Defaults to None.
+            tasks (List[WorkflowTask], optional): Deprecated. Use workflow_tasks instead. Defaults to None.
             workflow_tasks (List[WorkflowTask], optional): list of tasks for the agent. Defaults to [].
             output_format (OutputFormat, optional): default output format for agent responses. Defaults to OutputFormat.TEXT.
             expected_output (Union[BaseModel, Text, dict], optional): expected output. Defaults to None.
+
         Returns:
             Agent: created Agent
         """
@@ -137,7 +147,8 @@ def create(
 
         if tasks is not None:
             warnings.warn(
-                "The 'tasks' parameter is deprecated and will be removed in a future version. " "Use 'workflow_tasks' instead.",
+                "The 'tasks' parameter is deprecated and will be removed in a future version. "
+                "Use 'workflow_tasks' instead.",
                 DeprecationWarning,
                 stacklevel=2,
             )
@@ -149,7 +160,7 @@ def create(
             "name": name,
             "assets": [build_tool_payload(tool) for tool in tools],
             "description": to_literal_text(description),
-            "instructions": to_literal_text(instructions) or description,
+            "instructions": to_literal_text(instructions) if instructions is not None else description,
             "supplier": supplier,
             "version": version,
             "llmId": llm_id,
@@ -232,6 +243,17 @@ def create_workflow_task(
         expected_output: Text,
         dependencies: Optional[List[Text]] = None,
     ) -> WorkflowTask:
+        """Create a new workflow task for an agent.
+
+        Args:
+            name (Text): Name of the task
+            description (Text): Description of what the task does
+            expected_output (Text): Expected output format or content
+            dependencies (Optional[List[Text]], optional): List of task names this task depends on. Defaults to None.
+
+        Returns:
+            WorkflowTask: Created workflow task object
+        """
         dependencies = [] if dependencies is None else list(dependencies)
         return WorkflowTask(
             name=name,
@@ -242,6 +264,11 @@ def create_workflow_task(
 
     @classmethod
     def create_task(cls, *args, **kwargs):
+        """Create a workflow task (deprecated - use create_workflow_task instead).
+
+        .. deprecated::
+            Use :meth:`create_workflow_task` instead.
+        """
         warnings.warn(
             "The 'create_task' method is deprecated and will be removed in a future version. " "Use 'create_workflow_task' instead.",
             DeprecationWarning,
@@ -355,7 +382,7 @@ def create_sql_tool(
         tables: Optional[List[Text]] = None,
         enable_commit: bool = False,
     ) -> SQLTool:
-        """Create a new SQL tool
+        """Create a new SQL tool.
 
         Args:
             name (Text): name of the tool
@@ -365,6 +392,7 @@ def create_sql_tool(
             schema (Optional[Text], optional): database schema description
             tables (Optional[List[Text]], optional): table names to work with (optional)
             enable_commit (bool, optional): enable to modify the database (optional)
+
         Returns:
             SQLTool: created SQLTool
 
@@ -407,7 +435,9 @@ def create_sql_tool(
             # Already the correct type, no conversion needed
             pass
         else:
-            raise SQLToolError(f"Source type must be either a string or DatabaseSourceType enum, got {type(source_type)}")
+            raise SQLToolError(
+                f"Source type must be either a string or DatabaseSourceType enum, got {type(source_type)}"
+            )
 
         database_path = None  # Final database path to pass to SQLTool
 
diff --git a/aixplain/modules/agent/__init__.py b/aixplain/modules/agent/__init__.py
index 33d0a7fb..11e176d3 100644
--- a/aixplain/modules/agent/__init__.py
+++ b/aixplain/modules/agent/__init__.py
@@ -325,6 +325,7 @@ def run(
             output_format (OutputFormat, optional): response format. If not provided, uses the format set during initialization.
             expected_output (Union[BaseModel, Text, dict], optional): expected output. Defaults to None.
             trace_request (bool, optional): return the request id for tracing the request. Defaults to False.
+
         Returns:
             Dict: parsed output from model
         """
@@ -427,6 +428,7 @@ def run_async(
             output_format (ResponseFormat, optional): response format. Defaults to TEXT.
             evolve (Union[Dict[str, Any], EvolveParam, None], optional): evolve the agent configuration. Can be a dictionary, EvolveParam instance, or None.
             trace_request (bool, optional): return the request id for tracing the request. Defaults to False.
+
         Returns:
             dict: polling URL in response
         """
@@ -490,7 +492,7 @@ def run_async(
         input_data = process_variables(query, data, parameters, self.instructions)
         if expected_output is None:
             expected_output = self.expected_output
-        if expected_output is not None and issubclass(expected_output, BaseModel):
+        if expected_output is not None and isinstance(expected_output, type) and issubclass(expected_output, BaseModel):
             expected_output = expected_output.model_json_schema()
         expected_output = normalize_expected_output(expected_output)
         # Use instance output_format if none provided
diff --git a/aixplain/modules/model/llm_model.py b/aixplain/modules/model/llm_model.py
index bdc6d76c..e377cc8a 100644
--- a/aixplain/modules/model/llm_model.py
+++ b/aixplain/modules/model/llm_model.py
@@ -1,7 +1,4 @@
-__author__ = "lucaspavanelli"
-
-"""
-Copyright 2024 The aiXplain SDK authors
+"""Copyright 2024 The aiXplain SDK authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -20,6 +17,8 @@
 Description:
     Large Language Model Class
 """
+
+__author__ = "lucaspavanelli"
 import time
 import logging
 import traceback
@@ -63,7 +62,7 @@ def __init__(
         function: Optional[Function] = None,
         is_subscribed: bool = False,
         cost: Optional[Dict] = None,
-        temperature: float = 0.001,
+        temperature: Optional[float] = None,
         function_type: Optional[FunctionType] = FunctionType.AI,
         **additional_info,
     ) -> None:
@@ -79,14 +78,16 @@ def __init__(
             function (Function, optional): Model's AI function. Must be Function.TEXT_GENERATION.
             is_subscribed (bool, optional): Whether the user is subscribed. Defaults to False.
             cost (Dict, optional): Cost of the model. Defaults to None.
-            temperature (float, optional): Default temperature for text generation. Defaults to 0.001.
+            temperature (Optional[float], optional): Default temperature for text generation. Defaults to None.
             function_type (FunctionType, optional): Type of the function. Defaults to FunctionType.AI.
             **additional_info: Any additional model info to be saved.
 
         Raises:
             AssertionError: If function is not Function.TEXT_GENERATION.
         """
-        assert function == Function.TEXT_GENERATION, "LLM only supports large language models (i.e. text generation function)"
+        assert function == Function.TEXT_GENERATION, (
+            "LLM only supports large language models (i.e. text generation function)"
+        )
         super().__init__(
             id=id,
             name=name,
@@ -112,7 +113,7 @@ def run(
         history: Optional[List[Dict]] = None,
         temperature: Optional[float] = None,
         max_tokens: int = 128,
-        top_p: float = 1.0,
+        top_p: Optional[float] = None,
         name: Text = "model_process",
         timeout: float = 300,
         parameters: Optional[Dict] = None,
@@ -139,20 +140,20 @@ def run(
                 Defaults to None.
             max_tokens (int, optional): Maximum number of tokens to generate.
                 Defaults to 128.
-            top_p (float, optional): Nucleus sampling parameter. Only tokens with cumulative
-                probability < top_p are considered. Defaults to 1.0.
+            top_p (Optional[float], optional): Nucleus sampling parameter. Only tokens with cumulative
+                probability < top_p are considered. Defaults to None.
             name (Text, optional): Identifier for this model run. Useful for logging.
                 Defaults to "model_process".
             timeout (float, optional): Maximum time in seconds to wait for completion.
                 Defaults to 300.
             parameters (Optional[Dict], optional): Additional model-specific parameters.
                 Defaults to None.
             retry_interval (float, optional): Time in seconds between polling attempts.
                 Defaults to 0.5.
             stream (bool, optional): Whether to stream the model's output tokens.
                 Defaults to False.
-            response_format (Optional[Union[str, dict, BaseModel]], optional): 
-                Specifies the desired output structure or format of the model’s response. 
+            response_format (Optional[Union[str, dict, BaseModel]], optional):
+                Specifies the desired output structure or format of the model’s response.
 
         Returns:
             Union[ModelResponse, ModelResponseStreamer]: If stream=False, returns a ModelResponse
@@ -169,9 +170,12 @@ def run(
         parameters.setdefault("context", context)
         parameters.setdefault("prompt", prompt)
         parameters.setdefault("history", history)
-        parameters.setdefault("temperature", temperature if temperature is not None else self.temperature)
+        temp_value = temperature if temperature is not None else self.temperature
+        if temp_value is not None:
+            parameters.setdefault("temperature", temp_value)
         parameters.setdefault("max_tokens", max_tokens)
-        parameters.setdefault("top_p", top_p)
+        if top_p is not None:
+            parameters.setdefault("top_p", top_p)
         parameters.setdefault("response_format", response_format)
 
         if stream:
@@ -214,7 +218,7 @@ def run_async(
         history: Optional[List[Dict]] = None,
         temperature: Optional[float] = None,
         max_tokens: int = 128,
-        top_p: float = 1.0,
+        top_p: Optional[float] = None,
         name: Text = "model_process",
         parameters: Optional[Dict] = None,
         response_format: Optional[Text] = None,
@@ -238,12 +242,14 @@ def run_async(
                 Defaults to None.
             max_tokens (int, optional): Maximum number of tokens to generate.
                 Defaults to 128.
-            top_p (float, optional): Nucleus sampling parameter. Only tokens with cumulative
-                probability < top_p are considered. Defaults to 1.0.
+            top_p (Optional[float], optional): Nucleus sampling parameter. Only tokens with cumulative
+                probability < top_p are considered. Defaults to None.
             name (Text, optional): Identifier for this model run. Useful for logging.
                 Defaults to "model_process".
             parameters (Optional[Dict], optional): Additional model-specific parameters.
                 Defaults to None.
+            response_format (Optional[Text], optional): Desired output format specification.
+                Defaults to None.
 
         Returns:
             ModelResponse: A response object containing:
@@ -266,9 +272,12 @@ def run_async(
         parameters.setdefault("context", context)
         parameters.setdefault("prompt", prompt)
         parameters.setdefault("history", history)
-        parameters.setdefault("temperature", temperature if temperature is not None else self.temperature)
+        temp_value = temperature if temperature is not None else self.temperature
+        if temp_value is not None:
+            parameters.setdefault("temperature", temp_value)
         parameters.setdefault("max_tokens", max_tokens)
-        parameters.setdefault("top_p", top_p)
+        if top_p is not None:
+            parameters.setdefault("top_p", top_p)
         parameters.setdefault("response_format", response_format)
         payload = build_payload(data=data, parameters=parameters)
         response = call_run_endpoint(payload=payload, url=url, api_key=self.api_key)
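After this patch, a call site behaves as sketched below (a sketch only; the
model id is a placeholder and the commented calls are illustrative, not part
of the patch):

    from aixplain.enums import Function
    from aixplain.modules.model.llm_model import LLM

    # Hypothetical instantiation: with no temperature argument, the instance
    # default is now None rather than 0.001.
    llm = LLM(id="<model-id>", name="example-llm", function=Function.TEXT_GENERATION)
    assert llm.temperature is None

    # run() now omits temperature and top_p from the request unless provided:
    # llm.run(data="Hello")                   # sends neither parameter
    # llm.run(data="Hello", temperature=0.7)  # sends temperature only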