From 304aaac3653b7d92b1e873c83663ab0f744e3b9c Mon Sep 17 00:00:00 2001
From: Engel Nyst
Date: Fri, 17 May 2024 18:42:02 +0200
Subject: [PATCH] clean up

---
 agenthub/monologue_agent/agent.py         | 14 +++----
 agenthub/monologue_agent/utils/prompts.py | 50 +++++------------------
 opendevin/memory/condenser.py             | 40 +++++++++---------
 3 files changed, 37 insertions(+), 67 deletions(-)

diff --git a/agenthub/monologue_agent/agent.py b/agenthub/monologue_agent/agent.py
index ec37107d609..1e7f242ba31 100644
--- a/agenthub/monologue_agent/agent.py
+++ b/agenthub/monologue_agent/agent.py
@@ -106,7 +106,7 @@ def _add_default_event(self, event_dict: dict):
         Default events are not condensed and are used to give the LLM context and examples.
 
         Parameters:
-        - event_dict (dict): The event that will be added to monologue and memory
+        - event_dict: The event that will be added to monologue and memory
         """
         self.monologue.add_default_event(event_dict)
         if self.memory is not None:
@@ -118,7 +118,7 @@ def _add_event(self, event_dict: dict):
         Monologue automatically condenses when it gets too large.
 
         Parameters:
-        - event (dict): The event that will be added to monologue and memory
+        - event_dict: The event that will be added to monologue and memory
         """
 
         # truncate output if it's too long
@@ -161,7 +161,7 @@ def _initialize(self, task: str):
         Will execute again when called after reset.
 
         Parameters:
-        - task (str): The initial goal statement provided by the user
+        - task: The initial goal statement provided by the user
 
         Raises:
         - AgentNoInstructionError: If task is not provided
@@ -239,10 +239,10 @@ def step(self, state: State) -> Action:
         Modifies the current state by adding the most recent actions and observations, then prompts the model to think about it's next action to take using monologue, memory, and hint.
 
         Parameters:
-        - state (State): The current state based on previous steps taken
+        - state: The current state based on previous steps taken
 
         Returns:
-        - Action: The next action to take based on LLM response
+        - The next action to take based on LLM response
         """
 
         goal = state.get_current_user_intent()
@@ -276,10 +276,10 @@ def search_memory(self, query: str) -> list[str]:
         Uses search to produce top 10 results.
 
         Parameters:
-        - query (str): The query that we want to find related memories for
+        - query: The query that we want to find related memories for
 
         Returns:
-        - list[str]: A list of top 10 text results that matched the query
+        - A list of top 10 text results that matched the query
         """
         if self.memory is None:
             return []
diff --git a/agenthub/monologue_agent/utils/prompts.py b/agenthub/monologue_agent/utils/prompts.py
index 9fda5239e7e..b58fab17fdd 100644
--- a/agenthub/monologue_agent/utils/prompts.py
+++ b/agenthub/monologue_agent/utils/prompts.py
@@ -108,7 +108,7 @@ def get_summarize_prompt(default_events: list[dict], recent_events: list[dict]):
     Gets the prompt for summarizing the monologue
 
     Returns:
-    - str: A formatted string with the current monologue within the prompt
+    - A formatted string with the current monologue within the prompt
     """
     return MONOLOGUE_SUMMARY_PROMPT % {
         'monologue': json.dumps(
@@ -128,9 +128,9 @@ def get_action_prompt(
     Gets the action prompt formatted with appropriate values.
 
     Parameters:
-    - task (str): The current task the agent is trying to accomplish
-    - thoughts (list[dict]): The agent's current thoughts
-    - background_commands_obs (list[CmdOutputObservation]): list of all observed background commands running
+    - task: The current task the agent is trying to accomplish
+    - thoughts: The agent's current thoughts
+    - background_commands_obs: list of all observed background commands running
 
     Returns:
     - str: Formatted prompt string with hint, task, monologue, and background included
@@ -170,10 +170,10 @@ def format_background_commands(
     Formats the background commands for sending in the prompt
 
     Parameters:
-    - background_commands_obs (list[CmdOutputObservation]): list of all background commands running
+    - background_commands_obs: list of all background commands running
 
     Returns:
-    - str: Formatted string with all background commands
+    - Formatted string with all background commands
     """
     if background_commands_obs is None or len(background_commands_obs) == 0:
         return ''
@@ -191,10 +191,10 @@ def parse_action_response(orig_response: str) -> Action:
     Parses a string to find an action within it
 
     Parameters:
-    - response (str): The string to be parsed
+    - orig_response: The string to be parsed
 
     Returns:
-    - Action: The action that was found in the response string
+    - The action that was found in the response string
     """
     # attempt to load the JSON dict from the response
     action_dict = json.loads(orig_response)
@@ -211,40 +211,10 @@ def parse_summary_response(response: str) -> list[dict]:
     Parses a summary of the monologue
 
     Parameters:
-    - response (str): The response string to be parsed
+    - response: The response string to be parsed
 
     Returns:
-    - list[dict]: The list of summaries output by the model
+    - The list of summaries output by the model
     """
     parsed = json.loads(response)
     return parsed['new_monologue']
-
-
-def generate_action_prompt_with_defaults(**kwargs):
-    # prepare the placeholders dict
-    placeholders = {
-        'task': '%(task)s',
-        'background_commands': '%(background_commands)s',
-        'hint': '%(hint)s',
-        'user': '%(user)s',
-        'timeout': '%(timeout)s',
-        'workspace_mount_path_in_sandbox': '%(workspace_mount_path_in_sandbox)s',
-    }
-
-    # update the placeholders with the provided values
-    monologue = []
-    formatted_kwargs = {}
-    for key, value in kwargs.items():
-        if key in ['default_events', 'recent_events'] and value is not None:
-            monologue.extend(value)
-        elif key == 'background_commands':
-            formatted_kwargs[key] = format_background_commands(value)
-        else:
-            formatted_kwargs[key] = value
-    formatted_kwargs['monologue'] = json.dumps(monologue, indent=2)
-
-    placeholders.update(formatted_kwargs)
-
-    # format the template with what we have
-    # FIXME the split of default and recent events
-    return ACTION_PROMPT % placeholders
diff --git a/opendevin/memory/condenser.py b/opendevin/memory/condenser.py
index 37f8acc573b..25f43c03278 100644
--- a/opendevin/memory/condenser.py
+++ b/opendevin/memory/condenser.py
@@ -22,12 +22,12 @@ def __init__(
         action_prompt is a callable that returns the prompt that is about to be sent to the LLM.
         The prompt callable will be called with default events and recent events as arguments.
 
-        summarize_prompt, which is optional, is a callable that returns a specific prompt to tell the LLM to summarize the recent events.
+        summarize_prompt is a callable that returns a specific prompt that tells the LLM to summarize the recent events.
         The prompt callable will be called with default events and recent events as arguments.
 
         Parameters:
-        - action_prompt (Callable): The function to generate an action prompt. The function should accept core events and recent events as arguments.
-        - summarize_prompt (Callable): The function to generate a summarize prompt. The function should accept core events and recent events as arguments.
+        - action_prompt: The function to generate an action prompt. The function should accept default events and recent events as arguments.
+        - summarize_prompt: The function to generate a summarize prompt. The function should accept default events and recent events as arguments.
         """
         self.action_prompt = action_prompt
         self.summarize_prompt = summarize_prompt
@@ -48,13 +48,13 @@ def condense(
         Returns unmodified list of recent events if it is already short enough.
 
         Parameters:
-        - llm (LLM): LLM to be used for summarization.
-        - default_events (list[dict]): List of default events that should remain unchanged.
-        - recent_events (list[dict]): List of recent events that may be condensed.
-        - background_commands (list): List of background commands to be included in the prompt.
+        - llm: LLM to be used for summarization.
+        - default_events: List of default events that should remain unchanged.
+        - recent_events: List of recent events that may be condensed.
+        - background_commands: List of background commands to be included in the prompt.
 
         Returns:
-        - list[dict] | bool: The condensed recent events if successful, unmodified list if unnecessary, or False if condensation failed.
+        - The condensed recent events if successful, unmodified list if unnecessary, or False if condensation failed.
         """
 
         if not background_commands:
@@ -115,12 +115,12 @@ def _attempt_condense(
         Attempts to condense the recent events by splitting them in half and summarizing the first half.
 
         Parameters:
-        - llm (LLM): The llm to use for summarization.
-        - default_events (list[dict]): The list of default events to include in the prompt.
-        - recent_events (list[dict]): The list of recent events to include in the prompt.
+        - llm: The llm to use for summarization.
+        - default_events: The list of default events to include in the prompt.
+        - recent_events: The list of recent events to include in the prompt.
 
         Returns:
-        - list[dict] | None: The condensed recent events if successful, None otherwise.
+        - The condensed recent events if successful, None otherwise.
         """
 
         # Split events
@@ -157,14 +157,14 @@ def needs_condense(self, **kwargs):
         Checks if the prompt needs to be condensed based on the token count against the limits of the llm passed in the call.
 
         Parameters:
-        - llm (LLM): The llm to use for checking the token count.
-        - action_prompt (str, optional): The prompt to check for token count. If not provided, it will attempt to generate it using the available arguments.
-        - default_events (list[dict], optional): The list of default events to include in the prompt.
-        - recent_events (list[dict], optional): The list of recent events to include in the prompt.
-        - background_commands (list, optional): The list of background commands to include in the prompt.
+        - llm: The llm to use for checking the token count.
+        - action_prompt: The prompt to check for token count. If not provided, it will attempt to generate it using the available arguments.
+        - default_events: The list of default events to include in the prompt.
+        - recent_events: The list of recent events to include in the prompt.
+        - background_commands: The list of background commands to include in the prompt.
 
         Returns:
-        - bool: True if the prompt needs to be condensed, False otherwise.
+        - True if the prompt needs to be condensed, False otherwise.
         """
         llm = kwargs.get('llm')
         action_prompt = kwargs.get('action_prompt')
@@ -191,9 +191,9 @@ def get_token_limit(self, llm: LLM) -> int:
         Returns the token limit to use for the llm passed in the call.
 
         Parameters:
-        - llm (LLM): The llm to get the token limit from.
+        - llm: The llm to get the token limit from.
 
         Returns:
-        - int: The token limit of the llm.
+        - The token limit of the llm.
         """
         return llm.max_input_tokens - MAX_TOKEN_COUNT_PADDING
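-- 

Below is a minimal usage sketch of the condenser API documented above, assuming the constructor and method signatures implied by these docstrings. The import paths, model name, prompt bodies, and event dicts are illustrative placeholders, not code taken from the repository.

from opendevin.llm.llm import LLM  # import path assumed
from opendevin.memory.condenser import MemoryCondenser


def action_prompt(default_events: list[dict], recent_events: list[dict]) -> str:
    # Stand-in for prompts.get_action_prompt: called with default events and
    # recent events, returns the prompt that is about to be sent to the LLM.
    return 'ACTION PROMPT over %d events' % (len(default_events) + len(recent_events))


def summarize_prompt(default_events: list[dict], recent_events: list[dict]) -> str:
    # Stand-in for prompts.get_summarize_prompt.
    return 'Summarize the recent events.'


llm = LLM(model='gpt-4o')  # placeholder model
condenser = MemoryCondenser(action_prompt, summarize_prompt)

default_events = [{'action': 'message', 'args': {'content': 'example kept verbatim'}}]
recent_events = [{'action': 'run', 'args': {'command': 'ls'}}]

# needs_condense compares the rendered prompt's token count against
# get_token_limit(llm), i.e. llm.max_input_tokens - MAX_TOKEN_COUNT_PADDING.
if condenser.needs_condense(
    llm=llm, default_events=default_events, recent_events=recent_events
):
    condensed = condenser.condense(
        llm=llm,
        default_events=default_events,
        recent_events=recent_events,
        background_commands=[],
    )
    # condense returns the condensed events, the unmodified recent list if no
    # condensation was needed, or False if condensation failed.
    if condensed is not False:
        recent_events = condensed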