
Commit

clean up
enyst committed May 17, 2024
1 parent 4c48858 commit 52b1ba1
Showing 3 changed files with 36 additions and 66 deletions.
14 changes: 7 additions & 7 deletions agenthub/monologue_agent/agent.py
@@ -106,7 +106,7 @@ def _add_default_event(self, event_dict: dict):
Default events are not condensed and are used to give the LLM context and examples.
Parameters:
- event_dict (dict): The event that will be added to monologue and memory
- event_dict: The event that will be added to monologue and memory
"""
self.monologue.add_default_event(event_dict)
if self.memory is not None:
@@ -118,7 +118,7 @@ def _add_event(self, event_dict: dict):
Monologue automatically condenses when it gets too large.
Parameters:
- event (dict): The event that will be added to monologue and memory
- event_dict: The event that will be added to monologue and memory
"""

# truncate output if it's too long
@@ -161,7 +161,7 @@ def _initialize(self, task: str):
Will execute again when called after reset.
Parameters:
- task (str): The initial goal statement provided by the user
- task: The initial goal statement provided by the user
Raises:
- AgentNoInstructionError: If task is not provided
@@ -239,10 +239,10 @@ def step(self, state: State) -> Action:
Modifies the current state by adding the most recent actions and observations, then prompts the model to think about its next action using monologue, memory, and hint.
Parameters:
- state (State): The current state based on previous steps taken
- state: The current state based on previous steps taken
Returns:
- Action: The next action to take based on LLM response
- The next action to take based on LLM response
"""

goal = state.get_current_user_intent()
@@ -276,10 +276,10 @@ def search_memory(self, query: str) -> list[str]:
Uses search to produce top 10 results.
Parameters:
- query (str): The query that we want to find related memories for
- The query that we want to find related memories for
Returns:
- list[str]: A list of top 10 text results that matched the query
- A list of top 10 text results that matched the query
"""
if self.memory is None:
return []
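
For context on the behavior the _add_event docstring above refers to (output is truncated before it is added, and the monologue condenses itself when it grows too large), here is a minimal, self-contained sketch of such a truncation step. The 10,000-character cap, the event layout, and the helper name are assumptions for illustration, not values taken from this commit.

MAX_OUTPUT_LENGTH = 10_000  # assumed cap, not from the commit

def truncate_event_output(event_dict: dict) -> dict:
    # Trim overly long command output before the event enters the monologue.
    output = event_dict.get('args', {}).get('output', '')
    if len(output) > MAX_OUTPUT_LENGTH:
        event_dict['args']['output'] = output[:MAX_OUTPUT_LENGTH] + '... (truncated)'
    return event_dict

truncated = truncate_event_output({'args': {'output': 'x' * 20_000}})
print(len(truncated['args']['output']))  # 10_015: the cap plus the marker
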
48 changes: 9 additions & 39 deletions agenthub/monologue_agent/utils/prompts.py
@@ -108,7 +108,7 @@ def get_summarize_prompt(default_events: list[dict], recent_events: list[dict]):
Gets the prompt for summarizing the monologue
Returns:
- str: A formatted string with the current monologue within the prompt
- A formatted string with the current monologue within the prompt
"""
return MONOLOGUE_SUMMARY_PROMPT % {
'monologue': json.dumps(

[Codecov / codecov/patch check warning on line 114 in agenthub/monologue_agent/utils/prompts.py: added line #L114 was not covered by tests]
@@ -128,9 +128,9 @@ def get_action_prompt(
Gets the action prompt formatted with appropriate values.
Parameters:
- task (str): The current task the agent is trying to accomplish
- thoughts (list[dict]): The agent's current thoughts
- background_commands_obs (list[CmdOutputObservation]): list of all observed background commands running
- task: The current task the agent is trying to accomplish
- thoughts: The agent's current thoughts
- background_commands_obs: list of all observed background commands running
Returns:
- str: Formatted prompt string with hint, task, monologue, and background included
@@ -173,7 +173,7 @@ def format_background_commands(
- background_commands_obs (list[CmdOutputObservation]): list of all background commands running
Returns:
- str: Formatted string with all background commands
- Formatted string with all background commands
"""
if background_commands_obs is None or len(background_commands_obs) == 0:
return ''
@@ -191,10 +191,10 @@ def parse_action_response(orig_response: str) -> Action:
Parses a string to find an action within it
Parameters:
- response (str): The string to be parsed
- orig_response: The string to be parsed
Returns:
- Action: The action that was found in the response string
- The action that was found in the response string
"""
# attempt to load the JSON dict from the response
action_dict = json.loads(orig_response)
@@ -211,40 +211,10 @@ def parse_summary_response(response: str) -> list[dict]:
Parses a summary of the monologue
Parameters:
- response (str): The response string to be parsed
- response: The response string to be parsed
Returns:
- list[dict]: The list of summaries output by the model
- The list of summaries output by the model
"""
parsed = json.loads(response)
return parsed['new_monologue']
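
As a usage illustration of parse_summary_response above: the model is expected to return a JSON object whose 'new_monologue' key holds the condensed event list. The payload below is invented for the example; only the key name comes from the code.

import json

example_response = json.dumps({
    'new_monologue': [
        {'action': 'summarize', 'args': {'summary': 'Cloned the repo and ran the test suite'}},
    ]
})
# Equivalent to parse_summary_response(example_response)
print(json.loads(example_response)['new_monologue'])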


def generate_action_prompt_with_defaults(**kwargs):
    # prepare the placeholders dict
    placeholders = {
        'task': '%(task)s',
        'background_commands': '%(background_commands)s',
        'hint': '%(hint)s',
        'user': '%(user)s',
        'timeout': '%(timeout)s',
        'workspace_mount_path_in_sandbox': '%(workspace_mount_path_in_sandbox)s',
    }

    # update the placeholders with the provided values
    monologue = []
    formatted_kwargs = {}
    for key, value in kwargs.items():
        if key in ['default_events', 'recent_events'] and value is not None:
            monologue.extend(value)
        elif key == 'background_commands':
            formatted_kwargs[key] = format_background_commands(value)
        else:
            formatted_kwargs[key] = value
    formatted_kwargs['monologue'] = json.dumps(monologue, indent=2)

    placeholders.update(formatted_kwargs)

    # format the template with what we have
    # FIXME the split of default and recent events
    return ACTION_PROMPT % placeholders
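
The helper above (removed by this commit) maps every key it was not given back to its own '%(key)s' placeholder, so the partially formatted prompt can be %-formatted again later. A standalone illustration of that two-stage formatting, with a made-up template and values:

template = 'task: %(task)s | hint: %(hint)s'

# First pass fills only 'task'; 'hint' is mapped back to its own placeholder.
partial = template % {'task': 'fix the failing test', 'hint': '%(hint)s'}
print(partial)  # task: fix the failing test | hint: %(hint)s

# A second pass can fill the remaining key later.
print(partial % {'hint': 'start by reproducing the bug'})
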
40 changes: 20 additions & 20 deletions opendevin/memory/condenser.py
@@ -22,12 +22,12 @@ def __init__(
action_prompt is a callable that returns the prompt that is about to be sent to the LLM.
The prompt callable will be called with default events and recent events as arguments.
summarize_prompt, which is optional, is a callable that returns a specific prompt to tell the LLM to summarize the recent events.
summarize_prompt is a callable that returns a specific prompt that tells the LLM to summarize the recent events.
The prompt callable will be called with default events and recent events as arguments.
Parameters:
- action_prompt (Callable): The function to generate an action prompt. The function should accept core events and recent events as arguments.
- summarize_prompt (Callable): The function to generate a summarize prompt. The function should accept core events and recent events as arguments.
- action_prompt: The function to generate an action prompt. The function should accept default events and recent events as arguments.
- summarize_prompt: The function to generate a summarize prompt. The function should accept default events and recent events as arguments.
"""
self.action_prompt = action_prompt
self.summarize_prompt = summarize_prompt
@@ -48,13 +48,13 @@ def condense(
Returns unmodified list of recent events if it is already short enough.
Parameters:
- llm (LLM): LLM to be used for summarization.
- default_events (list[dict]): List of default events that should remain unchanged.
- recent_events (list[dict]): List of recent events that may be condensed.
- background_commands (list): List of background commands to be included in the prompt.
- llm: LLM to be used for summarization.
- default_events: List of default events that should remain unchanged.
- recent_events: List of recent events that may be condensed.
- background_commands: List of background commands to be included in the prompt.
Returns:
- list[dict] | bool: The condensed recent events if successful, unmodified list if unnecessary, or False if condensation failed.
- The condensed recent events if successful, unmodified list if unnecessary, or False if condensation failed.
"""

if not background_commands:
@@ -115,12 +115,12 @@ def _attempt_condense(
Attempts to condense the recent events by splitting them in half and summarizing the first half.
Parameters:
- llm (LLM): The llm to use for summarization.
- default_events (list[dict]): The list of default events to include in the prompt.
- recent_events (list[dict]): The list of recent events to include in the prompt.
- llm: The llm to use for summarization.
- default_events: The list of default events to include in the prompt.
- recent_events: The list of recent events to include in the prompt.
Returns:
- list[dict] | None: The condensed recent events if successful, None otherwise.
- The condensed recent events if successful, None otherwise.
"""

# Split events
@@ -157,14 +157,14 @@ def needs_condense(self, **kwargs):
Checks if the prompt needs to be condensed based on the token count against the limits of the llm passed in the call.
Parameters:
- llm (LLM): The llm to use for checking the token count.
- action_prompt (str, optional): The prompt to check for token count. If not provided, it will attempt to generate it using the available arguments.
- default_events (list[dict], optional): The list of default events to include in the prompt.
- recent_events (list[dict], optional): The list of recent events to include in the prompt.
- background_commands (list, optional): The list of background commands to include in the prompt.
- llm: The llm to use for checking the token count.
- action_prompt: The prompt to check for token count. If not provided, it will attempt to generate it using the available arguments.
- default_events: The list of default events to include in the prompt.
- recent_events: The list of recent events to include in the prompt.
- background_commands: The list of background commands to include in the prompt.
Returns:
- bool: True if the prompt needs to be condensed, False otherwise.
- True if the prompt needs to be condensed, False otherwise.
"""
llm = kwargs.get('llm')
action_prompt = kwargs.get('action_prompt')
@@ -191,9 +191,9 @@ def get_token_limit(self, llm: LLM) -> int:
Returns the token limit to use for the llm passed in the call.
Parameters:
- llm (LLM): The llm to get the token limit from.
- llm: The llm to get the token limit from.
Returns:
- int: The token limit of the llm.
- The token limit of the llm.
"""
return llm.max_input_tokens - MAX_TOKEN_COUNT_PADDING
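
A self-contained sketch of the token-budget check that get_token_limit supports, as described in the needs_condense docstring above. The padding value and the crude characters-per-token estimate are assumptions for the example; the real method counts tokens via the llm passed in the call.

MAX_TOKEN_COUNT_PADDING = 512  # assumed value for the example

def get_token_limit(max_input_tokens: int) -> int:
    # Leave some headroom below the model's context window.
    return max_input_tokens - MAX_TOKEN_COUNT_PADDING

def needs_condense(prompt: str, max_input_tokens: int) -> bool:
    approx_tokens = len(prompt) // 4  # rough stand-in for llm.get_token_count()
    return approx_tokens > get_token_limit(max_input_tokens)

print(needs_condense('x' * 40_000, max_input_tokens=8_192))  # True: 10_000 > 7_680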
