Skip to content

Commit

Permalink
Merge branch 'master' into fix-bug-2801-2871-2906
Browse files Browse the repository at this point in the history
  • Loading branch information
kinance committed Apr 25, 2023
2 parents 703c24a + 58d8478 commit 314426b
Show file tree
Hide file tree
Showing 2 changed files with 48 additions and 12 deletions.
20 changes: 8 additions & 12 deletions autogpt/agent/agent.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
import yaml
from colorama import Fore, Style

from autogpt.app import execute_command, get_command
Expand Down Expand Up @@ -150,7 +149,9 @@ def start_interaction_loop(self):
"",
)
thoughts = assistant_reply_json.get("thoughts", {})
self_feedback_resp = self.get_self_feedback(thoughts)
self_feedback_resp = self.get_self_feedback(
thoughts, cfg.fast_llm_model
)
logger.typewriter_log(
f"SELF FEEDBACK: {self_feedback_resp}",
Fore.YELLOW,
Expand Down Expand Up @@ -266,8 +267,7 @@ def _resolve_pathlike_command_args(self, command_args):
)
return command_args

@staticmethod
def get_self_feedback(thoughts: dict) -> str:
def get_self_feedback(self, thoughts: dict, llm_model: str) -> str:
    """Generates a feedback response based on the provided thoughts dictionary.

    This method takes in a dictionary of thoughts containing keys such as
    'reasoning', 'plan', 'thoughts', and 'criticism'. It combines these elements
    into a single feedback message and sends it to an LLM for evaluation against
    the agent's configured role.

    Args:
        thoughts (dict): Assistant thoughts; the 'reasoning', 'plan',
            'thoughts', and 'criticism' keys are read (each defaults to "").
        llm_model (str): Name of the chat model used to generate the feedback.

    Returns:
        str: A feedback response generated using the provided thoughts dictionary.
    """
    # The agent's role is taken from its own config rather than re-reading
    # the ai_settings YAML file from disk (the pre-fix behavior).
    ai_role = self.config.ai_role

    feedback_prompt = f"Below is a message from an AI agent with the role of {ai_role}. Please review the provided Thought, Reasoning, Plan, and Criticism. If these elements accurately contribute to the successful execution of the assumed role, respond with the letter 'Y' followed by a space, and then explain why it is effective. If the provided information is not suitable for achieving the role's objectives, please provide one or more sentences addressing the issue and suggesting a resolution."
    reasoning = thoughts.get("reasoning", "")
    plan = thoughts.get("plan", "")
    thought = thoughts.get("thoughts", "")
    criticism = thoughts.get("criticism", "")
    # Concatenation order (thought, reasoning, plan, criticism) is preserved
    # from the original implementation.
    feedback_thoughts = thought + reasoning + plan + criticism
    return create_chat_completion(
        [{"role": "user", "content": feedback_prompt + feedback_thoughts}],
        llm_model,
    )
40 changes: 40 additions & 0 deletions tests/unit/test_get_self_feedback.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
from autogpt.agent.agent import Agent
from autogpt.config import AIConfig
from autogpt.llm_utils import create_chat_completion


def test_get_self_feedback(mocker):
    """get_self_feedback should forward the combined thoughts to the LLM and
    return the completion verbatim, reading the agent role from self.config."""
    # Define a sample thoughts dictionary
    thoughts = {
        "reasoning": "Sample reasoning.",
        "plan": "Sample plan.",
        "thoughts": "Sample thoughts.",
        "criticism": "Sample criticism.",
    }

    # Define a fake response for the create_chat_completion function
    fake_response = (
        "Y The provided information is suitable for achieving the role's objectives."
    )

    # Mock the create_chat_completion function. NOTE: no `wraps=` here —
    # return_value overrides a wrapped callable anyway, and wrapping the real
    # function risks an accidental live API call if return_value is removed.
    mock_create_chat_completion = mocker.patch(
        "autogpt.agent.agent.create_chat_completion"
    )
    mock_create_chat_completion.return_value = fake_response

    # Create a MagicMock object to replace the Agent instance
    agent_mock = mocker.MagicMock(spec=Agent)

    # Mock the config attribute of the Agent instance
    agent_mock.config = AIConfig()

    # Call the get_self_feedback method
    feedback = Agent.get_self_feedback(
        agent_mock,
        thoughts,
        "gpt-3.5-turbo",
    )

    # The LLM must have been consulted exactly once
    mock_create_chat_completion.assert_called_once()

    # Check if the response is correct
    assert feedback == fake_response

0 comments on commit 314426b

Please sign in to comment.