fix: update outdated configs and fix logging #95
chenweize1998 committed Nov 7, 2023
1 parent 067aae4 commit b0eeeea
Showing 13 changed files with 80 additions and 63 deletions.
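
The change repeated across these files is a switch from the stdlib `logging` module to a shared `logger` imported from `agentverse.logging` (with `logging.warning(...)` calls becoming `logger.warn(...)`). The logging module itself is not part of this diff; the sketch below is only an assumption of what such a centralized logger might look like, and every name other than `agentverse.logging.logger` is invented for illustration.

```python
# Hypothetical sketch of agentverse/logging.py -- the real module is not
# shown in this commit, so treat the details below as assumptions.
import logging


def _build_logger(name: str = "agentverse") -> logging.Logger:
    logger = logging.getLogger(name)
    if not logger.handlers:  # avoid adding duplicate handlers on re-import
        handler = logging.StreamHandler()
        handler.setFormatter(
            logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
        )
        logger.addHandler(handler)
        logger.setLevel(logging.INFO)
    return logger


# Call sites then do: from agentverse.logging import logger
logger = _build_logger()
```

One wrinkle worth noting: `Logger.warn` is a deprecated alias of `Logger.warning` in the standard library, so a thin wrapper like the one above would still accept the `logger.warn(...)` calls used throughout this commit, while a project-specific logger class could equally define its own `warn` method.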
3 changes: 2 additions & 1 deletion agentverse/agents/base.py
@@ -6,6 +6,7 @@
from pydantic import BaseModel, Field
from agentverse.llms import BaseLLM

from agentverse.logging import logger
from agentverse.llms.utils import count_string_tokens
from agentverse.memory import BaseMemory, ChatHistoryMemory
from agentverse.message import Message
@@ -106,7 +107,7 @@ def remove_receiver(self, receiver: Union[Set[str], str]) -> None:
try:
self.receiver.remove(receiver)
except KeyError as e:
logging.warning(f"Receiver {receiver} not found.")
logger.warn(f"Receiver {receiver} not found.")
elif isinstance(receiver, set):
self.receiver = self.receiver.difference(receiver)
else:
14 changes: 7 additions & 7 deletions agentverse/agents/simulation_agent/prisoner_dilemma.py
@@ -1,10 +1,10 @@
from __future__ import annotations

import logging
from string import Template
from typing import TYPE_CHECKING, List

from agentverse.message import Message
from agentverse.logging import logger

# from . import agent_registry
# from .base import BaseAgent
@@ -30,12 +30,12 @@ def step(
parsed_response = self.output_parser.parse(self, environment, response)
break
except Exception as e:
logging.error(e)
logging.warning("Retrying...")
logger.error(e)
logger.warn("Retrying...")
continue

if parsed_response is None:
logging.error(f"{self.name} failed to generate valid response.")
logger.error(f"{self.name} failed to generate valid response.")

message = Message(
content=""
@@ -59,12 +59,12 @@ async def astep(
parsed_response = self.output_parser.parse(self, environment, response)
break
except Exception as e:
logging.error(e)
logging.warning("Retrying...")
logger.error(e)
logger.warn("Retrying...")
continue

if parsed_response is None:
logging.error(f"{self.name} failed to generate valid response.")
logger.error(f"{self.name} failed to generate valid response.")

message = Message(
content=""
19 changes: 10 additions & 9 deletions agentverse/agents/simulation_agent/tool.py
@@ -1,4 +1,3 @@
import logging
from string import Template
from typing import List, NamedTuple, Optional, Union

@@ -9,13 +8,15 @@
from agentverse.memory import BaseMemory, ChatHistoryMemory
from agentverse.message import Message
from agentverse.utils import AgentAction, AgentFinish
from agentverse.logging import logger

#from . import agent_registry
#from .base import BaseAgent
# from . import agent_registry
# from .base import BaseAgent

from agentverse.agents import agent_registry
from agentverse.agents.base import BaseAgent


class ToolNotExistError(BaseException):
"""Exception raised when parsing output from a command fails."""

@@ -50,14 +51,14 @@ def step(self, env_description: str = "") -> Message:
)
break
except BaseException as e:
logging.error(e)
logging.warning("Retrying...")
logger.error(e)
logger.warn("Retrying...")
continue
if parsed_response is None or isinstance(parsed_response, AgentFinish):
break

if parsed_response is None:
logging.error(f"{self.name} failed to generate valid response.")
logger.error(f"{self.name} failed to generate valid response.")

self._update_tool_memory(tool_observation)

@@ -92,14 +93,14 @@ async def astep(self, env_description: str = "") -> Message:
)
break
except BaseException as e:
logging.error(e)
logging.warning("Retrying...")
logger.error(e)
logger.warn("Retrying...")
continue
if parsed_response is None or isinstance(parsed_response, AgentFinish):
break

if parsed_response is None:
logging.error(f"{self.name} failed to generate valid response.")
logger.error(f"{self.name} failed to generate valid response.")

self._update_tool_memory(tool_observation)

4 changes: 2 additions & 2 deletions agentverse/environments/simulation_env/pokemon.py
@@ -1,10 +1,10 @@
import asyncio
import datetime
import logging
from typing import Any, Dict, List, Optional, Set

# from agentverse.agents.agent import Agent
from agentverse.agents.simulation_agent.conversation import BaseAgent
from agentverse.logging import logger

# from agentverse.environments.simulation_env.rules.base import Rule
from agentverse.environments.simulation_env.rules.base import SimulationRule as Rule
@@ -167,7 +167,7 @@ def get_agent_to_location(self) -> Dict[str, str]:
def print_messages(self, messages: List[Message]) -> None:
for message in messages:
if message is not None:
logging.info(f"{message.sender}: {message.content}")
logger.info(f"{message.sender}: {message.content}")

def reset(self) -> None:
"""Reset the environment"""
@@ -1,11 +1,11 @@
from __future__ import annotations

import logging
import re
from typing import TYPE_CHECKING, Any, List, Optional

from . import order_registry as OrderRegistry
from .base import BaseOrder
from agentverse.logging import logger

if TYPE_CHECKING:
from agentverse.environments import BaseEnvironment
@@ -75,7 +75,7 @@ def get_next_agent_idx_grouped(self, environment: BaseEnvironment) -> List[int]:
# `groups` should be set in the corresponding `visibility`,
# and `group_speaker_mapping` should be maintained here.
if "groups" not in environment.rule_params:
logging.warning(
logger.warn(
"The environment is grouped, but the grouping information is not provided."
)
groups = environment.rule_params.get(
3 changes: 2 additions & 1 deletion agentverse/environments/simulation_env/sde_team.py
@@ -4,6 +4,7 @@
import json

from agentverse.agents.simulation_agent.conversation import BaseAgent
from agentverse.logging import logger

# from agentverse.environments.simulation_env.rules.base import Rule
from agentverse.environments.simulation_env.rules.base import SimulationRule as Rule
@@ -116,7 +117,7 @@ async def step(self) -> List[Message]:
def print_messages(self, messages: List[Message]) -> None:
for message in messages:
if message is not None:
logging.info(f"{message.sender}: {message.content}")
logger.info(f"{message.sender}: {message.content}")

def reset(self) -> None:
"""Reset the environment"""
@@ -1,5 +1,4 @@
import asyncio
import logging
from typing import Any, Dict, List
import json

@@ -8,6 +7,7 @@
# from agentverse.environments.simulation_env.rules.base import Rule
from agentverse.environments.simulation_env.rules.base import SimulationRule as Rule
from agentverse.message import Message
from agentverse.logging import logger

from .. import env_registry as EnvironmentRegistry
from ..base import BaseEnvironment
@@ -104,7 +104,7 @@ async def step(self) -> List[Message]:
def print_messages(self, messages: List[Message]) -> None:
for message in messages:
if message is not None:
logging.info(f"{message.sender}: {message.content}")
logger.info(f"{message.sender}: {message.content}")

def reset(self) -> None:
"""Reset the environment"""
6 changes: 3 additions & 3 deletions agentverse/llms/openai.py
@@ -22,7 +22,7 @@
from openai.error import OpenAIError
except ImportError:
is_openai_available = False
logging.warning("openai package is not installed")
logger.warn("openai package is not installed")
else:
# openai.proxy = os.environ.get("http_proxy")
# if openai.proxy is None:
@@ -37,7 +37,7 @@
openai.api_version = "2023-05-15"
is_openai_available = True
else:
logging.warning(
logger.warn(
"OpenAI API key is not set. Please set the environment variable OPENAI_API_KEY"
)
is_openai_available = False
@@ -110,7 +110,7 @@ def __init__(self, max_retry: int = 3, **kwargs):
for k, v in args.items():
args[k] = kwargs.pop(k, v)
if len(kwargs) > 0:
logging.warning(f"Unused arguments: {kwargs}")
logger.warn(f"Unused arguments: {kwargs}")
if args["model"] in LOCAL_LLMS:
openai.api_base = "http://localhost:5000/v1"
super().__init__(args=args, max_retry=max_retry)
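
The openai.py hunks follow an optional-dependency pattern: try the import, record availability in a module-level flag, and warn through the shared logger rather than raising at import time. Below is a condensed sketch of that pattern reconstructed from the fragments above; the proxy and Azure branches visible in the comments are elided, and the `openai.api_key` assignment is assumed rather than shown in the diff.

```python
# Condensed sketch of the availability check in agentverse/llms/openai.py.
# Only the warnings and the is_openai_available flag appear in the diff;
# the rest is assumed for illustration.
import os

from agentverse.logging import logger

try:
    import openai
    from openai.error import OpenAIError
except ImportError:
    is_openai_available = False
    logger.warn("openai package is not installed")
else:
    api_key = os.environ.get("OPENAI_API_KEY")
    if api_key:
        openai.api_key = api_key  # assumed; not shown in the hunk
        is_openai_available = True
    else:
        logger.warn(
            "OpenAI API key is not set. Please set the environment variable OPENAI_API_KEY"
        )
        is_openai_available = False
```

Downstream code can then check `is_openai_available` before constructing an OpenAI-backed model, and the last hunk shows the same `logger.warn` being used to flag unused constructor arguments.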
39 changes: 27 additions & 12 deletions agentverse/output_parser/output_parser.py
@@ -18,6 +18,7 @@
from agentverse.agents.base import BaseAgent
from agentverse.environments.base import BaseEnvironment


class OutputParserError(Exception):
"""Exception raised when parsing output from a command fails."""

@@ -157,24 +158,38 @@ def parse(self, output: LLMResult) -> Union[AgentAction, AgentFinish]:
class NlpClassroom9PlayersGroupParser(OutputParser):
def parse(self, output: LLMResult) -> Union[AgentAction, AgentFinish]:
text = output.content
cleaned_output = text.strip()
cleaned_output = re.sub(r"\n+", "\n", cleaned_output)
cleaned_output = cleaned_output.split("\n")
if not (
len(cleaned_output) == 2
and cleaned_output[0].startswith("Action:")
and cleaned_output[1].startswith("Action Input:")
):
# cleaned_output = text.strip()
# cleaned_output = re.sub(r"\n+", "\n", cleaned_output)
# cleaned_output = cleaned_output.split("\n")
# if not (
# len(cleaned_output) == 2
# and cleaned_output[0].startswith("Action:")
# and cleaned_output[1].startswith("Action Input:")
# ):
# raise OutputParserError(text)
# action = cleaned_output[0][len("Action:") :].strip()
# action_input = cleaned_output[1][len("Action Input:") :].strip()
action_result = re.findall(r"Action:(.+)", text)
result = re.findall(r"Action:(.+?)Action Input:(.+)", text, re.DOTALL)
if len(action_result) == 0:
raise OutputParserError(text)
action = cleaned_output[0][len("Action:") :].strip()
action_input = cleaned_output[1][len("Action Input:") :].strip()

action = action_result[0].strip()
if action not in ["Listen", "RaiseHand"]:
if len(result) == 0:
raise OutputParserError(text)
action_input = result[0][1].strip()
if action == "RaiseHand":
action_input = ""

if action == "Speak":
return AgentFinish({"output": action_input}, text)
elif action in ["CallOn", "RaiseHand", "GroupDiscuss"]:
elif action in ["CallOn", "GroupDiscuss", "RaiseHand"]:
return AgentFinish({"output": f"[{action}] {action_input}"}, text)
elif action == "Listen":
elif action in ["Listen"]:
return AgentFinish({"output": ""}, text)
else:
action_input = result[0][1].strip()
return AgentAction(action, action_input, text)


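The rewritten `NlpClassroom9PlayersGroupParser.parse` replaces the strict two-line check (kept above as comments) with regex extraction, which tolerates action inputs that span multiple lines and actions such as `Listen` or `RaiseHand` that carry no input. A small standalone illustration of the same two regexes, on an invented sample output:

```python
import re

# Invented sample of an LLM reply with a multi-line "Action Input".
text = (
    "Action: Speak\n"
    "Action Input: Good morning, everyone.\n"
    "Today we will cover convolutional neural networks."
)

action_result = re.findall(r"Action:(.+)", text)
result = re.findall(r"Action:(.+?)Action Input:(.+)", text, re.DOTALL)

action = action_result[0].strip()       # "Speak"
action_input = result[0][1].strip()     # keeps both lines of the input
print(action)
print(action_input)
```

In the parser itself, `Speak` then becomes an `AgentFinish` with the raw input as its output, `CallOn` / `GroupDiscuss` / `RaiseHand` are wrapped as `[{action}] {action_input}`, `Listen` yields an empty output, and any other action falls through to an `AgentAction`.
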
@@ -94,8 +94,8 @@ agents:
prompt_template: *prompt
verbose: true
llm:
llm_type: text-davinci-003
model: text-davinci-003
llm_type: gpt-4
model: gpt-4
temperature: 0.7
max_tokens: 250
output_parser:
@@ -110,8 +110,8 @@
prompt_template: *prompt
verbose: true
llm:
llm_type: text-davinci-003
model: text-davinci-003
llm_type: gpt-4
model: gpt-4
temperature: 0.7
max_tokens: 250
output_parser:
@@ -155,9 +155,8 @@ agents:
5. Provide an envision towards the future development of neural networks.
Your goal is to ensure that the students understand the material, so it's important to speak slowly and clearly. You don't necessarily have to strictly follow the course outline when teaching, you can also talk about some other relevant topics. Remember, in each round of conversation, your response should only address one topic at most. Please take your time and don't rush through the content.
prompt_template: *professor_prompt
llm:
llm_type: text-davinci-003
model: text-davinci-003
llm_type: gpt-4
model: gpt-4
temperature: 0.7
max_tokens: 250
output_parser:
@@ -180,8 +179,8 @@
prompt_template: *summary_prompt
recursive: true
llm:
llm_type: text-davinci-003
model: text-davinci-003
llm_type: gpt-4
model: gpt-4
temperature: 0.7
max_tokens: 100
output_parser:
@@ -203,8 +202,8 @@
prompt_template: *summary_prompt
recursive: true
llm:
llm_type: text-davinci-003
model: text-davinci-003
llm_type: gpt-4
model: gpt-4
temperature: 0.7
max_tokens: 100
output_parser: