WIP Update pydantic to v2
kcze committed Jun 27, 2024
1 parent 6e29c07 commit 70a961f
Showing 36 changed files with 185 additions and 149 deletions.
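Most of this diff is the mechanical rename from pydantic v1 methods to their v2 equivalents. As a reading aid, here is a minimal sketch of the mapping applied throughout; the `Example` model and the path are illustrative, not taken from the repository:

```python
from pathlib import Path

from pydantic import BaseModel


class Example(BaseModel):  # illustrative model, not from the repository
    name: str


obj = Example(name="demo")

obj.model_dump()                                 # was: obj.dict()
obj.model_dump_json()                            # was: obj.json()
obj.model_copy(deep=True)                        # was: obj.copy(deep=True)
Example.model_validate({"name": "demo"})         # was: Example.parse_obj(...)
Example.model_validate_json('{"name": "demo"}')  # was: Example.parse_raw(...)
Example.model_json_schema()                      # was: Example.schema()

# parse_file() is deprecated in v2; the diff reads the file and validates
# the JSON text instead (hypothetical path, shown commented out):
# Example.model_validate_json(Path("spec.json").read_text())
```

Most of the v1 names still exist in v2 as deprecated shims, so these renames are not strictly required yet, but they silence deprecation warnings and match the v2 documentation.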
2 changes: 1 addition & 1 deletion autogpt/autogpt/agent_factory/configurators.py
@@ -104,5 +104,5 @@ def create_agent_state(
allow_fs_access=not app_config.restrict_to_workspace,
use_functions_api=app_config.openai_functions,
),
history=Agent.default_settings.history.copy(deep=True),
history=Agent.default_settings.history.model_copy(deep=True),
)
6 changes: 3 additions & 3 deletions autogpt/autogpt/agent_factory/profile_generator.py
@@ -137,7 +137,7 @@ class AgentProfileGeneratorConfiguration(SystemConfiguration):
required=True,
),
},
).dict()
).model_dump()
)


@@ -156,7 +156,7 @@ def __init__(
self._model_classification = model_classification
self._system_prompt_message = system_prompt
self._user_prompt_template = user_prompt_template
self._create_agent_function = CompletionModelFunction.parse_obj(
self._create_agent_function = CompletionModelFunction.model_validate(
create_agent_function
)

@@ -222,7 +222,7 @@ async def generate_agent_profile_for_task(
AIConfig: The AIConfig object tailored to the user's input
"""
agent_profile_generator = AgentProfileGenerator(
**AgentProfileGenerator.default_configuration.dict() # HACK
**AgentProfileGenerator.default_configuration.model_dump() # HACK
)

prompt = agent_profile_generator.build_prompt(task)
8 changes: 6 additions & 2 deletions autogpt/autogpt/agents/agent.py
@@ -100,7 +100,7 @@ def __init__(
super().__init__(settings)

self.llm_provider = llm_provider
prompt_config = OneShotAgentPromptStrategy.default_configuration.copy(deep=True)
prompt_config = OneShotAgentPromptStrategy.default_configuration.model_copy(deep=True)
prompt_config.use_functions_api = (
settings.config.use_functions_api
# Anthropic currently doesn't support tools + prefilling :(
@@ -150,13 +150,17 @@ async def propose_action(self) -> OneShotAgentActionProposal:
The command name and arguments, if any, and the agent's thoughts.
"""
self.reset_trace()
#TODO kcze remove
configs = self.dump_component_configs()
logger.info(configs)
self.load_component_configs(configs)

# Get directives
resources = await self.run_pipeline(DirectiveProvider.get_resources)
constraints = await self.run_pipeline(DirectiveProvider.get_constraints)
best_practices = await self.run_pipeline(DirectiveProvider.get_best_practices)

directives = self.state.directives.copy(deep=True)
directives = self.state.directives.model_copy(deep=True)
directives.resources += resources
directives.constraints += constraints
directives.best_practices += best_practices
6 changes: 3 additions & 3 deletions autogpt/autogpt/agents/prompt_strategies/one_shot.py
@@ -96,7 +96,7 @@ def __init__(
logger: Logger,
):
self.config = configuration
self.response_schema = JSONSchema.from_dict(OneShotAgentActionProposal.schema())
self.response_schema = JSONSchema.from_dict(OneShotAgentActionProposal.model_json_schema())
self.logger = logger

@property
@@ -182,7 +182,7 @@ def build_system_prompt(
)

def response_format_instruction(self, use_functions_api: bool) -> tuple[str, str]:
response_schema = self.response_schema.copy(deep=True)
response_schema = self.response_schema.model_copy(deep=True)
assert response_schema.properties
if use_functions_api and "use_tool" in response_schema.properties:
del response_schema.properties["use_tool"]
@@ -274,5 +274,5 @@ def parse_response_content(
raise InvalidAgentResponseError("Assistant did not use a tool")
assistant_reply_dict["use_tool"] = response.tool_calls[0].function

parsed_response = OneShotAgentActionProposal.parse_obj(assistant_reply_dict)
parsed_response = OneShotAgentActionProposal.model_validate(assistant_reply_dict)
return parsed_response
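One caveat for the `schema()` → `model_json_schema()` change above: pydantic v2 generates slightly different JSON Schema than v1 (nested models are referenced under `$defs` rather than `definitions`, and optional fields are expressed with `anyOf`), so it is worth checking that `JSONSchema.from_dict` still accepts the output. A minimal sketch with illustrative models:

```python
from pydantic import BaseModel


class Inner(BaseModel):  # illustrative models, not from the repository
    value: int


class Outer(BaseModel):
    inner: Inner
    note: str | None = None


schema = Outer.model_json_schema()
assert "$defs" in schema             # v1 used "definitions" for nested models
print(schema["properties"]["note"])  # v2 renders the optional field as an anyOf
```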
6 changes: 3 additions & 3 deletions autogpt/autogpt/app/agent_protocol_server.py
@@ -327,7 +327,7 @@ async def execute_step(self, task_id: str, step_request: StepRequestBody) -> Ste
if last_proposal and tool_result
else {}
),
**assistant_response.dict(),
**assistant_response.model_dump(),
}

task_cumulative_cost = agent.llm_provider.get_incurred_cost()
@@ -451,15 +451,15 @@ def _get_task_llm_provider(self, task: Task, step_id: str = "") -> MultiProvider
"""
task_llm_budget = self._task_budgets[task.task_id]

task_llm_provider_config = self.llm_provider._configuration.copy(deep=True)
task_llm_provider_config = self.llm_provider._configuration.model_copy(deep=True)
_extra_request_headers = task_llm_provider_config.extra_request_headers
_extra_request_headers["AP-TaskID"] = task.task_id
if step_id:
_extra_request_headers["AP-StepID"] = step_id
if task.additional_input and (user_id := task.additional_input.get("user_id")):
_extra_request_headers["AutoGPT-UserID"] = user_id

settings = self.llm_provider._settings.copy()
settings = self.llm_provider._settings.model_copy()
settings.budget = task_llm_budget
settings.configuration = task_llm_provider_config
task_llm_provider = self.llm_provider.__class__(
24 changes: 13 additions & 11 deletions autogpt/autogpt/app/config.py
@@ -5,15 +5,15 @@
import os
import re
from pathlib import Path
from typing import Any, Optional, Union
from typing import Optional, Union

import forge
from forge.config.base import BaseConfig
from forge.llm.providers import CHAT_MODELS, ModelName
from forge.llm.providers.openai import OpenAICredentials, OpenAIModelName
from forge.logging.config import LoggingConfig
from forge.models.config import Configurable, UserConfigurable
from pydantic import SecretStr, validator
from pydantic import SecretStr, ValidationInfo, field_validator

logger = logging.getLogger(__name__)

@@ -91,15 +91,17 @@ class AppConfig(BaseConfig):
default=AZURE_CONFIG_FILE, from_env="AZURE_CONFIG_FILE"
)

@validator("openai_functions")
def validate_openai_functions(cls, v: bool, values: dict[str, Any]):
if v:
smart_llm = values["smart_llm"]
assert CHAT_MODELS[smart_llm].has_function_call_api, (
f"Model {smart_llm} does not support tool calling. "
"Please disable OPENAI_FUNCTIONS or choose a suitable model."
)
return v
@field_validator("openai_functions")
@classmethod
def validate_openai_functions(cls, value: bool, info: ValidationInfo):
    if value:
        smart_llm = info.data["smart_llm"]
        assert CHAT_MODELS[smart_llm].has_function_call_api, (
            f"Model {smart_llm} does not support tool calling. "
            "Please disable OPENAI_FUNCTIONS or choose a suitable model."
        )
    return value


class ConfigBuilder(Configurable[AppConfig]):
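For reference, the pydantic v2 validator pattern the refactor above follows: `@field_validator` receives the field's own value, and other fields are reached through `ValidationInfo.data`, which only contains fields declared (and validated) before the one being checked, so `smart_llm` must be declared above `openai_functions`, just as v1's `values` dict required. A minimal sketch with an illustrative model and a placeholder capability table:

```python
from pydantic import BaseModel, ValidationInfo, field_validator

SUPPORTED_MODELS = {"gpt-4-turbo", "gpt-4o"}  # placeholder, not the real CHAT_MODELS


class LLMConfigSketch(BaseModel):  # illustrative, not the real AppConfig
    smart_llm: str = "gpt-4-turbo"
    openai_functions: bool = False

    @field_validator("openai_functions")
    @classmethod
    def check_function_support(cls, value: bool, info: ValidationInfo) -> bool:
        # info.data holds the previously validated fields declared above this one
        if value and info.data["smart_llm"] not in SUPPORTED_MODELS:
            raise ValueError(f"{info.data['smart_llm']} does not support tool calling")
        return value


LLMConfigSketch(smart_llm="gpt-4-turbo", openai_functions=True)  # passes the check
```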
2 changes: 1 addition & 1 deletion autogpt/tests/conftest.py
@@ -96,7 +96,7 @@ def agent(
allow_fs_access=not config.restrict_to_workspace,
use_functions_api=config.openai_functions,
),
history=Agent.default_settings.history.copy(deep=True),
history=Agent.default_settings.history.model_copy(deep=True),
)

agent = Agent(
2 changes: 1 addition & 1 deletion autogpt/tests/integration/agent_factory.py
@@ -28,7 +28,7 @@ def dummy_agent(config: AppConfig, llm_provider: MultiProvider):
smart_llm=config.smart_llm,
use_functions_api=config.openai_functions,
),
history=Agent.default_settings.history.copy(deep=True),
history=Agent.default_settings.history.model_copy(deep=True),
)

local = config.file_storage_backend == FileStorageBackendName.LOCAL
6 changes: 3 additions & 3 deletions benchmark/agbenchmark/app.py
@@ -52,7 +52,7 @@

logger.debug(f"Loading {challenge_relpath}...")
try:
challenge_info = ChallengeInfo.parse_file(challenge_spec_file)
challenge_info = ChallengeInfo.model_validate_json(Path(challenge_spec_file).read_text())
except ValidationError as e:
if logging.getLogger().level == logging.DEBUG:
logger.warning(f"Spec file {challenge_relpath} failed to load:\n{e}")
@@ -64,7 +64,7 @@
challenge_info.eval_id = str(uuid.uuid4())
# this will sort all the keys of the JSON systematically
# so that the order is always the same
write_pretty_json(challenge_info.dict(), challenge_spec_file)
write_pretty_json(challenge_info.model_dump(), challenge_spec_file)

CHALLENGES[challenge_info.eval_id] = challenge_info

@@ -153,7 +153,7 @@ def run_single_test(body: CreateReportRequest) -> dict:
pids = find_agbenchmark_without_uvicorn()
logger.info(f"pids already running with agbenchmark: {pids}")

logger.debug(f"Request to /reports: {body.dict()}")
logger.debug(f"Request to /reports: {body.model_dump()}")

# Start the benchmark in a separate thread
benchmark_process = Process(
23 changes: 12 additions & 11 deletions benchmark/agbenchmark/challenges/builtin.py
@@ -15,7 +15,7 @@
from agent_protocol_client import Step
from colorama import Fore, Style
from openai import _load_client as get_openai_client
from pydantic import BaseModel, Field, constr, validator
from pydantic import BaseModel, Field, StringConstraints, ValidationInfo, field_validator

from agbenchmark.agent_api_interface import download_agent_artifacts_into_folder
from agbenchmark.agent_interface import copy_challenge_artifacts_into_workspace
@@ -46,7 +46,7 @@ class BuiltinChallengeSpec(BaseModel):

class Info(BaseModel):
difficulty: DifficultyLevel
description: Annotated[str, constr(regex=r"^Tests if the agent can.*")]
description: Annotated[str, StringConstraints(pattern=r"^Tests if the agent can.*")]
side_effects: list[str] = Field(default_factory=list)

info: Info
@@ -64,19 +64,20 @@ class Eval(BaseModel):
template: Optional[Literal["rubric", "reference", "question", "custom"]]
examples: Optional[str]

@validator("scoring", "template", always=True)
def validate_eval_fields(cls, v, values, field):
if "type" in values and values["type"] == "llm":
if v is None:
@field_validator("scoring", "template")
@classmethod
def validate_eval_fields(cls, value, info: ValidationInfo):
if "type" in info.data and info.data["type"] == "llm":
if value is None:
raise ValueError(
f"{field.name} must be provided when eval type is 'llm'"
f"{info.field_name} must be provided when eval type is 'llm'"
)
else:
if v is not None:
if value is not None:
raise ValueError(
f"{field.name} should only exist when eval type is 'llm'"
f"{info.field_name} should only exist when eval type is 'llm'"
)
return v
return value

eval: Eval

@@ -142,7 +143,7 @@

@classmethod
def from_challenge_spec_file(cls, spec_file: Path) -> type["BuiltinChallenge"]:
challenge_spec = BuiltinChallengeSpec.parse_file(spec_file)
challenge_spec = BuiltinChallengeSpec.model_validate_json(spec_file.read_text())
challenge_spec.spec_file = spec_file
return cls.from_challenge_spec(challenge_spec)
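Beyond the renames, two general pydantic v2 behaviour changes are worth flagging for this spec: `Optional[X]` no longer implies a default of `None`, so fields such as `template` and `examples` above become required unless a default is added, and v1's `always=True` has no direct equivalent on `@field_validator` — validating a default value needs `Field(validate_default=True)` instead. A minimal sketch under those assumptions, with an illustrative stand-in model:

```python
from typing import Literal, Optional

from pydantic import BaseModel, Field, ValidationInfo, field_validator


class EvalSketch(BaseModel):  # illustrative stand-in, not the real Eval model
    type: str = "llm"
    # In v2, Optional[...] alone makes a field required (None must be passed
    # explicitly); an explicit default restores the v1 behaviour.
    template: Optional[Literal["rubric", "reference", "question", "custom"]] = Field(
        default=None, validate_default=True  # validate_default ~= v1's always=True
    )

    @field_validator("template")
    @classmethod
    def require_template_for_llm(cls, value, info: ValidationInfo):
        if info.data.get("type") == "llm" and value is None:
            raise ValueError("template must be provided when eval type is 'llm'")
        return value


EvalSketch(type="custom")  # ok: the default None is validated, and type is not "llm"
# EvalSketch(type="llm")   # would raise: template is required for llm evals
```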

19 changes: 10 additions & 9 deletions benchmark/agbenchmark/challenges/webarena.py
@@ -6,7 +6,7 @@
import pytest
import requests
from agent_protocol_client import AgentApi, Step
from pydantic import BaseModel, ValidationError, validator
from pydantic import BaseModel, ValidationError, ValidationInfo, field_validator

from agbenchmark.config import AgentBenchmarkConfig
from agbenchmark.utils.data_types import Category, EvalResult
@@ -214,17 +214,18 @@ class StringMatchEvalSet(BaseModel):

eval_types: list[EvalType]

@validator("eval_types")
def check_eval_parameters(cls, v: list[EvalType], values):
if "string_match" in v and not values.get("reference_answers"):
@field_validator("eval_types")
@classmethod
def check_eval_parameters(cls, value: list[EvalType], info: ValidationInfo):
if "string_match" in value and not info.data.get("reference_answers"):
raise ValueError("'string_match' eval_type requires reference_answers")
if "url_match" in v and not values.get("reference_url"):
if "url_match" in value and not info.data.get("reference_url"):
raise ValueError("'url_match' eval_type requires reference_url")
if "program_html" in v and not values.get("program_html"):
if "program_html" in value and not info.data.get("program_html"):
raise ValueError(
"'program_html' eval_type requires at least one program_html eval"
)
return v
return value

@property
def evaluators(self) -> list[_Eval]:
@@ -292,7 +293,7 @@ def from_source_uri(cls, source_uri: str) -> type["WebArenaChallenge"]:
results = requests.get(source_url).json()["data"]
if not results:
raise ValueError(f"Could not fetch challenge {source_uri}")
return cls.from_challenge_spec(WebArenaChallengeSpec.parse_obj(results[0]))
return cls.from_challenge_spec(WebArenaChallengeSpec.model_validate(results[0]))

@classmethod
def from_challenge_spec(
@@ -500,7 +501,7 @@ def load_webarena_challenges(
skipped = 0
for entry in challenge_dicts:
try:
challenge_spec = WebArenaChallengeSpec.parse_obj(entry)
challenge_spec = WebArenaChallengeSpec.model_validate(entry)
except ValidationError as e:
failed += 1
logger.warning(f"Error validating WebArena challenge entry: {entry}")
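A note on the `.get()` calls in the validator above: in pydantic v2, `ValidationInfo.data` only contains fields that have already validated successfully, so a missing or failed field is simply absent rather than `None`. Using `.get()` preserves the tolerant behaviour of v1's `values.get(...)`. A minimal sketch with an illustrative model:

```python
from pydantic import BaseModel, ValidationInfo, field_validator


class MatchSketch(BaseModel):  # illustrative, not the real WebArenaChallengeSpec
    reference_answers: list[str] | None = None
    eval_types: list[str]

    @field_validator("eval_types")
    @classmethod
    def check_eval_parameters(cls, value: list[str], info: ValidationInfo):
        # .get() tolerates fields that were omitted or failed their own validation
        if "string_match" in value and not info.data.get("reference_answers"):
            raise ValueError("'string_match' eval_type requires reference_answers")
        return value


MatchSketch(reference_answers=["42"], eval_types=["string_match"])  # validates
```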
8 changes: 4 additions & 4 deletions benchmark/agbenchmark/reports/ReportManager.py
@@ -113,15 +113,15 @@ def save(self) -> None:
if self.report:
f.write(self.report.json(indent=4))
else:
json.dump({k: v.dict() for k, v in self.tests.items()}, f, indent=4)
json.dump({k: v.model_dump() for k, v in self.tests.items()}, f, indent=4)

def load(self) -> None:
super().load()

if "tests" in self.tests:
self.report = Report.parse_obj(self.tests)
self.report = Report.model_validate(self.tests)
else:
self.tests = {n: Test.parse_obj(d) for n, d in self.tests.items()}
self.tests = {n: Test.model_validate(d) for n, d in self.tests.items()}

def add_test_report(self, test_name: str, test_report: Test) -> None:
if self.report:
@@ -155,7 +155,7 @@ def finalize_session_report(self, config: AgentBenchmarkConfig) -> None:
total_cost=self.get_total_costs(),
),
tests=copy.copy(self.tests),
config=config.dict(exclude={"reports_folder"}, exclude_none=True),
config=config.model_dump(exclude={"reports_folder"}, exclude_none=True),
)

agent_categories = get_highest_achieved_difficulty_per_category(self.report)
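The unchanged `f.write(self.report.json(indent=4))` call earlier in this file still uses the v1 serializer name; under pydantic v2 the deprecated `.json()` shim rejects extra `json.dumps` keyword arguments such as `indent`, so that line likely needs `model_dump_json(indent=4)` as a follow-up. A minimal sketch with an illustrative model:

```python
from pydantic import BaseModel


class ReportSketch(BaseModel):  # illustrative, not the real Report model
    command: str = "agbenchmark run"


report = ReportSketch()

# v2 spelling of report.json(indent=4); the v1-style call is deprecated and
# no longer accepts arbitrary json.dumps kwargs such as indent.
print(report.model_dump_json(indent=4))
```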
2 changes: 1 addition & 1 deletion benchmark/agbenchmark/reports/processing/process_report.py
@@ -27,7 +27,7 @@ def get_reports_data(report_path: str) -> dict[str, Any]:
with open(Path(subdir) / file, "r") as f:
# Load the JSON data from the file
json_data = json.load(f)
converted_data = Report.parse_obj(json_data)
converted_data = Report.model_validate(json_data)
# get the last directory name in the path as key
reports_data[subdir_name] = converted_data

23 changes: 12 additions & 11 deletions benchmark/agbenchmark/reports/processing/report_types.py
@@ -6,7 +6,7 @@
from typing import Annotated, Any, Dict, List

from agent_protocol_client import Step
from pydantic import BaseModel, Field, constr, validator
from pydantic import BaseModel, Field, StringConstraints, ValidationInfo, field_validator

datetime_format = r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\+00:00$"
logger = logging.getLogger(__name__)
@@ -30,20 +30,21 @@ class TestResult(BaseModel):
cost: float | None = None
"""The (known) cost incurred by the run, e.g. from using paid LLM APIs"""

@validator("fail_reason")
def success_xor_fail_reason(cls, v: str | None, values: dict[str, Any]):
if bool(v) == bool(values["success"]):
@field_validator("fail_reason")
@classmethod
def success_xor_fail_reason(cls, value: str | None, info: ValidationInfo):
if bool(value) == bool(info.data["success"]):
logger.error(
"Error validating `success ^ fail_reason` on TestResult: "
f"success = {repr(values['success'])}; "
f"fail_reason = {repr(v)}"
f"success = {repr(info.data['success'])}; "
f"fail_reason = {repr(value)}"
)
if v:
success = values["success"]
if value:
success = info.data["success"]
assert not success, "fail_reason must only be specified if success=False"
else:
assert values["success"], "fail_reason is required if success=False"
return v
assert info.data["success"], "fail_reason is required if success=False"
return value


class TestMetrics(BaseModel):
@@ -88,7 +89,7 @@ class Test(BaseModel):
class ReportBase(BaseModel):
command: str
completion_time: str | None = None
benchmark_start_time: Annotated[str, constr(regex=datetime_format)]
benchmark_start_time: Annotated[str, StringConstraints(pattern=datetime_format)]
metrics: MetricsOverall
config: Dict[str, str | dict[str, str]]
agent_git_commit_sha: str | None = None
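The `constr(regex=...)` → `Annotated[str, StringConstraints(pattern=...)]` swap used here (and in `builtin.py`) validates the same way at runtime. A minimal sketch using the `datetime_format` pattern defined above, with an illustrative model:

```python
from typing import Annotated

from pydantic import BaseModel, StringConstraints, ValidationError

datetime_format = r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\+00:00$"


class ReportStampSketch(BaseModel):  # illustrative, not the real ReportBase
    benchmark_start_time: Annotated[str, StringConstraints(pattern=datetime_format)]


ReportStampSketch(benchmark_start_time="2024-06-27T12:00:00+00:00")  # validates

try:
    ReportStampSketch(benchmark_start_time="27 Jun 2024")
except ValidationError as e:
    print(e)  # pattern mismatch reported by pydantic v2
```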
(The remaining changed files in this commit are not shown here.)
