diff --git a/frontend/src/services/settings.ts b/frontend/src/services/settings.ts
index e5997c6f4d2..d8b3ee4af06 100644
--- a/frontend/src/services/settings.ts
+++ b/frontend/src/services/settings.ts
@@ -7,7 +7,7 @@ export type Settings = {
 
 export const DEFAULT_SETTINGS: Settings = {
   LLM_MODEL: "gpt-3.5-turbo",
-  AGENT: "MonologueAgent",
+  AGENT: "CodeActAgent",
   LANGUAGE: "en",
   LLM_API_KEY: "",
 };
diff --git a/opendevin/core/config.py b/opendevin/core/config.py
index 90b58bc5d78..819fcdee074 100644
--- a/opendevin/core/config.py
+++ b/opendevin/core/config.py
@@ -46,7 +46,7 @@
     # we cannot easily count number of tokens, but we can count characters.
     # Assuming 5 characters per token, 5 million is a reasonable default limit.
     ConfigType.MAX_CHARS: 5_000_000,
-    ConfigType.AGENT: 'MonologueAgent',
+    ConfigType.AGENT: 'CodeActAgent',
    ConfigType.E2B_API_KEY: '',
    ConfigType.SANDBOX_TYPE: 'ssh',  # Can be 'ssh', 'exec', or 'e2b'
    ConfigType.USE_HOST_NETWORK: 'false',