diff --git a/openevolve/config.py b/openevolve/config.py
index 1a1a19338..303e2941c 100644
--- a/openevolve/config.py
+++ b/openevolve/config.py
@@ -40,7 +40,6 @@ class LLMConfig(LLMModelConfig):
 
     # API configuration
     api_base: str = "https://api.openai.com/v1"
-    name: str = "gpt-4o"
 
     # Generation parameters
     system_message: Optional[str] = "system_message"
@@ -60,10 +59,10 @@ class LLMConfig(LLMModelConfig):
     evaluator_models: List[LLMModelConfig] = field(default_factory=lambda: [])
 
     # Backwardes compatibility with primary_model(_weight) options
-    primary_model: str = "gemini-2.0-flash-lite"
-    primary_model_weight: float = 0.8
-    secondary_model: str = "gemini-2.0-flash"
-    secondary_model_weight: float = 0.2
+    primary_model: Optional[str] = None
+    primary_model_weight: Optional[float] = None
+    secondary_model: Optional[str] = None
+    secondary_model_weight: Optional[float] = None
 
     def __post_init__(self):
         """Post-initialization to set up model configurations"""
diff --git a/openevolve/llm/openai.py b/openevolve/llm/openai.py
index c146ecc0c..47a6aba2f 100644
--- a/openevolve/llm/openai.py
+++ b/openevolve/llm/openai.py
@@ -107,4 +107,8 @@ async def _call_api(self, params: Dict[str, Any]) -> str:
         response = await loop.run_in_executor(
             None, lambda: self.client.chat.completions.create(**params)
         )
+        # Debug-log the request parameters and the response content
+        logger = logging.getLogger(__name__)
+        logger.debug("API parameters: %s", params)
+        logger.debug("API response: %s", response.choices[0].message.content)
         return response.choices[0].message.content