From a91e49f47e474d9f9a09d759fd0a2b7541a27829 Mon Sep 17 00:00:00 2001
From: octo-patch
Date: Sun, 19 Apr 2026 09:14:33 +0800
Subject: [PATCH] fix: explicitly set stream=False to prevent SSE issues with some providers (fixes #436)

---
 openevolve/llm/openai.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/openevolve/llm/openai.py b/openevolve/llm/openai.py
index 7477e5b349..beb5711819 100644
--- a/openevolve/llm/openai.py
+++ b/openevolve/llm/openai.py
@@ -157,6 +157,7 @@ async def generate_with_context(
             "temperature": kwargs.get("temperature", self.temperature),
             "top_p": kwargs.get("top_p", self.top_p),
             "max_tokens": kwargs.get("max_tokens", self.max_tokens),
+            "stream": False,  # Explicitly request non-streaming responses to avoid SSE issues
         }
 
         # Handle reasoning_effort for open source reasoning models.
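
Note for reviewers: a minimal, self-contained sketch of the behavior this patch relies on, not the openevolve implementation itself. It assumes the OpenAI Python SDK (openai >= 1.0) and an OpenAI-compatible endpoint; the model name, base_url, and API key below are placeholders. Passing an explicit `stream=False` in the request parameters asks the provider for a single JSON completion rather than a Server-Sent Events stream.

```python
# Sketch only: illustrates explicit stream=False with an OpenAI-compatible API.
# Placeholder values: base_url, api_key, and model are assumptions for the example.
import asyncio

from openai import AsyncOpenAI


async def generate(prompt: str) -> str:
    client = AsyncOpenAI(base_url="https://example.com/v1", api_key="YOUR_KEY")
    params = {
        "model": "your-model",
        "messages": [{"role": "user", "content": prompt}],
        "temperature": 0.7,
        "top_p": 0.95,
        "max_tokens": 512,
        # Some providers default to streaming; without this flag they may return
        # an SSE stream that a non-streaming client cannot parse (see #436).
        "stream": False,
    }
    response = await client.chat.completions.create(**params)
    return response.choices[0].message.content


if __name__ == "__main__":
    print(asyncio.run(generate("Hello")))
```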