From 60a746f01a4e1dea37c046a1a71cf0b7142a3592 Mon Sep 17 00:00:00 2001
From: endolith
Date: Mon, 3 Nov 2025 22:32:57 -0500
Subject: [PATCH 1/9] Fix "display_markdown_message is not defined"

Add missing display_markdown_message import in respond.py
---
 interpreter/core/respond.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/interpreter/core/respond.py b/interpreter/core/respond.py
index 4d91189639..3dcbbec8dc 100644
--- a/interpreter/core/respond.py
+++ b/interpreter/core/respond.py
@@ -8,6 +8,7 @@
 import litellm
 import openai
 
+from ..terminal_interface.utils.display_markdown_message import display_markdown_message
 from .render_message import render_message
 
 

From 511fb76b114c34e0cd685165bdd4cbe2d1d7b0ec Mon Sep 17 00:00:00 2001
From: endolith
Date: Tue, 18 Nov 2025 17:36:23 -0500
Subject: [PATCH 2/9] Fix operator precedence in exception handler conditions

Added parentheses to fix two conditions that incorrectly triggered due to
operator precedence:

- API key handler triggered on any "api key" error, ignoring offline status
- Rate limit handler triggered on any "insufficient_quota" error, not just
  RateLimitError exceptions
---
 interpreter/core/respond.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/interpreter/core/respond.py b/interpreter/core/respond.py
index 3dcbbec8dc..d9282d8712 100644
--- a/interpreter/core/respond.py
+++ b/interpreter/core/respond.py
@@ -107,8 +107,8 @@ def respond(interpreter):
             error_message = str(e).lower()
             if (
                 interpreter.offline == False
-                and "auth" in error_message
-                or "api key" in error_message
+                and ("auth" in error_message or
+                     "api key" in error_message)
             ):
                 output = traceback.format_exc()
                 raise Exception(
@@ -116,8 +116,8 @@ def respond(interpreter):
                 )
             elif (
                 type(e) == litellm.exceptions.RateLimitError
-                and "exceeded" in str(e).lower()
-                or "insufficient_quota" in str(e).lower()
+                and ("exceeded" in str(e).lower() or
+                     "insufficient_quota" in str(e).lower())
            ):
                 display_markdown_message(
                     f""" > You ran out of current quota for OpenAI's API, please check your plan and billing details. You can either wait for the quota to reset or upgrade your plan.
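
The precedence bug that PATCH 2/9 fixes is easy to reproduce in isolation. A minimal sketch, assuming stand-in values rather than the real `interpreter` object and error text (both names below are made up for illustration):

```python
# "and" binds tighter than "or", so the unparenthesized condition parses as
# (offline == False and "auth" in msg) or ("api key" in msg)
offline = True              # pretend we are running in offline mode
msg = "invalid api key"     # pretend error message, already lowercased

buggy = offline == False and "auth" in msg or "api key" in msg
fixed = offline == False and ("auth" in msg or "api key" in msg)

print(buggy)  # True: the handler fires even though offline is set
print(fixed)  # False: the offline check is respected
```

The same grouping problem applies to the second condition, where the trailing `or "insufficient_quota"` clause bypassed the RateLimitError type check entirely.
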
From a3ac3185fa2d1cc345c79c470b98e1a7811e4bdd Mon Sep 17 00:00:00 2001
From: endolith
Date: Tue, 18 Nov 2025 17:42:37 -0500
Subject: [PATCH 3/9] Move comment about API key info into correct scope

---
 interpreter/core/respond.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/interpreter/core/respond.py b/interpreter/core/respond.py
index d9282d8712..6b700b181b 100644
--- a/interpreter/core/respond.py
+++ b/interpreter/core/respond.py
@@ -100,9 +100,6 @@ def respond(interpreter):
                 )
                 break
 
-            # Provide extra information on how to change API keys, if we encounter that error
-            # (Many people writing GitHub issues were struggling with this)
-
         except Exception as e:
             error_message = str(e).lower()
             if (
@@ -110,6 +107,9 @@ def respond(interpreter):
                 and ("auth" in error_message or
                      "api key" in error_message)
             ):
+                # Provide extra information on how to change API keys, if
+                # we encounter that error (Many people writing GitHub
+                # issues were struggling with this)
                 output = traceback.format_exc()
                 raise Exception(
                     f"{output}\n\nThere might be an issue with your API key(s).\n\nTo reset your API key (we'll use OPENAI_API_KEY for this example, but you may need to reset your ANTHROPIC_API_KEY, HUGGINGFACE_API_KEY, etc):\n Mac/Linux: 'export OPENAI_API_KEY=your-key-here'. Update your ~/.zshrc on MacOS or ~/.bashrc on Linux with the new key if it has already been persisted there.,\n Windows: 'setx OPENAI_API_KEY your-key-here' then restart terminal.\n\n"

From 33214cb250b315fd506142bd5526cc710d2bf809 Mon Sep 17 00:00:00 2001
From: endolith
Date: Tue, 18 Nov 2025 19:10:25 -0500
Subject: [PATCH 4/9] Convert string to comment

---
 interpreter/core/respond.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/interpreter/core/respond.py b/interpreter/core/respond.py
index 6b700b181b..2af0a4a548 100644
--- a/interpreter/core/respond.py
+++ b/interpreter/core/respond.py
@@ -131,9 +131,7 @@ def respond(interpreter):
             elif (
                 interpreter.offline == False and "not have access" in str(e).lower()
             ):
-                """
-                Check for invalid model in error message and then fallback.
-                """
+                # Check for invalid model in error message and then fallback.
                 if (
                     "invalid model" in error_message
                     or "model does not exist" in error_message

From 7da4a2cd2d4af56018f0851de5b9533d6b47b20e Mon Sep 17 00:00:00 2001
From: endolith
Date: Tue, 18 Nov 2025 19:15:26 -0500
Subject: [PATCH 5/9] Remove unused openai import

---
 interpreter/core/respond.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/interpreter/core/respond.py b/interpreter/core/respond.py
index 2af0a4a548..13d9012474 100644
--- a/interpreter/core/respond.py
+++ b/interpreter/core/respond.py
@@ -6,7 +6,6 @@
 os.environ["LITELLM_LOCAL_MODEL_COST_MAP"] = "True"
 
 import litellm
-import openai
 
 from ..terminal_interface.utils.display_markdown_message import display_markdown_message
 from .render_message import render_message

From 0070d75641d6677b318fb686cad7acf873ee6925 Mon Sep 17 00:00:00 2001
From: endolith
Date: Tue, 18 Nov 2025 19:15:43 -0500
Subject: [PATCH 6/9] Don't check for equality with None

---
 interpreter/core/respond.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/interpreter/core/respond.py b/interpreter/core/respond.py
index 13d9012474..89ff7774dc 100644
--- a/interpreter/core/respond.py
+++ b/interpreter/core/respond.py
@@ -255,7 +255,7 @@ def respond(interpreter):
                     continue
 
             # Is this language enabled/supported?
-            if interpreter.computer.terminal.get_language(language) == None:
+            if interpreter.computer.terminal.get_language(language) is None:
                 output = f"`{language}` disabled or not supported."
 
                 yield {

From 50171c8949c795db8e5b4e4d5124e5c41c7c47f4 Mon Sep 17 00:00:00 2001
From: endolith
Date: Tue, 18 Nov 2025 19:20:52 -0500
Subject: [PATCH 7/9] Use isinstance() instead of type() == for exception type checking

Replace `type(e) == litellm.exceptions.RateLimitError` with
`isinstance(e, litellm.exceptions.RateLimitError)` for proper exception
type checking.

While the behavior is technically different (isinstance() supports
inheritance, while type() == requires an exact match), isinstance() is the
correct and more robust approach for exception handling. If litellm
introduces subclasses of RateLimitError in the future, isinstance() will
correctly catch them.
---
 interpreter/core/respond.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/interpreter/core/respond.py b/interpreter/core/respond.py
index 89ff7774dc..b93341da37 100644
--- a/interpreter/core/respond.py
+++ b/interpreter/core/respond.py
@@ -114,7 +114,7 @@ def respond(interpreter):
                     f"{output}\n\nThere might be an issue with your API key(s).\n\nTo reset your API key (we'll use OPENAI_API_KEY for this example, but you may need to reset your ANTHROPIC_API_KEY, HUGGINGFACE_API_KEY, etc):\n Mac/Linux: 'export OPENAI_API_KEY=your-key-here'. Update your ~/.zshrc on MacOS or ~/.bashrc on Linux with the new key if it has already been persisted there.,\n Windows: 'setx OPENAI_API_KEY your-key-here' then restart terminal.\n\n"
                 )
             elif (
-                type(e) == litellm.exceptions.RateLimitError
+                isinstance(e, litellm.exceptions.RateLimitError)
                 and ("exceeded" in str(e).lower() or
                      "insufficient_quota" in str(e).lower())
             ):
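
The distinction PATCH 7/9 describes can be shown with a small self-contained sketch; the exception classes below are made-up stand-ins for litellm's, and the subclass is hypothetical:

```python
class RateLimitError(Exception):
    pass


class RetryableRateLimitError(RateLimitError):
    # Hypothetical subclass, used only to illustrate the difference
    pass


e = RetryableRateLimitError("insufficient_quota")

print(type(e) == RateLimitError)      # False: exact-type check misses the subclass
print(isinstance(e, RateLimitError))  # True: isinstance() also matches subclasses
```
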
From 575c5cb911acf63d6b09b2c395abdc97ad3ea50d Mon Sep 17 00:00:00 2001
From: endolith
Date: Tue, 18 Nov 2025 19:21:11 -0500
Subject: [PATCH 8/9] Use generator expression instead of list comprehension in any()

Replace `any([...])` with `any(...)` using a generator expression. This
improves performance by enabling short-circuiting: any() will stop
evaluating as soon as it finds a True value, rather than building the
entire list first. This is more memory efficient and potentially faster,
especially when the first item matches.
---
 interpreter/core/respond.py | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/interpreter/core/respond.py b/interpreter/core/respond.py
index b93341da37..e37340be70 100644
--- a/interpreter/core/respond.py
+++ b/interpreter/core/respond.py
@@ -321,14 +321,12 @@ def respond(interpreter):
                 code = re.sub(r"import computer\.\w+\n", "pass\n", code)
                 # If it does this it sees the screenshot twice (which is expected jupyter behavior)
                 if any(
-                    [
-                        code.strip().split("\n")[-1].startswith(text)
-                        for text in [
-                            "computer.display.view",
-                            "computer.display.screenshot",
-                            "computer.view",
-                            "computer.screenshot",
-                        ]
+                    code.strip().split("\n")[-1].startswith(text)
+                    for text in [
+                        "computer.display.view",
+                        "computer.display.screenshot",
+                        "computer.view",
+                        "computer.screenshot",
                     ]
                 ):
                     code = code + "\npass"

From ebeffb9d745e66a69eb14989ee33ce80ae774654 Mon Sep 17 00:00:00 2001
From: endolith
Date: Tue, 18 Nov 2025 19:29:41 -0500
Subject: [PATCH 9/9] Fix "unhighlight" typo

The comment describes removing highlighting when code execution finishes.
The codebase consistently uses "highlight" terminology (highlight_active_line
setting, "highlight the active line" in docs). "unlightlight" is not used
elsewhere and appears to be a typo for "unhighlight".
---
 interpreter/core/respond.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/interpreter/core/respond.py b/interpreter/core/respond.py
index e37340be70..d62928aeb8 100644
--- a/interpreter/core/respond.py
+++ b/interpreter/core/respond.py
@@ -387,7 +387,7 @@ def respond(interpreter):
             print(str(e))
             print("Failed to sync your Computer with iComputer. Continuing.")
 
-    # yield final "active_line" message, as if to say, no more code is running. unlightlight active lines
+    # yield final "active_line" message, as if to say, no more code is running. unhighlight active lines
     # (is this a good idea? is this our responsibility? i think so — we're saying what line of code is running! ...?)
     yield {
         "role": "computer",
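
The short-circuiting that PATCH 8/9 relies on can be observed directly. A standalone sketch, assuming a made-up `last_line` value and a `checked` helper added only to count how many prefixes actually get tested:

```python
prefixes = [
    "computer.display.view",
    "computer.display.screenshot",
    "computer.view",
    "computer.screenshot",
]
last_line = "computer.display.view()"  # pretend last line of a code block


def checked(text):
    print("checking", text)  # one line printed per prefix actually tested
    return last_line.startswith(text)


print(any(checked(text) for text in prefixes))    # tests only the first prefix, prints True
print(any([checked(text) for text in prefixes]))  # builds the whole list: tests all four
```
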