diff --git a/docs/guides/streaming-output.mdx b/docs/guides/streaming-output.mdx
new file mode 100644
index 000000000..fceb31a87
--- /dev/null
+++ b/docs/guides/streaming-output.mdx
@@ -0,0 +1,25 @@
+---
+title: Streaming Output
+---
+
+You can stream out all the output from Open Interpreter by adding `stream_output=function` in an `interpreter.chat()` call (optional).
+
+You can also trigger OpenInterpreter's (Y/N) confirmation input remotely by passing `async_input={"input":None, "code_revision":None}`; changing `dict['input']` will trigger the (Y/N) confirmation — place your answer there.
+
+Additionally, you can pass new code to `dict['code_revision']` and it will be executed instead of the last code block (useful for manual editing).
+
+```python
+## ↓ THIS FUNCTION IS CALLED ON ALL OI'S OUTPUTS
+def stream_out_hook(partial, debug = False, *a, **kw):
+ ''' THIS FUNCTION PROCESSES ALL THE OUTPUTS FROM OPEN INTERPRETER '''
+ if debug: print("STREAMING OUT! ",partial)
+ # Replace this function with one that will send the output to YOUR APPLICATION
+
+## ↓ THIS IS OBJECT BEING WATCHED FOR TRIGGERING INPUT
+async_input_data = {"input":None, "code_revision":None}
+## ↑ CHANGING async_input_data["input"] WILL TRIGGER OI'S (Y/N/OTHER) CONFIRMATION INPUT
+
+interpreter.chat(stream_out = stream_out_hook, async_input = async_input_data)
+```
+
+## For a more comprehensive & full example, please check out [examples/stream_out.py](https://github.com/KillianLucas/open-interpreter/blob/main/docs/examples/stream_out.py)
diff --git a/docs/language-models/hosted-models/groq.mdx b/docs/language-models/hosted-models/groq.mdx
new file mode 100644
index 000000000..f0c151e46
--- /dev/null
+++ b/docs/language-models/hosted-models/groq.mdx
@@ -0,0 +1,72 @@
+---
+title: Groq
+---
+
+To use Open Interpreter with a model from Groq, simply run:
+
+
+
+```bash Terminal
+interpreter --model groq/llama3-8b-8192
+```
+
+```python Python
+from interpreter import interpreter
+
+interpreter.llm.model = "groq/llama3-8b-8192"
+interpreter.llm.api_key = ''
+interpreter.chat()
+```
+
+
+
+If you are having any issues when passing the `--model`, try adding the `--api_base`:
+
+
+
+```bash Terminal
+interpreter --api_base "https://api.groq.com/openai/v1" --model groq/llama3-8b-8192 --api_key $GROQ_API_KEY
+```
+
+```python Python
+from interpreter import interpreter
+
+interpreter.llm.model = "groq/llama3-8b-8192"
+interpreter.llm.api_key = ''
+interpreter.llm.api_base = "https://api.groq.com/openai/v1"
+interpreter.llm.context_window = 32000
+interpreter.chat()
+```
+
+
+
+# Supported Models
+
+We support any model on [Groq's models page:](https://console.groq.com/docs/models)
+
+
+
+```bash Terminal
+interpreter --model groq/mixtral-8x7b-32768
+interpreter --model groq/llama3-8b-8192
+interpreter --model groq/llama3-70b-8192
+interpreter --model groq/gemma-7b-it
+```
+
+```python Python
+interpreter.llm.model = "groq/mixtral-8x7b-32768"
+interpreter.llm.model = "groq/llama3-8b-8192"
+interpreter.llm.model = "groq/llama3-70b-8192"
+interpreter.llm.model = "groq/gemma-7b-it"
+```
+
+
+
+# Required Environment Variables
+
+Run `export GROQ_API_KEY=''` or place it in your rc file and re-source
+Set the following environment variables [(click here to learn how)](https://chat.openai.com/share/1062cdd8-62a1-4aa8-8ec9-eca45645971a) to use these models.
+
+| Environment Variable | Description | Where to Find |
+| -------------------- | ---------------------------------------------------- | ------------------------------------------------------------------- |
+| `GROQ_API_KEY` | The API key for authenticating to Groq's services. | [Groq Account Page](https://console.groq.com/keys) |
diff --git a/examples/stream_out.py b/examples/stream_out.py
new file mode 100644
index 000000000..01f504edf
--- /dev/null
+++ b/examples/stream_out.py
@@ -0,0 +1,195 @@
+from interpreter import interpreter, computer
+import time
+# ____ ____ __ __
+# / __ \____ ___ ____ / _/___ / /____ _________ ________ / /____ _____
+# / / / / __ \/ _ \/ __ \ / // __ \/ __/ _ \/ ___/ __ \/ ___/ _ \/ __/ _ \/ ___/
+# / /_/ / /_/ / __/ / / / _/ // / / / /_/ __/ / / /_/ / / / __/ /_/ __/ /
+# \____/ .___/\___/_/ /_/ /___/_/ /_/\__/\___/_/ / .___/_/ \___/\__/\___/_/
+# /_/ /_/
+# ____ _____ ____ _____ _ __ __ ___ _ _ _____
+# / ___|_ _| _ \| ____| / \ | \/ | / _ \| | | |_ _|
+# \___ \ | | | |_) | _| / _ \ | |\/| |_____| | | | | | | | |
+# ___) || | | _ <| |___ / ___ \| | | |_____| |_| | |_| | | |
+# |____/ |_| |_| \_\_____/_/ \_\_| |_| \___/ \___/ |_|
+
+'''
+# THIS EXAMPLE SHOWS HOW TO:
+# 1. Stream-Out All of OpenInterpreter's outputs to another process (like another UI)
+# 2. Async-Input To Trigger (Y/N/Other) Remotely
+# 3. Make Changes to the Code Before Execution
+# - If you answer Other than "y" or "n" your answer will be counted as a User Message
+# - If you manually change the code, the new revised code will be run
+'''
+
+
+interpreter.llm.model = 'mixtral-8x7b-32768'
+interpreter.llm.model = 'llama3-70b-8192'
+interpreter.llm.api_key = 'YOUR_GROQ_API_KEY'  # NOTE(review): a live key was committed here — revoke it and load from an env var instead
+interpreter.llm.api_base = "https://api.groq.com/openai/v1"
+interpreter.llm.context_window = 32000
+
+
+#______________________________________
+# Data placeholders used to do async streaming out
+from collections import deque
+
+block_queue = deque()
+full_queue = deque()
+blocks_unfinished = deque()
+pauseSend = [False]
+
+# Useful for whatsapp and other messaging apps (set to True)
+update_by_blocks = False
+ignore_formats = ['active_line']
+independent_blocks = ['confirmation', 'output'] # These will be sent as whole
+#______________________________________
+
+
+#______________________________________
+# Prep for my implementation
+# from xo_benedict.freshClient import FreshClient #
+# client = FreshClient(_inc=3)
+#______________________________________
+
+## ↓ EXAMPLE FOR THE FINAL METHOD TO STREAM OI'S OUTPUT TO ANOTHER PROGRAM
+def _update(item, debug = False):
+ def _format(lines):
+ hSize = 4
+ return lines.replace("**Plan:**",f"Plan:").replace("**Code:**",f"Code:")
+
+ if not pauseSend[0]:
+ if debug: print(f"::: STREAMING OUT:", item)
+ stream_out = _format(str(item))
+
+ ## ↓↓↓ SEND OUT OI'S OUTPUT
+
+    #client.addText(stream_out) # Just an example, my personal implementation
+ if debug: print(" --CHANGE THIS-- STREAMING OUTPUT: ",stream_out)
+
+ ## ↑↑↑ CHANGE THIS to something that triggers YOUR APPLICATION
+## ↑ You can change this function to one that suits your application
+
+
+
+# ↓ STREAM OUT HOOK - This function is passed into chat() and is called on every output from OpenInterpreter
+def stream_out_hook(partial, debug = False, *a, **kw):
+ # Gets Chunks from OpenInterpreter and sends them to an async update queue
+ ''' THIS FUNCTION PROCESSES ALL THE OUTPUTS FROM OPEN INTERPRETER
+ Prepares all the chunks to be sent out
+ update_by_blocks=True will batch similar messages, False will stream (unless in independent_blocks )
+ '''
+ if debug: print("STREAMING OUT! ",partial)
+
+ ## ↓ Send all the different openinterpreter chunk types to the queue
+
+ if "start" in partial and partial["start"]:
+ if update_by_blocks:
+ blocks_unfinished.append({"content":"",**partial})#,"content_parts":[],**partial})
+ else:
+ full_queue.append(partial)
+ if partial['type'] in independent_blocks or 'format' in partial and partial['format'] in independent_blocks:
+ if update_by_blocks:
+ block_queue.append({"independent":True,**partial})
+ else:
+ full_queue.append({"independent":True,**partial})
+ if debug: print("INDEPENDENT BLOCK", partial)
+ elif 'content' in partial and ('format' not in partial or partial['format'] not in ignore_formats):
+ if update_by_blocks:
+ blocks_unfinished[0]['content'] += partial['content']
+ else:
+ full_queue.append(partial['content'])
+ # blocks[-1]['content_parts'].append(partial['content'])
+ if 'end' in partial:
+ if debug: print("EEEnd",blocks_unfinished, partial)
+ fin = {**partial}
+ if update_by_blocks:
+ blocks_unfinished[0]['end'] = partial['end']
+ fin = blocks_unfinished.popleft()
+ block_queue.append(fin)
+ else:
+ full_queue.append(fin)
+
+ if debug: print("FINISHED BLOCK", fin)
+
+
+
+
+#______________________________________
+# Continuously Receive OpenInterpreter Chunks and Prepare them to be Sent Out
+def update_queue(debug = False, *a, **kw):
+ target = full_queue
+ if update_by_blocks:
+ target = block_queue
+ c = 0
+ while(True):
+ while(len(target) > 0):
+ leftmost_item = target.popleft()
+ if debug: print(f"{c} ::: UPDATING QUEUE:", leftmost_item)
+
+ #
+ if "start" in leftmost_item:
+ if "type" in leftmost_item and leftmost_item["type"] == "code":
+ _update("__________________________________________\n")
+ pauseSend[0] = True
+ elif "end" in leftmost_item:
+ if "type" in leftmost_item and leftmost_item["type"] == "code":
+ pauseSend[0] = False
+ elif isinstance(leftmost_item, str): _update(leftmost_item)
+ else:
+ content = "" if "content" not in leftmost_item else leftmost_item["content"]
+ if "content" in leftmost_item and not isinstance(leftmost_item["content"],str):
+ content = leftmost_item['content']['content'] if not isinstance(leftmost_item['content'],str) else leftmost_item['content']
+ if len(content) >0 and content[0] == "\n": content = content[1:]
+ if "type" in leftmost_item and leftmost_item["type"] in ["confirmation"]:
+                    if len(content)>0 and content[0] != "<" and content[-1] != ">": content = ""+content+"\n"
+                    _update(content+" Would you like to run this code? (Y/N)\n"
+                        +" You can also edit it before accepting\n__________________________________________\n")
+ elif "type" in leftmost_item and leftmost_item["type"] == 'console':
+ if len(content)>0 and content != "\n":
+ if debug: print(f"::: content :::{content}:::")
+                        if content[0] != "<" and content[-1] != ">": content = ""+content+"\n"
+                        _update(f"OUTPUT:\n{content}\n")
+ else:
+ _update(leftmost_item)
+ time.sleep(0.1)
+
+from threading import Thread
+update_queue_thread = Thread(target=update_queue)
+update_queue_thread.start()
+# ↑ Start Async Thread to Process Chunks Before streaming out
+#______________________________________
+
+# Run tests, one after the other
+def test_async_input(tests):
+ for i, answer, code_revision in tests:
+ # Wait {i} seconds
+ while(i>0):
+ if i%5==0: print(f"::: Testing Input:\"{answer}\" with code:{code_revision} in: {i} seconds")
+ time.sleep(1)
+ i-=1
+
+ ## ↓ TRIGGER EXTERNAL INPUT
+ async_input_data["input"] = answer
+ async_input_data["code_revision"] = code_revision
+ ## ↑ OPTIONAL CODE CHANGES
+
+ pass #print(" TEST DONE ", async_input_data)
+
+
+## ↓ THIS IS OBJECT BEING WATCHED FOR TRIGGERING INPUT
+async_input_data = {"input":None, "code_revision":None}
+## ↑ CHANGING async_input_data["input"] WILL TRIGGER OI'S INPUT
+if __name__ == "__main__":
+
+ ## Test automatic external trigger for (Y/N/Other) + code revisions
+ tests = [
+ # seconds_to_wait, input_response, new_code_to_run
+ [20, "Y", "print('SUCCESS!!!!!!!!')"],
+ # [20,"N",None],
+ # [20,"print hello {username from host} instead", None],
+ ]
+ Thread(target=test_async_input, args=[tests,]).start()
+
+ # Start OpenInterpreter
+ '''# Pass in stream_out_hook function, and async_input_data '''
+ interpreter.chat(stream_out = stream_out_hook, async_input = async_input_data)
diff --git a/interpreter/core/core.py b/interpreter/core/core.py
index 3a98f78b3..c4767ef5f 100644
--- a/interpreter/core/core.py
+++ b/interpreter/core/core.py
@@ -73,7 +73,8 @@ def __init__(
skills_path=None,
import_skills=False,
multi_line=False,
- contribute_conversation=False
+ contribute_conversation=False,
+ stream_out=None,
):
# State
self.messages = [] if messages is None else messages
@@ -92,6 +93,7 @@ def __init__(
self.in_terminal_interface = in_terminal_interface
self.multi_line = multi_line
self.contribute_conversation = contribute_conversation
+ self.stream_out = stream_out
# Loop messages
self.force_task_completion = force_task_completion
@@ -143,7 +145,7 @@ def will_contribute(self):
overrides = self.offline or not self.conversation_history or self.disable_telemetry
return self.contribute_conversation and not overrides
- def chat(self, message=None, display=True, stream=False, blocking=True):
+ def chat(self, message=None, display=True, stream=False, blocking=True, stream_out=None, async_input=None):
try:
self.responding = True
if self.anonymous_telemetry:
@@ -170,7 +172,14 @@ def chat(self, message=None, display=True, stream=False, blocking=True):
return self._streaming_chat(message=message, display=display)
# If stream=False, *pull* from the stream.
- for _ in self._streaming_chat(message=message, display=display):
+ for chunk in self._streaming_chat(message=message, display=display, async_input=async_input):
+ # Send out the stream of incoming chunks
+ # This is useful if you want to use OpenInterpreter from a different interface
+ if self.debug: print(f" ::: Streaming out: {chunk}")
+            if stream_out: stream_out(chunk) # Passed stream_out parameter takes priority over self.stream_out
+ elif self.stream_out: self.stream_out(chunk)
+
+ # if not streaming_out, then just *pull* from the stream
pass
# Return new messages
@@ -193,13 +202,13 @@ def chat(self, message=None, display=True, stream=False, blocking=True):
raise
- def _streaming_chat(self, message=None, display=True):
+ def _streaming_chat(self, message=None, display=True, async_input = None):
# Sometimes a little more code -> a much better experience!
# Display mode actually runs interpreter.chat(display=False, stream=True) from within the terminal_interface.
# wraps the vanilla .chat(display=False) generator in a display.
# Quite different from the plain generator stuff. So redirect to that
if display:
- yield from terminal_interface(self, message)
+ yield from terminal_interface(self, message, async_input=async_input)
return
# One-off message
diff --git a/interpreter/core/respond.py b/interpreter/core/respond.py
index 8a7708699..cde8eea4e 100644
--- a/interpreter/core/respond.py
+++ b/interpreter/core/respond.py
@@ -143,7 +143,6 @@ def respond(interpreter):
# Is this language enabled/supported?
if interpreter.computer.terminal.get_language(language) == None:
output = f"`{language}` disabled or not supported."
-
yield {
"role": "computer",
"type": "console",
@@ -175,6 +174,12 @@ def respond(interpreter):
# We need to tell python what we (the generator) should do if they exit
break
+ # Check if the code was changed.
+ # Gives user a chance to manually edit the code before execution
+ if (interpreter.messages[-1]["type"] == "code" and code != interpreter.messages[-1]["content"]) or (interpreter.messages[-2]["type"] == "code" and code != interpreter.messages[-2]["content"]):
+ print("(Code has been modified)")
+ code = interpreter.messages[-1]["content"] if interpreter.messages[-1]["type"] == "code" else interpreter.messages[-2]["content"]
+
# don't let it import computer — we handle that!
if interpreter.computer.import_computer_api and language == "python":
code = code.replace("import computer\n", "pass\n")
diff --git a/interpreter/terminal_interface/terminal_interface.py b/interpreter/terminal_interface/terminal_interface.py
index 7d2513115..0908c8927 100644
--- a/interpreter/terminal_interface/terminal_interface.py
+++ b/interpreter/terminal_interface/terminal_interface.py
@@ -26,6 +26,7 @@
from .utils.display_output import display_output
from .utils.find_image_path import find_image_path
from .utils.cli_input import cli_input
+from .utils.async_input import input_confirmation
# Add examples to the readline history
examples = [
@@ -34,8 +35,9 @@
"Make me a simple Pomodoro app.",
"Open Chrome and go to YouTube.",
"Can you set my system to light mode?",
-]
+] # TODO Add previous session's lines to readline history!
random.shuffle(examples)
+
try:
for example in examples:
readline.add_history(example)
@@ -43,8 +45,7 @@
# If they don't have readline, that's fine
pass
-
-def terminal_interface(interpreter, message):
+def terminal_interface(interpreter, message, async_input = None):
# Auto run and offline (this.. this isnt right) don't display messages.
# Probably worth abstracting this to something like "debug_cli" at some point.
if not interpreter.auto_run and not interpreter.offline:
@@ -69,14 +70,19 @@ def terminal_interface(interpreter, message):
else:
interactive = True
+ input_feedback = None
active_block = None
voice_subprocess = None
while True:
if interactive:
- ### This is the primary input for Open Interpreter.
- message = cli_input("> ").strip() if interpreter.multi_line else input("> ").strip()
-
+ if input_feedback is None:
+ ### This is the primary input for Open Interpreter.
+ message = cli_input("> ").strip() if interpreter.multi_line else input("> ").strip()
+ else:
+ ### User has requested changes before execution
+ message = input_feedback
+ input_feedback = None
try:
# This lets users hit the up arrow key for past messages
readline.add_history(message)
@@ -254,18 +260,41 @@ def terminal_interface(interpreter, message):
if should_scan_code:
scan_code(code, language, interpreter)
- response = input(
- " Would you like to run this code? (y/n)\n\n "
- )
+                    ## ↓ JOINT INPUT METHOD - FROM BOTH CLASSIC INPUT() AND EXTERNAL INPUT
+ response, code_revision = input_confirmation(" Would you like to run this code? (y/n)\n\n ", async_input = async_input)
+                    ## ↑ ALLOWS ACCEPTING (Y/N/Other Requests) AS WELL AS MANUAL CODE CHANGES FROM AN EXTERNAL PROCESS
+
+ # Resetting async_input for next time
+ if async_input != None: async_input["input"]=None; async_input["code_revision"]=None
+ if code_revision is not None:
+ ## ↓ USER MADE MANUAL CHANGES TO THE CODE
+ final_code = f"`\n\n{code_revision}\n"
+ # Changes were made, set the new code, and replace interpreter's last content
+ code = final_code
+ interpreter.messages[-1]["content"] = final_code
+
print("") # <- Aesthetic choice
- if response.strip().lower() == "y":
+ if response != None and response.strip().lower() == "y":
# Create a new, identical block where the code will actually be run
# Conveniently, the chunk includes everything we need to do this:
active_block = CodeBlock()
active_block.margin_top = False # <- Aesthetic choice
active_block.language = language
active_block.code = code
+
+ elif response != None and response != "" and response.strip().lower() != "n":
+ # User requesting changes to the code before execution.
+ # interpreter.messages.append(
+ # {
+ # "role": "user",
+ # "type": "message",
+ # "content": response,
+ # }
+ # )
+ ## ↓ ALLOW USER TO REQUEST OI TO MAKE CHANGES TO THE CODE BEFORE EXECUTION
+ input_feedback = response
+ break
else:
# User declined to run code.
interpreter.messages.append(
diff --git a/interpreter/terminal_interface/utils/async_input.py b/interpreter/terminal_interface/utils/async_input.py
new file mode 100644
index 000000000..16075e722
--- /dev/null
+++ b/interpreter/terminal_interface/utils/async_input.py
@@ -0,0 +1,72 @@
+import threading
+import time
+
+active_input = {"active":None}
+data_wrapper = {"async_input_data":{}, "canSendInput":False} # out, canSave
+
+# This uses the classic input method (called from a thread)
+def classic_input(input_msg):
+ try:
+ ## ↓ GET CLASSIC INPUT()
+ user_input = input()
+
+ # Send classic input back to input_confirmation
+ async_input_data, canSendInput = data_wrapper["async_input_data"],data_wrapper["canSendInput"]
+ if canSendInput:
+ # set origin of input to classic
+ async_input_data["origin"] = "classic_input"
+ if user_input == "":
+ async_input_data["input"] = "N"
+ else:
+ # (Trigger) Send user input (Y/N/Other Requests)
+ async_input_data["input"] = user_input
+ except:
+ async_input_data, canSendInput = data_wrapper["async_input_data"], data_wrapper["canSendInput"]
+ if canSendInput:
+ async_input_data["input"] = "N"
+
+
+## ↓ JOINT INPUT METHOD - FROM BOTH CLASSIC INPUT() AND EXTERNAL INPUT
+# async_input:dict = {"input":None, "code_revision":None}
+def input_confirmation(input_msg, async_input=None): # async_input:dict {"input":None,""}
+ ''' Changing `async_input` dict from an external process
+ can trigger confirmation, just like using input()
+ and also allows for manual code changes before execution
+ '''
+ if async_input == None:
+ # in case no async_input dict was provided, run normally
+ response = input(input_msg)
+ return response, None # input, code_revision
+
+ # Print the question here (Y/N)
+ print(input_msg)
+
+ # Wrap the input data, Enable classic input from
+ data_wrapper["async_input_data"], data_wrapper["canSendInput"] = async_input, True
+ # Start the classic input thread (if one isnt already active)
+ if active_input["active"] is None:
+ # If no other classic input thread is open, create one
+ threading.Thread(target=classic_input, args=[input_msg,]).start()
+ if async_input["input"] is not None: pass # Skipping, confirmation already exists
+
+
+ ## ↓ WAIT FOR EITHER EXTERNAL INPUT OR CLASSIC INPUT() TO FINISH
+ ## async_input["input"] can change from external process or from classic input()
+
+ while async_input["input"] is None:
+ time.sleep(0.1)
+
+ ## ↑ WAIT UNTIL CLASSIC INPUT() FINISHES OR EXTERNAL INPUT FINISHES
+
+
+ if "origin" in async_input and async_input["origin"] == "classic_input":
+ pass # Got answer from classic input
+ else:
+ # Got answer from External async input
+ print(f"Got external input: {async_input}")
+
+ # Disable input until next confirmation
+ data_wrapper["canSendInput"] = False
+
+ # return input from either classic or external + code revisions
+ return async_input["input"], async_input["code_revision"]