From 012f450e43046dc3128b27426bdeef9c4a3f46e6 Mon Sep 17 00:00:00 2001 From: Tami Date: Tue, 7 May 2024 21:23:09 +0300 Subject: [PATCH 1/6] Added streaming out functionality. Pass a function to chat(stream_out = myFunction) to process all of OI's output --- interpreter/core/core.py | 12 ++++++++++-- stream_out.py | 17 +++++++++++++++++ 2 files changed, 27 insertions(+), 2 deletions(-) create mode 100644 stream_out.py diff --git a/interpreter/core/core.py b/interpreter/core/core.py index f33532e02..f8e35456b 100644 --- a/interpreter/core/core.py +++ b/interpreter/core/core.py @@ -73,6 +73,7 @@ def __init__( skills_path=None, import_skills=False, multi_line=False, + stream_out=None ): # State self.messages = [] if messages is None else messages @@ -90,6 +91,7 @@ def __init__( self.disable_telemetry = disable_telemetry self.in_terminal_interface = in_terminal_interface self.multi_line = multi_line + self.stream_out = stream_out # Loop messages self.force_task_completion = force_task_completion @@ -136,7 +138,7 @@ def wait(self): def anonymous_telemetry(self) -> bool: return not self.disable_telemetry and not self.offline - def chat(self, message=None, display=True, stream=False, blocking=True): + def chat(self, message=None, display=True, stream=False, blocking=True, stream_out=None): try: self.responding = True if self.anonymous_telemetry: @@ -163,7 +165,13 @@ def chat(self, message=None, display=True, stream=False, blocking=True): return self._streaming_chat(message=message, display=display) # If stream=False, *pull* from the stream. 
- for _ in self._streaming_chat(message=message, display=display): + for chunk in self._streaming_chat(message=message, display=display): + # Send out the stream of incoming chunks + # This is useful if you want to use OpenInterpreter from a different interface + if self.debug: print(f" ::: Streaming out: {chunk}") + if stream_out: stream_out(chunk) # Passed stream_out paramater takes priority over self.stream_out + elif self.stream_out: self.stream_out(chunk) + # if not streaming_out, then just *pull* from the stream pass # Return new messages diff --git a/stream_out.py b/stream_out.py new file mode 100644 index 000000000..784c4f565 --- /dev/null +++ b/stream_out.py @@ -0,0 +1,17 @@ +from interpreter import interpreter
+# ____ ____ __ __
+# / __ \____ ___ ____ / _/___ / /____ _________ ________ / /____ _____
+# / / / / __ \/ _ \/ __ \ / // __ \/ __/ _ \/ ___/ __ \/ ___/ _ \/ __/ _ \/ ___/
+# / /_/ / /_/ / __/ / / / _/ // / / / /_/ __/ / / /_/ / / / __/ /_/ __/ /
+# \____/ .___/\___/_/ /_/ /___/_/ /_/\__/\___/_/ / .___/_/ \___/\__/\___/_/
+# /_/ /_/
+# from interpreter import interpreter
+interpreter.llm.model = 'mixtral-8x7b-32768'
+interpreter.llm.api_key = 'REDACTED_GROQ_API_KEY'
+interpreter.llm.api_base = "https://api.groq.com/openai/v1"
+interpreter.llm.context_window = 32000
+
+blocks = []
+def out(partial, *a, **kw):
+ print("STREAMING OUT! 
",partial) +interpreter.chat(stream_out = out) From 1ea2af0b63ac86d72a8b4d8658386b5b8702ca23 Mon Sep 17 00:00:00 2001 From: Tami Date: Wed, 8 May 2024 18:47:34 +0300 Subject: [PATCH 2/6] Added Streaming_Out Hook for OI's Output, Added Async_Input to accept (Y/N/changes) from External process, Enabled Manual Code Changes, Enabled Make Changes Before Running (with ai) - Pre-Cleanup --- interpreter/core/core.py | 8 +- interpreter/core/respond.py | 20 +- .../terminal_interface/terminal_interface.py | 50 ++++- .../terminal_interface/utils/async_input.py | 204 ++++++++++++++++++ 4 files changed, 267 insertions(+), 15 deletions(-) create mode 100644 interpreter/terminal_interface/utils/async_input.py diff --git a/interpreter/core/core.py b/interpreter/core/core.py index f8e35456b..aedba890e 100644 --- a/interpreter/core/core.py +++ b/interpreter/core/core.py @@ -138,7 +138,7 @@ def wait(self): def anonymous_telemetry(self) -> bool: return not self.disable_telemetry and not self.offline - def chat(self, message=None, display=True, stream=False, blocking=True, stream_out=None): + def chat(self, message=None, display=True, stream=False, blocking=True, stream_out=None, async_input=None): try: self.responding = True if self.anonymous_telemetry: @@ -165,7 +165,7 @@ def chat(self, message=None, display=True, stream=False, blocking=True, stream_o return self._streaming_chat(message=message, display=display) # If stream=False, *pull* from the stream. 
- for chunk in self._streaming_chat(message=message, display=display): + for chunk in self._streaming_chat(message=message, display=display, async_input=async_input): # Send out the stream of incoming chunks # This is useful if you want to use OpenInterpreter from a different interface if self.debug: print(f" ::: Streaming out: {chunk}") @@ -194,13 +194,13 @@ def chat(self, message=None, display=True, stream=False, blocking=True, stream_o raise - def _streaming_chat(self, message=None, display=True): + def _streaming_chat(self, message=None, display=True, async_input = None): # Sometimes a little more code -> a much better experience! # Display mode actually runs interpreter.chat(display=False, stream=True) from within the terminal_interface. # wraps the vanilla .chat(display=False) generator in a display. # Quite different from the plain generator stuff. So redirect to that if display: - yield from terminal_interface(self, message) + yield from terminal_interface(self, message, async_input=async_input) return # One-off message diff --git a/interpreter/core/respond.py b/interpreter/core/respond.py index 84957dd23..a7fd1117f 100644 --- a/interpreter/core/respond.py +++ b/interpreter/core/respond.py @@ -128,6 +128,7 @@ def respond(interpreter): try: # What language/code do you want to run? language = interpreter.messages[-1]["format"].lower().strip() + # print("@@@@@@@@@@@@@@@@@@@\n"+str(interpreter.messages)+"\n@@@@@@@@@@") code = interpreter.messages[-1]["content"] if language == "text": @@ -138,7 +139,7 @@ def respond(interpreter): # Is this language enabled/supported? if interpreter.computer.terminal.get_language(language) == None: output = f"`{language}` disabled or not supported." - + # print(":::::::::1") yield { "role": "computer", "type": "console", @@ -149,12 +150,14 @@ def respond(interpreter): # Let the response continue so it can deal with the unsupported code in another way. Also prevent looping on the same piece of code. 
if code != last_unsupported_code: last_unsupported_code = code + # print(":::::::::2") continue else: break # Yield a message, such that the user can stop code execution if they want to try: + # print(":::::::::3") yield { "role": "computer", "type": "confirmation", @@ -169,9 +172,17 @@ def respond(interpreter): # The user might exit here. # We need to tell python what we (the generator) should do if they exit break + # print(":::::::::4") + # Check if code changed. Gives user a chance to manually edit the code before execution + if (interpreter.messages[-1]["type"] == "code" and code != interpreter.messages[-1]["content"]) or (interpreter.messages[-2]["type"] == "code" and code != interpreter.messages[-2]["content"]): + code = interpreter.messages[-1]["content"] if interpreter.messages[-1]["type"] == "code" else interpreter.messages[-2]["content"] + print("(Code has been modified)") + # print("@@@CODE CHANGED!@@@@@!!!!!!!@@@@@@@@@@@\n"+str(interpreter.messages)+"\n@@@@@!!!!!!!!!!!!!!@@@@@") + # print(f"&&&&CODE:::{code}:::") # don't let it import computer — we handle that! 
if interpreter.computer.import_computer_api and language == "python": + # print(":::::::::5") code = code.replace("import computer\n", "pass\n") code = re.sub( r"import computer\.(\w+) as (\w+)", r"\2 = computer.\1", code @@ -198,7 +209,7 @@ def respond(interpreter): interpreter.computer.verbose = interpreter.verbose interpreter.computer.debug = interpreter.debug interpreter.computer.emit_images = interpreter.llm.supports_vision - + # print(":::::::::6") # sync up the interpreter's computer with your computer try: if interpreter.sync_computer and language == "python": @@ -215,8 +226,11 @@ def respond(interpreter): print(str(e)) print("Continuing...") - ## ↓ CODE IS RUN HERE + # print(":::::::::7") + + ## ↓ CODE IS RUN HERE + # print("::::::::::::FINAL RUNNING CODE::::"+code+":::") for line in interpreter.computer.run(language, code, stream=True): yield {"role": "computer", **line} diff --git a/interpreter/terminal_interface/terminal_interface.py b/interpreter/terminal_interface/terminal_interface.py index e6718c9e9..d73cdd3a7 100644 --- a/interpreter/terminal_interface/terminal_interface.py +++ b/interpreter/terminal_interface/terminal_interface.py @@ -14,6 +14,8 @@ import re import subprocess import time +import threading +import multiprocessing from ..core.utils.scan_code import scan_code from ..core.utils.system_debug_info import system_info @@ -26,8 +28,9 @@ from .utils.display_output import display_output from .utils.find_image_path import find_image_path from .utils.cli_input import cli_input - +from .utils.async_input import input_confirmation # Add examples to the readline history +# TODO Add previous session's lines to readline history examples = [ "How many files are on my desktop?", "What time is it in Seattle?", @@ -43,8 +46,7 @@ # If they don't have readline, that's fine pass - -def terminal_interface(interpreter, message): +def terminal_interface(interpreter, message, async_input = None): # Auto run and offline (this.. 
this isnt right) don't display messages. # Probably worth abstracting this to something like "debug_cli" at some point. if not interpreter.auto_run and not interpreter.offline: @@ -69,14 +71,19 @@ def terminal_interface(interpreter, message): else: interactive = True + input_feedback = None active_block = None voice_subprocess = None while True: if interactive: - ### This is the primary input for Open Interpreter. - message = cli_input("> ").strip() if interpreter.multi_line else input("> ").strip() - + if input_feedback is None: + ### This is the primary input for Open Interpreter. + message = cli_input("> ").strip() if interpreter.multi_line else input("> ").strip() + else: + # User has already left a message using the confirmation input + message = input_feedback + input_feedback = None try: # This lets users hit the up arrow key for past messages readline.add_history(message) @@ -254,18 +261,45 @@ def terminal_interface(interpreter, message): if should_scan_code: scan_code(code, language, interpreter) - response = input( + response, code_revision = input_confirmation( " Would you like to run this code? 
(y/n)\n\n " + , async_input = async_input ) + # print("::: input_confirmation", response, code_revision) + # Resetting async_input for next time + # if async_input != None: async_input[0]=None; async_input[1]=None + if async_input != None: async_input["input"]=None; async_input["code_revision"]=None + + # Allow user to manually edit the code before execution + if code_revision is not None: + #Replacing Interpreter Last Content + # print("XXXXXX current code:::"+str(interpreter.messages)+":::") + final_code = f"`\n\n{code_revision}\n" + interpreter.messages[-1]["content"] = final_code + # print("YYYYYY current code:::"+str(interpreter.messages)+":::") + + code = final_code print("") # <- Aesthetic choice - if response.strip().lower() == "y": + if response != None and response.strip().lower() == "y": # Create a new, identical block where the code will actually be run # Conveniently, the chunk includes everything we need to do this: active_block = CodeBlock() active_block.margin_top = False # <- Aesthetic choice active_block.language = language active_block.code = code + + elif response != None and response != "" and response.strip().lower() != "n": + # User requesting changes to the code before execution. + # interpreter.messages.append( + # { + # "role": "user", + # "type": "message", + # "content": response, + # } + # ) + input_feedback = response + break else: # User declined to run code. 
interpreter.messages.append( diff --git a/interpreter/terminal_interface/utils/async_input.py b/interpreter/terminal_interface/utils/async_input.py new file mode 100644 index 000000000..ee6bc9986 --- /dev/null +++ b/interpreter/terminal_interface/utils/async_input.py @@ -0,0 +1,204 @@ +import threading +# import multiprocessing +# import ctypes +# import inspect +import time +# import io, sys + +# def _async_raise(tid, exctype): +# '''Raises an exception in the threads with id tid''' +# if not inspect.isclass(exctype): +# raise TypeError("Only types can be raised (not instances)") +# res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), +# ctypes.py_object(exctype)) +# if res == 0: +# raise ValueError("invalid thread id") +# elif res != 1: +# # "if it returns a number greater than one, you're in trouble, +# # and you should call it again with exc=NULL to revert the effect" +# ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), None) +# raise SystemError("PyThreadState_SetAsyncExc failed") + +# class ThreadWithExc(threading.Thread): +# '''A thread class that supports raising an exception in the thread from +# another thread. +# ''' +# def _get_my_tid(self): +# """determines this (self's) thread id + +# CAREFUL: this function is executed in the context of the caller +# thread, to get the identity of the thread represented by this +# instance. +# """ +# if not self.is_alive(): # Note: self.isAlive() on older version of Python +# raise threading.ThreadError("the thread is not active") + +# # do we have it cached? 
+# if hasattr(self, "_thread_id"): +# return self._thread_id + +# # no, look for it in the _active dict +# for tid, tobj in threading._active.items(): +# if tobj is self: +# self._thread_id = tid +# return tid + +# # TODO: in python 2.6, there's a simpler way to do: self.ident + +# raise AssertionError("could not determine the thread's id") + +# def raise_exc(self, exctype): +# """Raises the given exception type in the context of this thread. + +# If the thread is busy in a system call (time.sleep(), +# socket.accept(), ...), the exception is simply ignored. + +# If you are sure that your exception should terminate the thread, +# one way to ensure that it works is: + +# t = ThreadWithExc( ... ) +# ... +# t.raise_exc( SomeException ) +# while t.isAlive(): +# time.sleep( 0.1 ) +# t.raise_exc( SomeException ) + +# If the exception is to be caught by the thread, you need a way to +# check that your thread has caught it. + +# CAREFUL: this function is executed in the context of the +# caller thread, to raise an exception in the context of the +# thread represented by this instance. 
+# """ +# _async_raise( self._get_my_tid(), exctype ) + +active_input = {"active":None} +# def classic_input(queue): +# data_wrapper = [[], False] # out, canSave +data_wrapper = {"async_input_data":{}, "canSendInput":False} # out, canSave +# def classic_input(ret, prompt): +def classic_input(prompt): + + # async_input = ret()[0] + # print("Simulating input:", simulated_input) + # s = sys + # print("::::") + + # ret = queue.get() + # final = [None,None] + try: + # stdin = open(0) + if False: + final = [None] + def inputLayer(final): + active_input["active"] = "blocked" + # last = input(prompt) + last = input() + + # print("LAST:",last) + final[0] = last + active_input["active"] = None + threading.Thread(target=inputLayer, args=[final,]).start() + # print("AWAITING INPUT LAYER") + while(final[0] is None): + time.sleep(0.111) + res = final[0] + res = input() + # print("INPUT LAYER RES:",res) + # print("********************* (Y/N)", end="", flush=True) + # res = stdin.readline() + # queue.put(x) + # print("$$$ canSendInput {canSendInput} GOT CLASSIC INPUT:",res, flush=True) + async_input_data, canSendInput = data_wrapper["async_input_data"],data_wrapper["canSendInput"] + # print(f"$$$ canSendInput {canSendInput} ret {async_input_data} GOT CLASSIC INPUT:",res, flush=True) + + if canSendInput: + async_input_data["origin"] = "classic_input" + if res != "": + async_input_data["input"] = res + # final[0] = res + + # queue.put(res) + elif "$none$" in res: + # print("$$$$$$$$ SKIPPING CLASSIC") + async_input_data["input"] = None + else: + async_input_data["input"] = "N" + # final[0] = "N" + # queue.put("N") + + except Exception as e: + # stdin = open(0) + + # print("::: !!! 
stopped classic input", e) + async_input_data, canSendInput = data_wrapper["async_input_data"], data_wrapper["canSendInput"] + if canSendInput: + async_input_data["input"] = "N" + # final[0] = "N" + # queue.put("N") + +def input_confirmation(prompt, async_input=None): + if async_input == None: + # incase no async_input method was provided + response = input(prompt) + return response, None # classic input, code_revision + # response_revise = [None, None] #response, code_revision + + # finally: + # queue.put(final) + # threading.Thread(target=classic_input, args=[async_input,prompt]).start() + print(prompt) + # data_wrapper[0] = async_input#, True + # data_wrapper[1] = True + data_wrapper["async_input_data"], data_wrapper["canSendInput"] = async_input, True + if active_input["active"] is None: + # classic = ThreadWithExc(target=classic_input, args=[async_input,prompt]) + # classic = ThreadWithExc(target=classic_input, args=[prompt,]) + # classic.start() + threading.Thread(target=classic_input, args=[prompt,]).start() + + # getInput = lambda : [async_input] + # queue = multiprocessing.Queue() + + # queue.put(async_input) + # classic = multiprocessing.Process(target=classic_input, args=(queue,)) + # print(prompt) + # classic.start() + if async_input["input"] != None: print("::: skipping, confirmation already exists:",async_input) + while async_input["input"] is None:# and queue.empty(): + # awaiting for either classic_input or async_input to return a response + # print(".",end="") + time.sleep(0.22) #1111 + # Simulate user input + # if len(async_input) >= 3: + if "origin" in async_input and async_input["origin"] == "classic_input": + # Got answer from classic input + pass + # print("--- ORIGIN ",async_input.pop("origin")) + # print("GOT ANSWER FROM INPUT", async_input) + else: + print(f"(Got external input) {async_input}") + # Got answer from External async input + pass + # simulated_input = "$none$" + # print("GOT ANSWER FROM EXTERNAL INPUT",async_input) + + # 
sys.stdin = open("/dev/stdin") # Redirect stdin to an open file (Linux) + # Now the waiting thread should receive the input + # classic_input.sim(simulated_input) + # classic.raise_exc(KeyboardInterrupt) + data_wrapper["canSendInput"] = False + # sys.stdin.write(simulated_input + "\n") + # sys.stdin.flush() + # process.terminate() + # process.join() + # classic.raise_exc(KeyboardInterrupt) + # if not queue.empty(): + # answer = queue.get() + # print("$$$$$$$$$ GOT CLASSIC INPUT", answer) + # async_input[0] = answer + # else: + # print("XXXXXX TERMINATING CLASSIC INPUT") + # classic.terminate() + # print("::: Done Async_Input", async_input) + return async_input["input"], async_input["code_revision"] From a12fb759320209c3e83a3f370c08d7d2d38b7314 Mon Sep 17 00:00:00 2001 From: Tami Date: Wed, 8 May 2024 20:00:18 +0300 Subject: [PATCH 3/6] Added Streaming_Out Hook for OIs Output, Added Async_Input to accept (Y/N/changes) from External process, Enabled Manual Code Changes, Enabled Make Changes Before Running (with ai) - Cleanup Done --- interpreter/core/core.py | 1 + interpreter/core/respond.py | 19 +- .../terminal_interface/terminal_interface.py | 27 +-- .../terminal_interface/utils/async_input.py | 219 ++++-------------- 4 files changed, 61 insertions(+), 205 deletions(-) diff --git a/interpreter/core/core.py b/interpreter/core/core.py index aedba890e..78601f35f 100644 --- a/interpreter/core/core.py +++ b/interpreter/core/core.py @@ -171,6 +171,7 @@ def chat(self, message=None, display=True, stream=False, blocking=True, stream_o if self.debug: print(f" ::: Streaming out: {chunk}") if stream_out: stream_out(chunk) # Passed stream_out paramater takes priority over self.stream_out elif self.stream_out: self.stream_out(chunk) + # if not streaming_out, then just *pull* from the stream pass diff --git a/interpreter/core/respond.py b/interpreter/core/respond.py index a7fd1117f..c7e445c3f 100644 --- a/interpreter/core/respond.py +++ b/interpreter/core/respond.py @@ -128,7 
+128,6 @@ def respond(interpreter): try: # What language/code do you want to run? language = interpreter.messages[-1]["format"].lower().strip() - # print("@@@@@@@@@@@@@@@@@@@\n"+str(interpreter.messages)+"\n@@@@@@@@@@") code = interpreter.messages[-1]["content"] if language == "text": @@ -139,7 +138,6 @@ def respond(interpreter): # Is this language enabled/supported? if interpreter.computer.terminal.get_language(language) == None: output = f"`{language}` disabled or not supported." - # print(":::::::::1") yield { "role": "computer", "type": "console", @@ -150,14 +148,12 @@ def respond(interpreter): # Let the response continue so it can deal with the unsupported code in another way. Also prevent looping on the same piece of code. if code != last_unsupported_code: last_unsupported_code = code - # print(":::::::::2") continue else: break # Yield a message, such that the user can stop code execution if they want to try: - # print(":::::::::3") yield { "role": "computer", "type": "confirmation", @@ -172,17 +168,15 @@ def respond(interpreter): # The user might exit here. # We need to tell python what we (the generator) should do if they exit break - # print(":::::::::4") - # Check if code changed. Gives user a chance to manually edit the code before execution + + # Check if the code was changed. 
+ # Gives user a chance to manually edit the code before execution if (interpreter.messages[-1]["type"] == "code" and code != interpreter.messages[-1]["content"]) or (interpreter.messages[-2]["type"] == "code" and code != interpreter.messages[-2]["content"]): - code = interpreter.messages[-1]["content"] if interpreter.messages[-1]["type"] == "code" else interpreter.messages[-2]["content"] print("(Code has been modified)") - # print("@@@CODE CHANGED!@@@@@!!!!!!!@@@@@@@@@@@\n"+str(interpreter.messages)+"\n@@@@@!!!!!!!!!!!!!!@@@@@") - # print(f"&&&&CODE:::{code}:::") + code = interpreter.messages[-1]["content"] if interpreter.messages[-1]["type"] == "code" else interpreter.messages[-2]["content"] # don't let it import computer — we handle that! if interpreter.computer.import_computer_api and language == "python": - # print(":::::::::5") code = code.replace("import computer\n", "pass\n") code = re.sub( r"import computer\.(\w+) as (\w+)", r"\2 = computer.\1", code @@ -209,7 +203,7 @@ def respond(interpreter): interpreter.computer.verbose = interpreter.verbose interpreter.computer.debug = interpreter.debug interpreter.computer.emit_images = interpreter.llm.supports_vision - # print(":::::::::6") + # sync up the interpreter's computer with your computer try: if interpreter.sync_computer and language == "python": @@ -226,11 +220,8 @@ def respond(interpreter): print(str(e)) print("Continuing...") - # print(":::::::::7") - ## ↓ CODE IS RUN HERE - # print("::::::::::::FINAL RUNNING CODE::::"+code+":::") for line in interpreter.computer.run(language, code, stream=True): yield {"role": "computer", **line} diff --git a/interpreter/terminal_interface/terminal_interface.py b/interpreter/terminal_interface/terminal_interface.py index d73cdd3a7..50ff447d6 100644 --- a/interpreter/terminal_interface/terminal_interface.py +++ b/interpreter/terminal_interface/terminal_interface.py @@ -29,16 +29,17 @@ from .utils.find_image_path import find_image_path from .utils.cli_input import 
cli_input from .utils.async_input import input_confirmation + # Add examples to the readline history -# TODO Add previous session's lines to readline history examples = [ "How many files are on my desktop?", "What time is it in Seattle?", "Make me a simple Pomodoro app.", "Open Chrome and go to YouTube.", "Can you set my system to light mode?", -] +] # TODO Add previous session's lines to readline history! random.shuffle(examples) + try: for example in examples: readline.add_history(example) @@ -81,7 +82,7 @@ def terminal_interface(interpreter, message, async_input = None): ### This is the primary input for Open Interpreter. message = cli_input("> ").strip() if interpreter.multi_line else input("> ").strip() else: - # User has already left a message using the confirmation input + ### User has requested changes before execution message = input_feedback input_feedback = None try: @@ -261,24 +262,19 @@ def terminal_interface(interpreter, message, async_input = None): if should_scan_code: scan_code(code, language, interpreter) - response, code_revision = input_confirmation( - " Would you like to run this code? (y/n)\n\n " - , async_input = async_input - ) - # print("::: input_confirmation", response, code_revision) + ## ↓ JOINT INPUT METHOD - FROM BOTH CLASSIC INTPUT() AND EXTERNAL INPUT + response, code_revision = input_confirmation(" Would you like to run this code? 
(y/n)\n\n ", async_input = async_input) + ## ↑ ALLOWS TO ACCEPT (Y/N/Other Requsts) ASWELL AS MANUAL CODE CHANGES FROM EXTERNAL PROCESS + # Resetting async_input for next time - # if async_input != None: async_input[0]=None; async_input[1]=None if async_input != None: async_input["input"]=None; async_input["code_revision"]=None - - # Allow user to manually edit the code before execution if code_revision is not None: - #Replacing Interpreter Last Content - # print("XXXXXX current code:::"+str(interpreter.messages)+":::") + ## ↓ USER MADE MANUAL CHANGES TO THE CODE final_code = f"`\n\n{code_revision}\n" + # Changes were made, set the new code, and replace interpreter's last content + code = final_code interpreter.messages[-1]["content"] = final_code - # print("YYYYYY current code:::"+str(interpreter.messages)+":::") - code = final_code print("") # <- Aesthetic choice if response != None and response.strip().lower() == "y": @@ -298,6 +294,7 @@ def terminal_interface(interpreter, message, async_input = None): # "content": response, # } # ) + ## ↓ ALLOW USER TO REQUEST OI TO MAKE CHANGES TO THE CODE BEFORE EXECUTION input_feedback = response break else: diff --git a/interpreter/terminal_interface/utils/async_input.py b/interpreter/terminal_interface/utils/async_input.py index ee6bc9986..76fb3dcf4 100644 --- a/interpreter/terminal_interface/utils/async_input.py +++ b/interpreter/terminal_interface/utils/async_input.py @@ -1,204 +1,71 @@ import threading -# import multiprocessing -# import ctypes -# import inspect import time -# import io, sys - -# def _async_raise(tid, exctype): -# '''Raises an exception in the threads with id tid''' -# if not inspect.isclass(exctype): -# raise TypeError("Only types can be raised (not instances)") -# res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), -# ctypes.py_object(exctype)) -# if res == 0: -# raise ValueError("invalid thread id") -# elif res != 1: -# # "if it returns a number greater than one, you're in trouble, 
-# # and you should call it again with exc=NULL to revert the effect" -# ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), None) -# raise SystemError("PyThreadState_SetAsyncExc failed") - -# class ThreadWithExc(threading.Thread): -# '''A thread class that supports raising an exception in the thread from -# another thread. -# ''' -# def _get_my_tid(self): -# """determines this (self's) thread id - -# CAREFUL: this function is executed in the context of the caller -# thread, to get the identity of the thread represented by this -# instance. -# """ -# if not self.is_alive(): # Note: self.isAlive() on older version of Python -# raise threading.ThreadError("the thread is not active") - -# # do we have it cached? -# if hasattr(self, "_thread_id"): -# return self._thread_id - -# # no, look for it in the _active dict -# for tid, tobj in threading._active.items(): -# if tobj is self: -# self._thread_id = tid -# return tid - -# # TODO: in python 2.6, there's a simpler way to do: self.ident - -# raise AssertionError("could not determine the thread's id") - -# def raise_exc(self, exctype): -# """Raises the given exception type in the context of this thread. - -# If the thread is busy in a system call (time.sleep(), -# socket.accept(), ...), the exception is simply ignored. - -# If you are sure that your exception should terminate the thread, -# one way to ensure that it works is: - -# t = ThreadWithExc( ... ) -# ... -# t.raise_exc( SomeException ) -# while t.isAlive(): -# time.sleep( 0.1 ) -# t.raise_exc( SomeException ) - -# If the exception is to be caught by the thread, you need a way to -# check that your thread has caught it. - -# CAREFUL: this function is executed in the context of the -# caller thread, to raise an exception in the context of the -# thread represented by this instance. 
-# """ -# _async_raise( self._get_my_tid(), exctype ) active_input = {"active":None} -# def classic_input(queue): -# data_wrapper = [[], False] # out, canSave data_wrapper = {"async_input_data":{}, "canSendInput":False} # out, canSave -# def classic_input(ret, prompt): -def classic_input(prompt): - - # async_input = ret()[0] - # print("Simulating input:", simulated_input) - # s = sys - # print("::::") - # ret = queue.get() - # final = [None,None] +# This uses the classic input methon (called from a thread) +def classic_input(input_msg): try: - # stdin = open(0) - if False: - final = [None] - def inputLayer(final): - active_input["active"] = "blocked" - # last = input(prompt) - last = input() + ## ↓ GET CLASSIC INPUT() + user_input = input() - # print("LAST:",last) - final[0] = last - active_input["active"] = None - threading.Thread(target=inputLayer, args=[final,]).start() - # print("AWAITING INPUT LAYER") - while(final[0] is None): - time.sleep(0.111) - res = final[0] - res = input() - # print("INPUT LAYER RES:",res) - # print("********************* (Y/N)", end="", flush=True) - # res = stdin.readline() - # queue.put(x) - # print("$$$ canSendInput {canSendInput} GOT CLASSIC INPUT:",res, flush=True) + # Send classic input back to input_confirmation async_input_data, canSendInput = data_wrapper["async_input_data"],data_wrapper["canSendInput"] - # print(f"$$$ canSendInput {canSendInput} ret {async_input_data} GOT CLASSIC INPUT:",res, flush=True) - if canSendInput: + # set origin of input to classic async_input_data["origin"] = "classic_input" - if res != "": - async_input_data["input"] = res - # final[0] = res - - # queue.put(res) - elif "$none$" in res: - # print("$$$$$$$$ SKIPPING CLASSIC") - async_input_data["input"] = None - else: + if user_input == "": async_input_data["input"] = "N" - # final[0] = "N" - # queue.put("N") - - except Exception as e: - # stdin = open(0) - - # print("::: !!! 
stopped classic input", e) + else: + # (Trigger) Send user input (Y/N/Other Requests) + async_input_data["input"] = user_input + except: async_input_data, canSendInput = data_wrapper["async_input_data"], data_wrapper["canSendInput"] if canSendInput: async_input_data["input"] = "N" - # final[0] = "N" - # queue.put("N") -def input_confirmation(prompt, async_input=None): + +## ↓ JOINT INPUT METHOD - FROM BOTH CLASSIC INTPUT() AND EXTERNAL INPUT +def input_confirmation(input_msg, async_input=None): # async_input:dict {"input":None,""} + ''' Changing `async_input` dict from an external process + can trigger confirmation, just like using input() + and also allows for manual code changes before execution + async_input:dict {"input":None,"code_revison"}''' if async_input == None: - # incase no async_input method was provided - response = input(prompt) - return response, None # classic input, code_revision - # response_revise = [None, None] #response, code_revision + # in case no async_input dict was provided, run normally + response = input(input_msg) + return response, None # input, code_revision - # finally: - # queue.put(final) - # threading.Thread(target=classic_input, args=[async_input,prompt]).start() - print(prompt) - # data_wrapper[0] = async_input#, True - # data_wrapper[1] = True + # Print the question here (Y/N) + print(input_msg) + + # Wrap the input data, Enable classic input from data_wrapper["async_input_data"], data_wrapper["canSendInput"] = async_input, True + # Start the classic input thread (if one isnt already active) if active_input["active"] is None: - # classic = ThreadWithExc(target=classic_input, args=[async_input,prompt]) - # classic = ThreadWithExc(target=classic_input, args=[prompt,]) - # classic.start() - threading.Thread(target=classic_input, args=[prompt,]).start() + # If no other classic input thread is open, create one + threading.Thread(target=classic_input, args=[input_msg,]).start() + if async_input["input"] is not None: pass # Skipping, 
confirmation already exists + + + ## ↓ WAIT FOR EITHER EXTERNAL INPUT OR CLASSIC INPUT() TO FINISH + ## async_input["input"] can change from external process or from classic input() + + while async_input["input"] is None: + time.sleep(0.1) + + ## ↑ WAIT UNTIL CLASSIC INPUT() FINISHES OR EXTERNAL INPUT FINISHES - # getInput = lambda : [async_input] - # queue = multiprocessing.Queue() - # queue.put(async_input) - # classic = multiprocessing.Process(target=classic_input, args=(queue,)) - # print(prompt) - # classic.start() - if async_input["input"] != None: print("::: skipping, confirmation already exists:",async_input) - while async_input["input"] is None:# and queue.empty(): - # awaiting for either classic_input or async_input to return a response - # print(".",end="") - time.sleep(0.22) #1111 - # Simulate user input - # if len(async_input) >= 3: if "origin" in async_input and async_input["origin"] == "classic_input": - # Got answer from classic input - pass - # print("--- ORIGIN ",async_input.pop("origin")) - # print("GOT ANSWER FROM INPUT", async_input) + pass # Got answer from classic input else: - print(f"(Got external input) {async_input}") # Got answer from External async input - pass - # simulated_input = "$none$" - # print("GOT ANSWER FROM EXTERNAL INPUT",async_input) + print(f"Got external input: {async_input}") - # sys.stdin = open("/dev/stdin") # Redirect stdin to an open file (Linux) - # Now the waiting thread should receive the input - # classic_input.sim(simulated_input) - # classic.raise_exc(KeyboardInterrupt) + # Disable input until next confirmation data_wrapper["canSendInput"] = False - # sys.stdin.write(simulated_input + "\n") - # sys.stdin.flush() - # process.terminate() - # process.join() - # classic.raise_exc(KeyboardInterrupt) - # if not queue.empty(): - # answer = queue.get() - # print("$$$$$$$$$ GOT CLASSIC INPUT", answer) - # async_input[0] = answer - # else: - # print("XXXXXX TERMINATING CLASSIC INPUT") - # classic.terminate() - # 
print("::: Done Async_Input", async_input) + + # return input from either classic or external + code revisions return async_input["input"], async_input["code_revision"] From c54d8c21cea2a99dba2a97780d1b04e9f241a64c Mon Sep 17 00:00:00 2001 From: Tami Date: Wed, 8 May 2024 22:14:20 +0300 Subject: [PATCH 4/6] Added Streaming_Out Hook for OIs Output, Added Async_Input to accept (Y/N/changes) from External process, Enabled Manual Code Changes, Enabled Make Changes Before Running (with ai) - Cleanup Done! Added Example/stream_out.py --- examples/stream_out.py | 195 ++++++++++++++++++ .../terminal_interface/utils/async_input.py | 3 +- 2 files changed, 197 insertions(+), 1 deletion(-) create mode 100644 examples/stream_out.py diff --git a/examples/stream_out.py b/examples/stream_out.py new file mode 100644 index 000000000..01f504edf --- /dev/null +++ b/examples/stream_out.py @@ -0,0 +1,195 @@ +from interpreter import interpreter, computer +import time +# ____ ____ __ __ +# / __ \____ ___ ____ / _/___ / /____ _________ ________ / /____ _____ +# / / / / __ \/ _ \/ __ \ / // __ \/ __/ _ \/ ___/ __ \/ ___/ _ \/ __/ _ \/ ___/ +# / /_/ / /_/ / __/ / / / _/ // / / / /_/ __/ / / /_/ / / / __/ /_/ __/ / +# \____/ .___/\___/_/ /_/ /___/_/ /_/\__/\___/_/ / .___/_/ \___/\__/\___/_/ +# /_/ /_/ +# ____ _____ ____ _____ _ __ __ ___ _ _ _____ +# / ___|_ _| _ \| ____| / \ | \/ | / _ \| | | |_ _| +# \___ \ | | | |_) | _| / _ \ | |\/| |_____| | | | | | | | | +# ___) || | | _ <| |___ / ___ \| | | |_____| |_| | |_| | | | +# |____/ |_| |_| \_\_____/_/ \_\_| |_| \___/ \___/ |_| + +''' +# THIS EXAMPLE SHOWS HOW TO: +# 1. Stream-Out All of OpenInterpreter's outputs to another process (like another UI) +# 2. Async-Input To Trigger (Y/N/Other) Remotely +# 3. 
Make Changes to the Code Before Execution
+# - If you answer Other than "y" or "n" your answer will be counted as a User Message
+# - If you manually change the code, the new revised code will be run
+'''
+
+
+interpreter.llm.model = 'mixtral-8x7b-32768'
+interpreter.llm.model = 'llama3-70b-8192'
+interpreter.llm.api_key = 'YOUR_GROQ_API_KEY' # NOTE(review): a real key was committed here - revoke it and read the key from the environment instead
+interpreter.llm.api_base = "https://api.groq.com/openai/v1"
+interpreter.llm.context_window = 32000
+
+
+#______________________________________
+# Data placeholders used to do async streaming out
+from collections import deque
+
+block_queue = deque()
+full_queue = deque()
+blocks_unfinished = deque()
+pauseSend = [False]
+
+# Useful for whatsapp and other messaging apps (set to True)
+update_by_blocks = False
+ignore_formats = ['active_line']
+independent_blocks = ['confirmation', 'output'] # These will be sent as a whole
+#______________________________________
+
+
+#______________________________________
+# Prep for my implementation
+# from xo_benedict.freshClient import FreshClient #
+# client = FreshClient(_inc=3)
+#______________________________________
+
+## ↓ EXAMPLE FOR THE FINAL METHOD TO STREAM OI'S OUTPUT TO ANOTHER PROGRAM
+def _update(item, debug = False):
+    def _format(lines):
+        hSize = 4
+        return lines.replace("**Plan:**",f"Plan:").replace("**Code:**",f"Code:")
+
+    if not pauseSend[0]:
+        if debug: print(f"::: STREAMING OUT:", item)
+        stream_out = _format(str(item))
+
+        ## ↓↓↓ SEND OUT OI'S OUTPUT
+
+        #client.addText(stream_out) # Just an example, my personal implementation
+        if debug: print(" --CHANGE THIS-- STREAMING OUTPUT: ",stream_out)
+
+        ## ↑↑↑ CHANGE THIS to something that triggers YOUR APPLICATION
+## ↑ You can change this function to one that suits your application
+
+
+
+# ↓ STREAM OUT HOOK - This function is passed into chat() and is called on every output from 
+def stream_out_hook(partial, debug = False, *a, **kw):
+    # Gets Chunks from OpenInterpreter and 
sends them to an async update queue + ''' THIS FUNCTION PROCESSES ALL THE OUTPUTS FROM OPEN INTERPRETER + Prepares all the chunks to be sent out + update_by_blocks=True will batch similar messages, False will stream (unless in independent_blocks ) + ''' + if debug: print("STREAMING OUT! ",partial) + + ## ↓ Send all the different openinterpreter chunk types to the queue + + if "start" in partial and partial["start"]: + if update_by_blocks: + blocks_unfinished.append({"content":"",**partial})#,"content_parts":[],**partial}) + else: + full_queue.append(partial) + if partial['type'] in independent_blocks or 'format' in partial and partial['format'] in independent_blocks: + if update_by_blocks: + block_queue.append({"independent":True,**partial}) + else: + full_queue.append({"independent":True,**partial}) + if debug: print("INDEPENDENT BLOCK", partial) + elif 'content' in partial and ('format' not in partial or partial['format'] not in ignore_formats): + if update_by_blocks: + blocks_unfinished[0]['content'] += partial['content'] + else: + full_queue.append(partial['content']) + # blocks[-1]['content_parts'].append(partial['content']) + if 'end' in partial: + if debug: print("EEEnd",blocks_unfinished, partial) + fin = {**partial} + if update_by_blocks: + blocks_unfinished[0]['end'] = partial['end'] + fin = blocks_unfinished.popleft() + block_queue.append(fin) + else: + full_queue.append(fin) + + if debug: print("FINISHED BLOCK", fin) + + + + +#______________________________________ +# Continuesly Recieve OpenInterpreter Chunks and Prepare them to be Sent Out +def update_queue(debug = False, *a, **kw): + target = full_queue + if update_by_blocks: + target = block_queue + c = 0 + while(True): + while(len(target) > 0): + leftmost_item = target.popleft() + if debug: print(f"{c} ::: UPDATING QUEUE:", leftmost_item) + + # + if "start" in leftmost_item: + if "type" in leftmost_item and leftmost_item["type"] == "code": + _update("__________________________________________\n") + 
pauseSend[0] = True + elif "end" in leftmost_item: + if "type" in leftmost_item and leftmost_item["type"] == "code": + pauseSend[0] = False + elif isinstance(leftmost_item, str): _update(leftmost_item) + else: + content = "" if "content" not in leftmost_item else leftmost_item["content"] + if "content" in leftmost_item and not isinstance(leftmost_item["content"],str): + content = leftmost_item['content']['content'] if not isinstance(leftmost_item['content'],str) else leftmost_item['content'] + if len(content) >0 and content[0] == "\n": content = content[1:] + if "type" in leftmost_item and leftmost_item["type"] in ["confirmation"]: + if len(content)>0 and content[0] != "<" and content[-1] != ">": content = ""+content+ "" + _update(content+"

Would you like to run this code? (Y/N)

" + +" You can also edit it before accepting
__________________________________________
") + elif "type" in leftmost_item and leftmost_item["type"] == 'console': + if len(content)>0 and content != "\n": + if debug: print(f"::: content :::{content}:::") + if content[0] != "<" and content[-1] != ">": content = ""+content+ "" + _update(f"

OUTPUT:

{content}
") + else: + _update(leftmost_item) + time.sleep(0.1) + +from threading import Thread +update_queue_thread = Thread(target=update_queue) +update_queue_thread.start() +# ↑ Start Async Thread to Process Chunks Before streaming out +#______________________________________ + +# Run tests, one after the other +def test_async_input(tests): + for i, answer, code_revision in tests: + # Wait {i} seconds + while(i>0): + if i%5==0: print(f"::: Testing Input:\"{answer}\" with code:{code_revision} in: {i} seconds") + time.sleep(1) + i-=1 + + ## ↓ TRIGGER EXTERNAL INPUT + async_input_data["input"] = answer + async_input_data["code_revision"] = code_revision + ## ↑ OPTIONAL CODE CHANGES + + pass #print(" TEST DONE ", async_input_data) + + +## ↓ THIS IS OBJECT BEING WATCHED FOR TRIGGERING INPUT +async_input_data = {"input":None, "code_revision":None} +## ↑ CHANGING async_input_data["input"] WILL TRIGGER OI'S INPUT +if __name__ == "__main__": + + ## Test automatic external trigger for (Y/N/Other) + code revisions + tests = [ + # seconds_to_wait, input_response, new_code_to_run + [20, "Y", "print('SUCCESS!!!!!!!!')"], + # [20,"N",None], + # [20,"print hello {username from host} instead", None], + ] + Thread(target=test_async_input, args=[tests,]).start() + + # Start OpenInterpreter + '''# Pass in stream_out_hook function, and async_input_data ''' + interpreter.chat(stream_out = stream_out_hook, async_input = async_input_data) diff --git a/interpreter/terminal_interface/utils/async_input.py b/interpreter/terminal_interface/utils/async_input.py index 76fb3dcf4..16075e722 100644 --- a/interpreter/terminal_interface/utils/async_input.py +++ b/interpreter/terminal_interface/utils/async_input.py @@ -27,11 +27,12 @@ def classic_input(input_msg): ## ↓ JOINT INPUT METHOD - FROM BOTH CLASSIC INTPUT() AND EXTERNAL INPUT +# async_input:dict = {"input":None, "code_revison":None} def input_confirmation(input_msg, async_input=None): # async_input:dict {"input":None,""} ''' Changing `async_input` 
dict from an external process
     can trigger confirmation, just like using input()
     and also allows for manual code changes before execution
-    async_input:dict {"input":None,"code_revison"}'''
+    '''
     if async_input == None:
         # in case no async_input dict was provided, run normally
         response = input(input_msg)

From b42c9a299da6afa4b444ddc3a680747a6b4e6c67 Mon Sep 17 00:00:00 2001
From: Tami
Date: Wed, 8 May 2024 23:34:55 +0300
Subject: [PATCH 5/6] Added Docs & Example stream_out.py

---
 docs/guides/streaming-output.mdx              | 25 +++++++++++++++++++
 interpreter/core/respond.py                   |  2 +-
 .../terminal_interface/terminal_interface.py  |  2 --
 3 files changed, 26 insertions(+), 3 deletions(-)
 create mode 100644 docs/guides/streaming-output.mdx

diff --git a/docs/guides/streaming-output.mdx b/docs/guides/streaming-output.mdx
new file mode 100644
index 000000000..fceb31a87
--- /dev/null
+++ b/docs/guides/streaming-output.mdx
@@ -0,0 +1,25 @@
+---
+title: Streaming Output
+---
+
+You can stream out all the output from Open Interpreter by adding `stream_out=function` in an `interpreter.chat()` call (optional).
+
+You can also trigger Open Interpreter's (Y/N) confirmation input REMOTELY by passing `async_input={"input":None, "code_revision":None}`. Changing the dict's 'input' value will trigger the (Y/N) confirmation — place your answer there.
+
+Additionally, you can pass new code to dict['code_revision'] and it will be executed instead of the last code block (useful for manual editing).
+
+```python
+## ↓ THIS FUNCTION IS CALLED ON ALL OF OI'S OUTPUTS
+def stream_out_hook(partial, debug = False, *a, **kw):
+    ''' THIS FUNCTION PROCESSES ALL THE OUTPUTS FROM OPEN INTERPRETER '''
+    if debug: print("STREAMING OUT! 
",partial) + # Replace this function with one that will send the output to YOUR APPLICATION + +## ↓ THIS IS OBJECT BEING WATCHED FOR TRIGGERING INPUT +async_input_data = {"input":None, "code_revision":None} +## ↑ CHANGING async_input_data["input"] WILL TRIGGER OI'S (Y/N/OTHER) CONFIRMATION INPUT + +interpreter.chat(stream_out = stream_out_hook, async_input = async_input_data) +``` + +## For a more comprehensive & full example, please checkout [examples/stream_out.py](https://github.com/KillianLucas/open-interpreter/blob/main/docs/examples/stream_out.py) diff --git a/interpreter/core/respond.py b/interpreter/core/respond.py index 21d920558..cde8eea4e 100644 --- a/interpreter/core/respond.py +++ b/interpreter/core/respond.py @@ -225,8 +225,8 @@ def respond(interpreter): print(str(e)) print("Continuing...") - ## ↓ CODE IS RUN HERE + for line in interpreter.computer.run(language, code, stream=True): yield {"role": "computer", **line} diff --git a/interpreter/terminal_interface/terminal_interface.py b/interpreter/terminal_interface/terminal_interface.py index 2d53bea0a..0908c8927 100644 --- a/interpreter/terminal_interface/terminal_interface.py +++ b/interpreter/terminal_interface/terminal_interface.py @@ -14,8 +14,6 @@ import re import subprocess import time -import threading -import multiprocessing from ..core.utils.scan_code import scan_code from ..core.utils.system_debug_info import system_info From dfd6918b457327d7e892235c4a3851929af479d6 Mon Sep 17 00:00:00 2001 From: Tami Date: Wed, 8 May 2024 23:35:44 +0300 Subject: [PATCH 6/6] Added Groq Docs (from my other PR) --- docs/language-models/hosted-models/groq.mdx | 72 +++++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 docs/language-models/hosted-models/groq.mdx diff --git a/docs/language-models/hosted-models/groq.mdx b/docs/language-models/hosted-models/groq.mdx new file mode 100644 index 000000000..f0c151e46 --- /dev/null +++ b/docs/language-models/hosted-models/groq.mdx @@ -0,0 +1,72 @@ +--- 
+title: Groq +--- + +To use Open Interpreter with a model from Groq, simply run: + + + +```bash Terminal +interpreter --model groq/llama3-8b-8192 +``` + +```python Python +from interpreter import interpreter + +interpreter.llm.model = "groq/llama3-8b-8192" +interpreter.llm.api_key = '' +interpreter.chat() +``` + + + +If you are having any issues when passing the `--model`, try adding the `--api_base`: + + + +```bash Terminal +interpreter --api_base "https://api.groq.com/openai/v1" --model groq/llama3-8b-8192 --api_key $GROQ_API_KEY +``` + +```python Python +from interpreter import interpreter + +interpreter.llm.model = "groq/llama3-8b-8192" +interpreter.llm.api_key = '' +interpreter.llm.api_base = "https://api.groq.com/openai/v1" +interpreter.llm.context_window = 32000 +interpreter.chat() +``` + + + +# Supported Models + +We support any model on [Groq's models page:](https://console.groq.com/docs/models) + + + +```bash Terminal +interpreter --model groq/mixtral-8x7b-32768 +interpreter --model groq/llama3-8b-8192 +interpreter --model groq/llama3-70b-8192 +interpreter --model groq/gemma-7b-it +``` + +```python Python +interpreter.llm.model = "groq/mixtral-8x7b-32768" +interpreter.llm.model = "groq/llama3-8b-8192" +interpreter.llm.model = "groq/llama3-70b-8192" +interpreter.llm.model = "groq/gemma-7b-it" +``` + + + +# Required Environment Variables + +Run `export GROQ_API_KEY=''` or place it in your rc file and re-source +Set the following environment variables [(click here to learn how)](https://chat.openai.com/share/1062cdd8-62a1-4aa8-8ec9-eca45645971a) to use these models. + +| Environment Variable | Description | Where to Find | +| -------------------- | ---------------------------------------------------- | ------------------------------------------------------------------- | +| `GROQ_API_KEY` | The API key for authenticating to Groq's services. | [Groq Account Page](https://console.groq.com/keys) |