From 47b0eb172e5333bc20509a06dc0bd76044092d2e Mon Sep 17 00:00:00 2001
From: George Murray
Date: Tue, 3 Oct 2023 11:44:14 -0700
Subject: [PATCH] Fix temp file resource leak (#569)

ChatOpenAI will soon be consolidated, but this fixes a resource leak
within it. Also adds a more helpful return type hint on File.create.
---
 src/steamship/agents/llms/openai.py | 65 ++++++++++++++++-------------
 src/steamship/data/file.py          |  2 +-
 2 files changed, 36 insertions(+), 31 deletions(-)

diff --git a/src/steamship/agents/llms/openai.py b/src/steamship/agents/llms/openai.py
index 5ec142c8..f8ac6bca 100644
--- a/src/steamship/agents/llms/openai.py
+++ b/src/steamship/agents/llms/openai.py
@@ -91,35 +91,40 @@ def chat(self, messages: List[Block], tools: Optional[List[Tool]], **kwargs) ->
         tags=[Tag(kind=TagKind.GENERATION, name=GenerationTag.PROMPT_COMPLETION)],
     )
 
-    options = {}
-    if len(tools) > 0:
-        functions = []
-        for tool in tools:
-            functions.append(tool.as_openai_function().dict())
-        options["functions"] = functions
-
-    if "max_tokens" in kwargs:
-        options["max_tokens"] = kwargs["max_tokens"]
-
-    extra = {
-        AgentLogging.LLM_NAME: "OpenAI",
-        AgentLogging.IS_MESSAGE: True,
-        AgentLogging.MESSAGE_TYPE: AgentLogging.PROMPT,
-        AgentLogging.MESSAGE_AUTHOR: AgentLogging.LLM,
-    }
-
-    if logging.DEBUG >= logging.root.getEffectiveLevel():
-        extra["messages"] = json.dumps(
-            "\n".join([f"[{msg.chat_role}] {msg.as_llm_input()}" for msg in messages])
+    try:
+        options = {}
+        if len(tools) > 0:
+            functions = []
+            for tool in tools:
+                functions.append(tool.as_openai_function().dict())
+            options["functions"] = functions
+
+        if "max_tokens" in kwargs:
+            options["max_tokens"] = kwargs["max_tokens"]
+
+        extra = {
+            AgentLogging.LLM_NAME: "OpenAI",
+            AgentLogging.IS_MESSAGE: True,
+            AgentLogging.MESSAGE_TYPE: AgentLogging.PROMPT,
+            AgentLogging.MESSAGE_AUTHOR: AgentLogging.LLM,
+        }
+
+        if logging.DEBUG >= logging.root.getEffectiveLevel():
+            extra["messages"] = json.dumps(
+                "\n".join([f"[{msg.chat_role}] {msg.as_llm_input()}" for msg in messages])
+            )
+            extra["tools"] = ",".join([t.name for t in tools])
+        else:
+            extra["num_messages"] = len(messages)
+            extra["num_tools"] = len(tools)
+
+        logging.info(f"OpenAI ChatComplete ({messages[-1].as_llm_input()})", extra=extra)
+
+        tool_selection_task = self.generator.generate(
+            input_file_id=temp_file.id, options=options
         )
-        extra["tools"] = ",".join([t.name for t in tools])
-    else:
-        extra["num_messages"] = len(messages)
-        extra["num_tools"] = len(tools)
-
-    logging.info(f"OpenAI ChatComplete ({messages[-1].as_llm_input()})", extra=extra)
-
-    tool_selection_task = self.generator.generate(input_file_id=temp_file.id, options=options)
-    tool_selection_task.wait()
+        tool_selection_task.wait()
 
-    return tool_selection_task.output.blocks
+        return tool_selection_task.output.blocks
+    finally:
+        temp_file.delete()
diff --git a/src/steamship/data/file.py b/src/steamship/data/file.py
index d7423da3..6625ba57 100644
--- a/src/steamship/data/file.py
+++ b/src/steamship/data/file.py
@@ -121,7 +121,7 @@ def create(
     blocks: List[Block] = None,
     tags: List[Tag] = None,
     public_data: bool = False,
-    ) -> Any:
+    ) -> File:
     req = {
         "handle": handle,
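--
Reviewer note (placed after the signature delimiter, so git am ignores it):
below is a minimal, runnable sketch of the try/finally pattern this patch
applies, showing why the cleanup call is reached even when the request
fails mid-flight. Resource, use, and delete are hypothetical stand-ins for
illustration only, not Steamship APIs.

# leak_sketch.py -- illustrates the cleanup guarantee behind this fix.
class Resource:
    """Hypothetical stand-in for the temp File created in chat()."""

    def __init__(self) -> None:
        self.deleted = False

    def use(self) -> str:
        # Simulates generator.generate()/wait() raising mid-request.
        raise RuntimeError("simulated failure")

    def delete(self) -> None:
        self.deleted = True


def leaky(res: Resource) -> str:
    out = res.use()  # if this raises, the delete() below never runs
    res.delete()
    return out


def fixed(res: Resource) -> str:
    try:
        return res.use()
    finally:
        res.delete()  # runs on success, on exception, and on early return


if __name__ == "__main__":
    a, b = Resource(), Resource()
    for fn, res in ((leaky, a), (fixed, b)):
        try:
            fn(res)
        except RuntimeError:
            pass
    print(f"leaky cleaned up: {a.deleted}")  # False -> the old leak
    print(f"fixed cleaned up: {b.deleted}")  # True  -> behavior after this patch

The same guarantee could also come from a small custom context manager;
try/finally keeps this diff minimal.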