- ChatLab version: 2.0
- Python version: 3.12
- Operating System: macOS
Description
I wanted to continue a chat even after a function had generated an exception.
What I Did
```python
import chatlab

def add_two_numbers(a: float, b: float) -> float:
    """Add two numbers together. Raises an exception when the numbers are in the wrong order."""
    if b < a:
        return a + b
    raise Exception("I can't do math")

chat = chatlab.Chat(model=chatlab.models.GPT_4_0125_PREVIEW, chat_functions=[add_two_numbers])
await chat("Please add 1 + 2 for me")
```
The model calls add_two_numbers(a=1, b=2); since b < a is false, the function raises, and the exception propagates out of the chat call:

```
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
Cell In[11], line 10
7 raise Exception("I can't do math")
9 chat = chatlab.Chat(model=chatlab.models.GPT_4_0125_PREVIEW, chat_functions=[add_two_numbers])
---> 10 await chat("Please add 1 + 2 for me")
File ~/Wharf/src/chatlab/chatlab/chat.py:125, in Chat.__call__(self, stream, *messages, **kwargs)
123 async def __call__(self, *messages: Union[ChatCompletionMessageParam, str], stream=True, **kwargs):
124 """Send messages to the chat model and display the response."""
--> 125 return await self.submit(*messages, stream=stream, **kwargs)
File ~/Wharf/src/chatlab/chatlab/chat.py:350, in Chat.submit(self, stream, *messages, **kwargs)
347 self.append(assistant_tool_calls(tool_arguments))
348 for tool_argument in tool_arguments:
349 # Oh crap I need to append the big assistant call of it too. May have to assume we've done it by here.
--> 350 function_called = await tool_argument.call(self.function_registry)
351 # TODO: Format the tool message
352 self.append(function_called.get_tool_called_message())
File ~/Wharf/src/chatlab/chatlab/views/tools.py:146, in ToolArguments.call(self, function_registry)
144 # Execute the function and get the result
145 try:
--> 146 output = await function_registry.call(function_name, function_args)
147 except FunctionArgumentError as e:
148 self.finished = True
File ~/Wharf/src/chatlab/chatlab/registry.py:474, in FunctionRegistry.call(self, name, arguments)
472 result = await function(**prepared_arguments)
473 else:
--> 474 result = function(**prepared_arguments)
475 return result
Cell In[11], line 7, in add_two_numbers(a, b)
5 if b < a:
6 return a + b
----> 7 raise Exception("I can't do math")
Exception: I can't do math
```
The assistant message carrying the tool_calls was appended to the history (chat.py line 347), but the exception prevented the matching tool message from being appended (line 352), so all future calls to the chat generate a 400 error from OpenAI:
```python
await chat("what went wrong there?")
```
```
---------------------------------------------------------------------------
BadRequestError Traceback (most recent call last)
Cell In[10], line 1
----> 1 await chat("what went wrong there?")
File ~/Wharf/src/chatlab/chatlab/chat.py:125, in Chat.__call__(self, stream, *messages, **kwargs)
123 async def __call__(self, *messages: Union[ChatCompletionMessageParam, str], stream=True, **kwargs):
124 """Send messages to the chat model and display the response."""
--> 125 return await self.submit(*messages, stream=stream, **kwargs)
File ~/Wharf/src/chatlab/chatlab/chat.py:302, in Chat.submit(self, stream, *messages, **kwargs)
299 # Due to the strict response typing based on `Literal` typing on `stream`, we have to process these
300 # two cases separately
301 if stream:
--> 302 streaming_response = await client.chat.completions.create(
303 **chat_create_kwargs,
304 stream=True,
305 )
307 self.append(*messages)
309 finish_reason, function_call_request, tool_arguments = await self.__process_stream(streaming_response)
File ~/.pyenv/versions/3.12.1/lib/python3.12/site-packages/openai/resources/chat/completions.py:1291, in AsyncCompletions.create(self, messages, model, frequency_penalty, function_call, functions, logit_bias, logprobs, max_tokens, n, presence_penalty, response_format, seed, stop, stream, temperature, tool_choice, tools, top_logprobs, top_p, user, extra_headers, extra_query, extra_body, timeout)
1242 @required_args(["messages", "model"], ["messages", "model", "stream"])
1243 async def create(
1244 self,
(...)
1289 timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
1290 ) -> ChatCompletion | AsyncStream[ChatCompletionChunk]:
-> 1291 return await self._post(
1292 "/chat/completions",
1293 body=maybe_transform(
1294 {
1295 "messages": messages,
1296 "model": model,
1297 "frequency_penalty": frequency_penalty,
1298 "function_call": function_call,
1299 "functions": functions,
1300 "logit_bias": logit_bias,
1301 "logprobs": logprobs,
1302 "max_tokens": max_tokens,
1303 "n": n,
1304 "presence_penalty": presence_penalty,
1305 "response_format": response_format,
1306 "seed": seed,
1307 "stop": stop,
1308 "stream": stream,
1309 "temperature": temperature,
1310 "tool_choice": tool_choice,
1311 "tools": tools,
1312 "top_logprobs": top_logprobs,
1313 "top_p": top_p,
1314 "user": user,
1315 },
1316 completion_create_params.CompletionCreateParams,
1317 ),
1318 options=make_request_options(
1319 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
1320 ),
1321 cast_to=ChatCompletion,
1322 stream=stream or False,
1323 stream_cls=AsyncStream[ChatCompletionChunk],
1324 )
File ~/.pyenv/versions/3.12.1/lib/python3.12/site-packages/openai/_base_client.py:1578, in AsyncAPIClient.post(self, path, cast_to, body, files, options, stream, stream_cls)
1564 async def post(
1565 self,
1566 path: str,
(...)
1573 stream_cls: type[_AsyncStreamT] | None = None,
1574 ) -> ResponseT | _AsyncStreamT:
1575 opts = FinalRequestOptions.construct(
1576 method="post", url=path, json_data=body, files=await async_to_httpx_files(files), **options
1577 )
-> 1578 return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)
File ~/.pyenv/versions/3.12.1/lib/python3.12/site-packages/openai/_base_client.py:1339, in AsyncAPIClient.request(self, cast_to, options, stream, stream_cls, remaining_retries)
1330 async def request(
1331 self,
1332 cast_to: Type[ResponseT],
(...)
1337 remaining_retries: Optional[int] = None,
1338 ) -> ResponseT | _AsyncStreamT:
-> 1339 return await self._request(
1340 cast_to=cast_to,
1341 options=options,
1342 stream=stream,
1343 stream_cls=stream_cls,
1344 remaining_retries=remaining_retries,
1345 )
File ~/.pyenv/versions/3.12.1/lib/python3.12/site-packages/openai/_base_client.py:1429, in AsyncAPIClient._request(self, cast_to, options, stream, stream_cls, remaining_retries)
1426 await err.response.aread()
1428 log.debug("Re-raising status error")
-> 1429 raise self._make_status_error_from_response(err.response) from None
1431 return self._process_response(
1432 cast_to=cast_to,
1433 options=options,
(...)
1436 stream_cls=stream_cls,
1437 )
BadRequestError: Error code: 400 - {'error': {'message': "An assistant message with 'tool_calls' must be followed by tool messages responding to each 'tool_call_id'. The following tool_call_ids did not have response messages: call_KTJddJ7ScPS972aOPs2Owwdl", 'type': 'invalid_request_error', 'param': 'messages.[2].role', 'code': None}}
```
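The 400 is the API enforcing the contract quoted in the error message: every assistant message that carries tool_calls must be followed by one tool message per tool_call_id. A minimal sketch of the history shape OpenAI expects (the id is the one from the error above; the content values are illustrative):

```python
messages = [
    {"role": "user", "content": "Please add 1 + 2 for me"},
    {
        "role": "assistant",
        "tool_calls": [{
            "id": "call_KTJddJ7ScPS972aOPs2Owwdl",
            "type": "function",
            "function": {"name": "add_two_numbers", "arguments": '{"a": 1, "b": 2}'},
        }],
    },
    # ChatLab appends the assistant message above, but when the function
    # raises, this matching tool message is never appended:
    {
        "role": "tool",
        "tool_call_id": "call_KTJddJ7ScPS972aOPs2Owwdl",
        "content": "Exception: I can't do math",
    },
]
```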
Ideally, the exception would be captured and appended to the message log as the tool's result, allowing the chat to continue. (And perhaps even allowing the model to attempt a fix for the exception...)
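In the meantime, a workaround sketch, assuming only the public API from the repro above (exceptions_to_output is a hypothetical helper, not part of ChatLab): wrap the chat function so exceptions come back as its output instead of propagating. functools.wraps preserves the name, docstring, and annotations, which ChatLab's schema generation should still pick up, though that last point is an assumption.

```python
import functools

import chatlab

def exceptions_to_output(fn):
    # Hypothetical helper: catch anything the chat function raises and
    # return the error text as its result, so ChatLab appends a normal
    # tool message and the conversation can continue.
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except Exception as e:
            return f"{type(e).__name__}: {e}"
    return wrapper

@exceptions_to_output
def add_two_numbers(a: float, b: float) -> float:
    """Add two numbers together. Raises an exception when the numbers are in the wrong order."""
    if b < a:
        return a + b
    raise Exception("I can't do math")

chat = chatlab.Chat(model=chatlab.models.GPT_4_0125_PREVIEW, chat_functions=[add_two_numbers])
# The model receives "Exception: I can't do math" as the tool result
# and can respond to it instead of the request failing with a 400.
await chat("Please add 1 + 2 for me")
```

Because the error text goes back to the model as the tool result, this also gets partway to the parenthetical above: the model can see what failed and attempt a fix.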