diff --git a/docs/docs/use_cases/tool_use/agents.ipynb b/docs/docs/use_cases/tool_use/agents.ipynb index eede7740d6ebc0..797a05c69d4d39 100644 --- a/docs/docs/use_cases/tool_use/agents.ipynb +++ b/docs/docs/use_cases/tool_use/agents.ipynb @@ -17,13 +17,13 @@ "jp-MarkdownHeadingCollapsed": true }, "source": [ - "## Agents\n", + "## Repeated tool use with agents\n", "\n", "Chains are great when we know the specific sequence of tool usage needed for any user input. But for certain use cases, how many times we use tools depends on the input. In these cases, we want to let the model itself decide how many times to use tools and in what order. [Agents](/docs/modules/agents/) let us do just this.\n", "\n", "LangChain comes with a number of built-in agents that are optimized for different use cases. Read about all the [agent types here](/docs/modules/agents/agent_types/).\n", "\n", - "As an example, let's try out the OpenAI tools agent, which makes use of the new OpenAI tool-calling API (this is only available in the latest OpenAI models, and differs from function-calling in that the model can return multiple function invocations at once).\n", + "We'll use the [tool calling agent](/docs/modules/agents/agent_types/tool_calling/), which is generally the most reliable kind and the recommended one for most use cases. \"Tool calling\" in this case refers to a specific type of model API that allows for explicitly passing tool definitions to models and getting explicit tool invocations out. For more on tool calling models see [this guide](/docs/modules/model_io/chat/function_calling/).\n", "\n", "![agent](../../../static/img/tool_agent.svg)" ] @@ -45,7 +45,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet langchain langchain-openai" + "%pip install --upgrade --quiet langchain langchainhub" ] }, { "cell_type": "code", "id": "a33915ce-00c5-4379-8a83-c0053e471cdb", "metadata": {}, "source": [ - "And set these environment variables:" + "If you'd like to use LangSmith, set the environment variables below:" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "id": "54667a49-c226-486d-a887-33120c90cc91", "metadata": {}, "outputs": [], "source": [ "import getpass\n", "import os\n", "\n", - "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n", "\n", - "# If you'd like to use LangSmith, uncomment the below\n", "# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n", "# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()" ] }, { @@ -85,7 +83,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 2, "id": "1c44ba79-6ab2-4d55-8247-82fca4d9b70c", "metadata": {}, "outputs": [], "source": [ @@ -124,19 +122,18 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 3, "id": "e27a4e1a-938b-4b60-8e32-25e4ee530274", "metadata": {}, "outputs": [], "source": [ "from langchain import hub\n", - "from langchain.agents import AgentExecutor, create_openai_tools_agent\n", - "from langchain_openai import ChatOpenAI" + "from langchain.agents import AgentExecutor, create_tool_calling_agent" ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 4, "id": "bcc9536e-0328-4e29-9d3d-133f3e63e589", "metadata": {}, "outputs": [ @@ -173,27 +170,45 @@ "id": "85e9875a-d8d4-4712-b3f0-b513c684451b", "metadata": {}, "source": [ - "## Create agent" + "## Create agent\n", + "\n", + "We'll need to use a model with tool calling capabilities. 
You can see which models support tool calling [here](/docs/integrations/chat/).\n", + "\n", + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```" ] }, { "cell_type": "code", - "execution_count": 4, - "id": "a1c5319d-6609-449d-8dd0-127e9a600656", + "execution_count": 5, + "id": "9583aef3-a2cf-461e-8506-8a22f4c730b8", "metadata": {}, "outputs": [], "source": [ - "# Choose the LLM that will drive the agent\n", - "# Only certain models support this\n", - "model = ChatOpenAI(model=\"gpt-3.5-turbo-1106\", temperature=0)\n", + "# | echo: false\n", + "# | output: false\n", + "from langchain_anthropic import ChatAnthropic\n", "\n", - "# Construct the OpenAI Tools agent\n", - "agent = create_openai_tools_agent(model, tools, prompt)" + "llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\", temperature=0)" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 6, + "id": "a1c5319d-6609-449d-8dd0-127e9a600656", + "metadata": {}, + "outputs": [], + "source": [ + "# Construct the tool calling agent\n", + "agent = create_tool_calling_agent(llm, tools, prompt)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, "id": "c86bfe50-c5b3-49ed-86c8-1fe8dcd0c83a", "metadata": {}, "outputs": [], @@ -212,7 +227,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 8, "id": "c098f8df-fd7f-4c13-963a-8e34194d3f84", "metadata": {}, "outputs": [ @@ -225,21 +240,23 @@ "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", "\u001b[32;1m\u001b[1;3m\n", "Invoking: `exponentiate` with `{'base': 3, 'exponent': 5}`\n", - "\n", + "responded: [{'text': \"Okay, let's break this down step-by-step:\", 'type': 'text'}, {'id': 'toolu_01CjdiDhDmMtaT1F4R7hSV5D', 'input': {'base': 3, 'exponent': 5}, 'name': 'exponentiate', 'type': 'tool_use'}]\n", "\n", "\u001b[0m\u001b[38;5;200m\u001b[1;3m243\u001b[0m\u001b[32;1m\u001b[1;3m\n", "Invoking: `add` with `{'first_int': 12, 'second_int': 3}`\n", - "\n", + "responded: [{'text': '3 to the 5th power is 243.', 'type': 'text'}, {'id': 'toolu_01EKqn4E5w3Zj7bQ8s8xmi4R', 'input': {'first_int': 12, 'second_int': 3}, 'name': 'add', 'type': 'tool_use'}]\n", "\n", "\u001b[0m\u001b[33;1m\u001b[1;3m15\u001b[0m\u001b[32;1m\u001b[1;3m\n", "Invoking: `multiply` with `{'first_int': 243, 'second_int': 15}`\n", - "\n", + "responded: [{'text': '12 + 3 = 15', 'type': 'text'}, {'id': 'toolu_017VZJgZBYbwMo2KGD6o6hsQ', 'input': {'first_int': 243, 'second_int': 15}, 'name': 'multiply', 'type': 'tool_use'}]\n", "\n", "\u001b[0m\u001b[36;1m\u001b[1;3m3645\u001b[0m\u001b[32;1m\u001b[1;3m\n", - "Invoking: `exponentiate` with `{'base': 3645, 'exponent': 2}`\n", + "Invoking: `multiply` with `{'first_int': 3645, 'second_int': 3645}`\n", + "responded: [{'text': '243 * 15 = 3645', 'type': 'text'}, {'id': 'toolu_01RtFCcQgbVGya3NVDgTYKTa', 'input': {'first_int': 3645, 'second_int': 3645}, 'name': 'multiply', 'type': 'tool_use'}]\n", "\n", + "\u001b[0m\u001b[36;1m\u001b[1;3m13286025\u001b[0m\u001b[32;1m\u001b[1;3mSo 3645 squared is 13,286,025.\n", "\n", - "\u001b[0m\u001b[38;5;200m\u001b[1;3m13286025\u001b[0m\u001b[32;1m\u001b[1;3mThe result of raising 3 to the fifth power and multiplying that by the sum of twelve and three, then squaring the whole result is 13,286,025.\u001b[0m\n", + "Therefore, the final result of taking 3 to the 5th power (243), multiplying by 12 + 3 (15), and then squaring the whole result is 13,286,025.\u001b[0m\n", "\n", "\u001b[1m> Finished chain.\u001b[0m\n" ] @@ -248,10 +265,10 @@ "data": { 
"text/plain": [ "{'input': 'Take 3 to the fifth power and multiply that by the sum of twelve and three, then square the whole result',\n", - " 'output': 'The result of raising 3 to the fifth power and multiplying that by the sum of twelve and three, then squaring the whole result is 13,286,025.'}" + " 'output': 'So 3645 squared is 13,286,025.\\n\\nTherefore, the final result of taking 3 to the 5th power (243), multiplying by 12 + 3 (15), and then squaring the whole result is 13,286,025.'}" ] }, - "execution_count": 6, + "execution_count": 8, "metadata": {}, "output_type": "execute_result" } @@ -263,13 +280,21 @@ " }\n", ")" ] + }, + { + "cell_type": "markdown", + "id": "4ecc190c-c133-493e-bd3e-f73e9690bae1", + "metadata": {}, + "source": [ + "You can see the [LangSmith trace here](https://smith.langchain.com/public/92694ff3-71b7-44ed-bc45-04bdf04d4689/r)." + ] } ], "metadata": { "kernelspec": { - "display_name": "poetry-venv", + "display_name": "Python 3 (ipykernel)", "language": "python", - "name": "poetry-venv" + "name": "python3" }, "language_info": { "codemirror_mode": { diff --git a/docs/docs/use_cases/tool_use/human_in_the_loop.ipynb b/docs/docs/use_cases/tool_use/human_in_the_loop.ipynb index 9dd15837bd38e3..188d8e8d5ac3b6 100644 --- a/docs/docs/use_cases/tool_use/human_in_the_loop.ipynb +++ b/docs/docs/use_cases/tool_use/human_in_the_loop.ipynb @@ -27,7 +27,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet langchain langchain-openai" + "%pip install --upgrade --quiet langchain" ] }, { @@ -48,8 +48,6 @@ "import getpass\n", "import os\n", "\n", - "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n", - "\n", "# If you'd like to use LangSmith, uncomment the below:\n", "# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n", "# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()" @@ -62,33 +60,57 @@ "source": [ "## Chain\n", "\n", - "Suppose we have the following (dummy) tools and tool-calling chain:" + "Suppose we have the following (dummy) tools and tool-calling chain:\n", + "\n", + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```" ] }, { "cell_type": "code", "execution_count": 2, + "id": "e0ff02ac-e750-493b-9b09-4578711a6726", + "metadata": {}, + "outputs": [], + "source": [ + "# | echo: false\n", + "# | outout: false\n", + "\n", + "from langchain_anthropic import ChatAnthropic\n", + "\n", + "llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\", temperature=0)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, "id": "0221fdfd-2a18-4449-a123-e6b0b15bb3d9", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "[{'type': 'count_emails', 'args': {'last_n_days': 5}, 'output': 10}]" + "[{'name': 'count_emails',\n", + " 'args': {'last_n_days': 5},\n", + " 'id': 'toolu_012VHuh7vk5dVNct5SgZj3gh',\n", + " 'output': 10}]" ] }, - "execution_count": 2, + "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from operator import itemgetter\n", + "from typing import Dict, List\n", "\n", - "from langchain.output_parsers import JsonOutputToolsParser\n", - "from langchain_core.runnables import Runnable, RunnableLambda, RunnablePassthrough\n", + "from langchain_core.messages import AIMessage\n", + "from langchain_core.runnables import Runnable, RunnablePassthrough\n", "from langchain_core.tools import tool\n", - "from langchain_openai import ChatOpenAI\n", "\n", "\n", "@tool\n", @@ -104,19 +126,19 @@ "\n", "\n", "tools = [count_emails, send_email]\n", - "model = 
ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0).bind_tools(tools)\n", + "llm_with_tools = llm.bind_tools(tools)\n", "\n", "\n", - "def call_tool(tool_invocation: dict) -> Runnable:\n", - " \"\"\"Function for dynamically constructing the end of the chain based on the model-selected tool.\"\"\"\n", + "def call_tools(msg: AIMessage) -> List[Dict]:\n", + " \"\"\"Simple sequential tool calling helper.\"\"\"\n", " tool_map = {tool.name: tool for tool in tools}\n", - " tool = tool_map[tool_invocation[\"type\"]]\n", - " return RunnablePassthrough.assign(output=itemgetter(\"args\") | tool)\n", + " tool_calls = msg.tool_calls.copy()\n", + " for tool_call in tool_calls:\n", + " tool_call[\"output\"] = tool_map[tool_call[\"name\"]].invoke(tool_call[\"args\"])\n", + " return tool_calls\n", "\n", "\n", - "# .map() allows us to apply a function to a list of inputs.\n", - "call_tool_list = RunnableLambda(call_tool).map()\n", - "chain = model | JsonOutputToolsParser() | call_tool_list\n", + "chain = llm_with_tools | call_tools\n", "chain.invoke(\"how many emails did i get in the last 5 days?\")" ] }, @@ -132,7 +154,7 @@ }, { "cell_type": "code", - "execution_count": 30, + "execution_count": 9, "id": "341fb055-0315-47bc-8f72-ed6103d2981f", "metadata": {}, "outputs": [], @@ -140,23 +162,23 @@ "import json\n", "\n", "\n", - "def human_approval(tool_invocations: list) -> Runnable:\n", + "def human_approval(msg: AIMessage) -> Runnable:\n", " tool_strs = \"\\n\\n\".join(\n", - " json.dumps(tool_call, indent=2) for tool_call in tool_invocations\n", + " json.dumps(tool_call, indent=2) for tool_call in msg.tool_calls\n", " )\n", - " msg = (\n", + " input_msg = (\n", " f\"Do you approve of the following tool invocations\\n\\n{tool_strs}\\n\\n\"\n", " \"Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no.\"\n", " )\n", - " resp = input(msg)\n", + " resp = input(input_msg)\n", " if resp.lower() not in (\"yes\", \"y\"):\n", " raise ValueError(f\"Tool invocations not approved:\\n\\n{tool_strs}\")\n", - " return tool_invocations" + " return msg" ] }, { "cell_type": "code", - "execution_count": 31, + "execution_count": 10, "id": "25dca07b-56ca-4b94-9955-d4f3e9895e03", "metadata": {}, "outputs": [ @@ -167,34 +189,38 @@ "Do you approve of the following tool invocations\n", "\n", "{\n", - " \"type\": \"count_emails\",\n", + " \"name\": \"count_emails\",\n", " \"args\": {\n", " \"last_n_days\": 5\n", - " }\n", + " },\n", + " \"id\": \"toolu_01LCpjpFxrRspygDscnHYyPm\"\n", "}\n", "\n", - "Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no. y\n" + "Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no. 
yes\n" ] }, { "data": { "text/plain": [ - "[{'type': 'count_emails', 'args': {'last_n_days': 5}, 'output': 10}]" + "[{'name': 'count_emails',\n", + " 'args': {'last_n_days': 5},\n", + " 'id': 'toolu_01LCpjpFxrRspygDscnHYyPm',\n", + " 'output': 10}]" ] }, - "execution_count": 31, + "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "chain = model | JsonOutputToolsParser() | human_approval | call_tool_list\n", + "chain = llm_with_tools | human_approval | call_tools\n", "chain.invoke(\"how many emails did i get in the last 5 days?\")" ] }, { "cell_type": "code", - "execution_count": 32, + "execution_count": 11, "id": "f558f2cd-847b-4ef9-a770-3961082b540c", "metadata": {}, "outputs": [ @@ -205,11 +231,12 @@ "Do you approve of the following tool invocations\n", "\n", "{\n", - " \"type\": \"send_email\",\n", + " \"name\": \"send_email\",\n", " \"args\": {\n", " \"message\": \"What's up homie\",\n", " \"recipient\": \"sally@gmail.com\"\n", - " }\n", + " },\n", + " \"id\": \"toolu_0158qJVd1AL32Y1xxYUAtNEy\"\n", "}\n", "\n", "Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no. no\n" @@ -217,20 +244,20 @@ }, { "ename": "ValueError", - "evalue": "Tool invocations not approved:\n\n{\n \"type\": \"send_email\",\n \"args\": {\n \"message\": \"What's up homie\",\n \"recipient\": \"sally@gmail.com\"\n }\n}", + "evalue": "Tool invocations not approved:\n\n{\n \"name\": \"send_email\",\n \"args\": {\n \"message\": \"What's up homie\",\n \"recipient\": \"sally@gmail.com\"\n },\n \"id\": \"toolu_0158qJVd1AL32Y1xxYUAtNEy\"\n}", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[32], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mchain\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mSend sally@gmail.com an email saying \u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mWhat\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43ms up homie\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/base.py:1774\u001b[0m, in \u001b[0;36mRunnableSequence.invoke\u001b[0;34m(self, input, config)\u001b[0m\n\u001b[1;32m 1772\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1773\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m i, step \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28menumerate\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msteps):\n\u001b[0;32m-> 1774\u001b[0m \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[43mstep\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1775\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1776\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;66;43;03m# mark each step as a child run\u001b[39;49;00m\n\u001b[1;32m 1777\u001b[0m \u001b[43m \u001b[49m\u001b[43mpatch_config\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1778\u001b[0m \u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\u001b[43m 
\u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget_child\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43mf\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mseq:step:\u001b[39;49m\u001b[38;5;132;43;01m{\u001b[39;49;00m\u001b[43mi\u001b[49m\u001b[38;5;241;43m+\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[38;5;132;43;01m}\u001b[39;49;00m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1779\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1780\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1781\u001b[0m \u001b[38;5;66;03m# finish the root run\u001b[39;00m\n\u001b[1;32m 1782\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n", - "File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/base.py:3074\u001b[0m, in \u001b[0;36mRunnableLambda.invoke\u001b[0;34m(self, input, config, **kwargs)\u001b[0m\n\u001b[1;32m 3072\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Invoke this runnable synchronously.\"\"\"\u001b[39;00m\n\u001b[1;32m 3073\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mhasattr\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mfunc\u001b[39m\u001b[38;5;124m\"\u001b[39m):\n\u001b[0;32m-> 3074\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_with_config\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 3075\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_invoke\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3076\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3077\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_config\u001b[49m\u001b[43m(\u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfunc\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3078\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3079\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 3080\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 3081\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m(\n\u001b[1;32m 3082\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mCannot invoke a coroutine function synchronously.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 3083\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mUse `ainvoke` instead.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 3084\u001b[0m )\n", - "File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/base.py:975\u001b[0m, in \u001b[0;36mRunnable._call_with_config\u001b[0;34m(self, func, input, config, run_type, **kwargs)\u001b[0m\n\u001b[1;32m 971\u001b[0m context \u001b[38;5;241m=\u001b[39m copy_context()\n\u001b[1;32m 972\u001b[0m context\u001b[38;5;241m.\u001b[39mrun(var_child_runnable_config\u001b[38;5;241m.\u001b[39mset, child_config)\n\u001b[1;32m 973\u001b[0m output \u001b[38;5;241m=\u001b[39m cast(\n\u001b[1;32m 974\u001b[0m Output,\n\u001b[0;32m--> 
975\u001b[0m \u001b[43mcontext\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 976\u001b[0m \u001b[43m \u001b[49m\u001b[43mcall_func_with_variable_args\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 977\u001b[0m \u001b[43m \u001b[49m\u001b[43mfunc\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# type: ignore[arg-type]\u001b[39;49;00m\n\u001b[1;32m 978\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# type: ignore[arg-type]\u001b[39;49;00m\n\u001b[1;32m 979\u001b[0m \u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 980\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 981\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 982\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m,\n\u001b[1;32m 983\u001b[0m )\n\u001b[1;32m 984\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 985\u001b[0m run_manager\u001b[38;5;241m.\u001b[39mon_chain_error(e)\n", - "File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/config.py:323\u001b[0m, in \u001b[0;36mcall_func_with_variable_args\u001b[0;34m(func, input, config, run_manager, **kwargs)\u001b[0m\n\u001b[1;32m 321\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m run_manager \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m accepts_run_manager(func):\n\u001b[1;32m 322\u001b[0m kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrun_manager\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m run_manager\n\u001b[0;32m--> 323\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/base.py:2950\u001b[0m, in \u001b[0;36mRunnableLambda._invoke\u001b[0;34m(self, input, run_manager, config, **kwargs)\u001b[0m\n\u001b[1;32m 2948\u001b[0m output \u001b[38;5;241m=\u001b[39m chunk\n\u001b[1;32m 2949\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 2950\u001b[0m output \u001b[38;5;241m=\u001b[39m \u001b[43mcall_func_with_variable_args\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2951\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfunc\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\n\u001b[1;32m 2952\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2953\u001b[0m \u001b[38;5;66;03m# If the output is a runnable, invoke it\u001b[39;00m\n\u001b[1;32m 2954\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(output, Runnable):\n", - "File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/config.py:323\u001b[0m, 
in \u001b[0;36mcall_func_with_variable_args\u001b[0;34m(func, input, config, run_manager, **kwargs)\u001b[0m\n\u001b[1;32m 321\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m run_manager \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m accepts_run_manager(func):\n\u001b[1;32m 322\u001b[0m kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrun_manager\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m run_manager\n\u001b[0;32m--> 323\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n", - "Cell \u001b[0;32mIn[30], line 11\u001b[0m, in \u001b[0;36mhuman_approval\u001b[0;34m(tool_invocations)\u001b[0m\n\u001b[1;32m 9\u001b[0m resp \u001b[38;5;241m=\u001b[39m \u001b[38;5;28minput\u001b[39m(msg)\n\u001b[1;32m 10\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m resp\u001b[38;5;241m.\u001b[39mlower() \u001b[38;5;129;01min\u001b[39;00m (\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124myes\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124my\u001b[39m\u001b[38;5;124m\"\u001b[39m):\n\u001b[0;32m---> 11\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mTool invocations not approved:\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;132;01m{\u001b[39;00mtool_strs\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 12\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m tool_invocations\n", - "\u001b[0;31mValueError\u001b[0m: Tool invocations not approved:\n\n{\n \"type\": \"send_email\",\n \"args\": {\n \"message\": \"What's up homie\",\n \"recipient\": \"sally@gmail.com\"\n }\n}" + "Cell \u001b[0;32mIn[11], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mchain\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mSend sally@gmail.com an email saying \u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mWhat\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43ms up homie\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/base.py:2499\u001b[0m, in \u001b[0;36mRunnableSequence.invoke\u001b[0;34m(self, input, config)\u001b[0m\n\u001b[1;32m 2497\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 2498\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m i, step \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28menumerate\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msteps):\n\u001b[0;32m-> 2499\u001b[0m \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[43mstep\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2500\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2501\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;66;43;03m# mark each step as a child run\u001b[39;49;00m\n\u001b[1;32m 2502\u001b[0m \u001b[43m \u001b[49m\u001b[43mpatch_config\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 
2503\u001b[0m \u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget_child\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43mf\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mseq:step:\u001b[39;49m\u001b[38;5;132;43;01m{\u001b[39;49;00m\u001b[43mi\u001b[49m\u001b[38;5;241;43m+\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[38;5;132;43;01m}\u001b[39;49;00m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2504\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2505\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2506\u001b[0m \u001b[38;5;66;03m# finish the root run\u001b[39;00m\n\u001b[1;32m 2507\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n", + "File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/base.py:3961\u001b[0m, in \u001b[0;36mRunnableLambda.invoke\u001b[0;34m(self, input, config, **kwargs)\u001b[0m\n\u001b[1;32m 3959\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Invoke this runnable synchronously.\"\"\"\u001b[39;00m\n\u001b[1;32m 3960\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mhasattr\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mfunc\u001b[39m\u001b[38;5;124m\"\u001b[39m):\n\u001b[0;32m-> 3961\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_with_config\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 3962\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_invoke\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3963\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3964\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_config\u001b[49m\u001b[43m(\u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfunc\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3965\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3966\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 3967\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 3968\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m(\n\u001b[1;32m 3969\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mCannot invoke a coroutine function synchronously.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 3970\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mUse `ainvoke` instead.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 3971\u001b[0m )\n", + "File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/base.py:1625\u001b[0m, in \u001b[0;36mRunnable._call_with_config\u001b[0;34m(self, func, input, config, run_type, **kwargs)\u001b[0m\n\u001b[1;32m 1621\u001b[0m context \u001b[38;5;241m=\u001b[39m copy_context()\n\u001b[1;32m 1622\u001b[0m context\u001b[38;5;241m.\u001b[39mrun(var_child_runnable_config\u001b[38;5;241m.\u001b[39mset, child_config)\n\u001b[1;32m 1623\u001b[0m output 
\u001b[38;5;241m=\u001b[39m cast(\n\u001b[1;32m 1624\u001b[0m Output,\n\u001b[0;32m-> 1625\u001b[0m \u001b[43mcontext\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1626\u001b[0m \u001b[43m \u001b[49m\u001b[43mcall_func_with_variable_args\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# type: ignore[arg-type]\u001b[39;49;00m\n\u001b[1;32m 1627\u001b[0m \u001b[43m \u001b[49m\u001b[43mfunc\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# type: ignore[arg-type]\u001b[39;49;00m\n\u001b[1;32m 1628\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# type: ignore[arg-type]\u001b[39;49;00m\n\u001b[1;32m 1629\u001b[0m \u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1630\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1631\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1632\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m,\n\u001b[1;32m 1633\u001b[0m )\n\u001b[1;32m 1634\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 1635\u001b[0m run_manager\u001b[38;5;241m.\u001b[39mon_chain_error(e)\n", + "File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/config.py:347\u001b[0m, in \u001b[0;36mcall_func_with_variable_args\u001b[0;34m(func, input, config, run_manager, **kwargs)\u001b[0m\n\u001b[1;32m 345\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m run_manager \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m accepts_run_manager(func):\n\u001b[1;32m 346\u001b[0m kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrun_manager\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m run_manager\n\u001b[0;32m--> 347\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/base.py:3835\u001b[0m, in \u001b[0;36mRunnableLambda._invoke\u001b[0;34m(self, input, run_manager, config, **kwargs)\u001b[0m\n\u001b[1;32m 3833\u001b[0m output \u001b[38;5;241m=\u001b[39m chunk\n\u001b[1;32m 3834\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 3835\u001b[0m output \u001b[38;5;241m=\u001b[39m \u001b[43mcall_func_with_variable_args\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 3836\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfunc\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\n\u001b[1;32m 3837\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 3838\u001b[0m \u001b[38;5;66;03m# If the output is a runnable, invoke it\u001b[39;00m\n\u001b[1;32m 3839\u001b[0m 
\u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(output, Runnable):\n", + "File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/config.py:347\u001b[0m, in \u001b[0;36mcall_func_with_variable_args\u001b[0;34m(func, input, config, run_manager, **kwargs)\u001b[0m\n\u001b[1;32m 345\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m run_manager \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m accepts_run_manager(func):\n\u001b[1;32m 346\u001b[0m kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrun_manager\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m run_manager\n\u001b[0;32m--> 347\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n", + "Cell \u001b[0;32mIn[9], line 14\u001b[0m, in \u001b[0;36mhuman_approval\u001b[0;34m(msg)\u001b[0m\n\u001b[1;32m 12\u001b[0m resp \u001b[38;5;241m=\u001b[39m \u001b[38;5;28minput\u001b[39m(input_msg)\n\u001b[1;32m 13\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m resp\u001b[38;5;241m.\u001b[39mlower() \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m (\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124myes\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124my\u001b[39m\u001b[38;5;124m\"\u001b[39m):\n\u001b[0;32m---> 14\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mTool invocations not approved:\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;132;01m{\u001b[39;00mtool_strs\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 15\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m msg\n", + "\u001b[0;31mValueError\u001b[0m: Tool invocations not approved:\n\n{\n \"name\": \"send_email\",\n \"args\": {\n \"message\": \"What's up homie\",\n \"recipient\": \"sally@gmail.com\"\n },\n \"id\": \"toolu_0158qJVd1AL32Y1xxYUAtNEy\"\n}" ] } ], @@ -249,9 +276,9 @@ ], "metadata": { "kernelspec": { - "display_name": "poetry-venv", + "display_name": "Python 3 (ipykernel)", "language": "python", - "name": "poetry-venv" + "name": "python3" }, "language_info": { "codemirror_mode": { diff --git a/docs/docs/use_cases/tool_use/multiple_tools.ipynb b/docs/docs/use_cases/tool_use/multiple_tools.ipynb index 2bae4f24d36204..cdc27dcb3c1d8d 100644 --- a/docs/docs/use_cases/tool_use/multiple_tools.ipynb +++ b/docs/docs/use_cases/tool_use/multiple_tools.ipynb @@ -37,7 +37,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet langchain langchain-openai" + "%pip install --upgrade --quiet langchain-core" ] }, { @@ -45,12 +45,12 @@ "id": "59d08fd0-ddd9-4c74-bcea-a5ca3a86e542", "metadata": {}, "source": [ - "And set these environment variables:" + "If you'd like to trace your runs in [LangSmith](/docs/langsmith/) uncomment and set the following environment variables:" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "id": "4185e74b-0500-4cad-ace0-bac37de466ac", "metadata": {}, "outputs": [], @@ -58,9 +58,6 @@ "import getpass\n", "import os\n", "\n", - "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n", - "\n", - "# If you'd like to use LangSmith, uncomment 
the below\n", "# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n", "# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()" ] @@ -77,7 +74,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 5, "id": "e13ec98c-8521-4d63-b521-caf92da87b70", "metadata": {}, "outputs": [], @@ -96,12 +93,12 @@ "id": "3de233af-b3bd-4f0c-8b1a-83527143a8db", "metadata": {}, "source": [ - "And now we can add to it a `exponentiate` and `add` tool:" + "And now we can add to it an `exponentiate` and `add` tool:" ] }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 6, "id": "e93661cd-a2ba-4ada-91ad-baf1b60879ec", "metadata": {}, "outputs": [], @@ -123,60 +120,78 @@ "id": "bbea4555-ed10-4a18-b802-e9a3071f132b", "metadata": {}, "source": [ - "The main difference between using one Tool and many, is that in the case of many we can't be sure which Tool the model will invoke. So we cannot hardcode, like we did in the [Quickstart](/docs/use_cases/tool_use/quickstart), a specific tool into our chain. Instead we'll add `call_tool_list`, a `RunnableLambda` that takes the `JsonOutputToolsParser` output and actually builds the end of the chain based on it, meaning it appends the Tools that were envoked to the end of the chain at runtime. We can do this because LCEL has the cool property that in any Runnable (the core building block of LCEL) sequence, if one component returns more Runnables, those are run as part of the chain." + "The main difference between using one Tool and many is that we can't be sure which Tool the model will invoke upfront, so we cannot hardcode, like we did in the [Quickstart](/docs/use_cases/tool_use/quickstart), a specific tool into our chain. Instead we'll add `call_tools`, a `RunnableLambda` that takes the output AI message with tools calls and routes to the correct tools.\n", + "\n", + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 7, + "id": "f00f0f3f-8530-4c1d-a26c-d20824e31faf", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain_anthropic import ChatAnthropic\n", + "\n", + "llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\", temperature=0)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, "id": "c35359ae-a740-48c5-b5e7-1a377fb25aa2", "metadata": {}, "outputs": [], "source": [ "from operator import itemgetter\n", - "from typing import Union\n", + "from typing import Dict, List, Union\n", "\n", - "from langchain.output_parsers import JsonOutputToolsParser\n", + "from langchain_core.messages import AIMessage\n", "from langchain_core.runnables import (\n", " Runnable,\n", " RunnableLambda,\n", " RunnableMap,\n", " RunnablePassthrough,\n", ")\n", - "from langchain_openai import ChatOpenAI\n", "\n", - "model = ChatOpenAI(model=\"gpt-3.5-turbo\")\n", "tools = [multiply, exponentiate, add]\n", - "model_with_tools = model.bind_tools(tools)\n", + "llm_with_tools = llm.bind_tools(tools)\n", "tool_map = {tool.name: tool for tool in tools}\n", "\n", "\n", - "def call_tool(tool_invocation: dict) -> Union[str, Runnable]:\n", - " \"\"\"Function for dynamically constructing the end of the chain based on the model-selected tool.\"\"\"\n", - " tool = tool_map[tool_invocation[\"type\"]]\n", - " return RunnablePassthrough.assign(output=itemgetter(\"args\") | tool)\n", + "def call_tools(msg: AIMessage) -> Runnable:\n", + " \"\"\"Simple sequential tool calling helper.\"\"\"\n", + " tool_map = {tool.name: tool for tool in 
tools}\n", + " tool_calls = msg.tool_calls.copy()\n", + " for tool_call in tool_calls:\n", + " tool_call[\"output\"] = tool_map[tool_call[\"name\"]].invoke(tool_call[\"args\"])\n", + " return tool_calls\n", "\n", "\n", - "# .map() allows us to apply a function to a list of inputs.\n", - "call_tool_list = RunnableLambda(call_tool).map()\n", - "chain = model_with_tools | JsonOutputToolsParser() | call_tool_list" + "chain = llm_with_tools | call_tools" ] }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 12, "id": "ea6dbb32-ec9b-4c70-a90f-a2db93978cf1", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "[{'type': 'multiply',\n", + "[{'name': 'multiply',\n", " 'args': {'first_int': 23, 'second_int': 7},\n", + " 'id': 'toolu_01Wf8kUs36kxRKLDL8vs7G8q',\n", " 'output': 161}]" ] }, - "execution_count": 14, + "execution_count": 12, "metadata": {}, "output_type": "execute_result" } @@ -187,19 +202,20 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 13, "id": "b1c6c0f8-6d04-40d4-a40e-8719ca7b27c2", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "[{'type': 'add',\n", + "[{'name': 'add',\n", " 'args': {'first_int': 1000000, 'second_int': 1000000000},\n", + " 'id': 'toolu_012aK4xZBQg2sXARsFZnqxHh',\n", " 'output': 1001000000}]" ] }, - "execution_count": 15, + "execution_count": 13, "metadata": {}, "output_type": "execute_result" } @@ -210,19 +226,20 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 14, "id": "ce76f299-1a4d-421c-afa4-a6346e34285c", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "[{'type': 'exponentiate',\n", + "[{'name': 'exponentiate',\n", " 'args': {'base': 37, 'exponent': 3},\n", + " 'id': 'toolu_01VDU6X3ugDb9cpnnmCZFPbC',\n", " 'output': 50653}]" ] }, - "execution_count": 16, + "execution_count": 14, "metadata": {}, "output_type": "execute_result" } @@ -234,9 +251,9 @@ ], "metadata": { "kernelspec": { - "display_name": "poetry-venv", + "display_name": "Python 3 (ipykernel)", "language": "python", - "name": "poetry-venv" + "name": "python3" }, "language_info": { "codemirror_mode": { diff --git a/docs/docs/use_cases/tool_use/parallel.ipynb b/docs/docs/use_cases/tool_use/parallel.ipynb index e8513281685e45..f0a22567a99597 100644 --- a/docs/docs/use_cases/tool_use/parallel.ipynb +++ b/docs/docs/use_cases/tool_use/parallel.ipynb @@ -7,7 +7,7 @@ "source": [ "# Parallel tool use\n", "\n", - "In the [Chains with multiple tools](/docs/use_cases/tool_use/multiple_tools) guide we saw how to build function-calling chains that select between multiple tools. Some models, like the OpenAI models released in Fall 2023, also support parallel function calling, which allows you to invoke multiple functions (or the same function multiple times) in a single model call. Our previous chain from the multiple tools guides actually already supports this, we just need to use an OpenAI model capable of parallel function calling." + "In the [Chains with multiple tools](/docs/use_cases/tool_use/multiple_tools) guide we saw how to build function-calling chains that select between multiple tools. Some models, like the OpenAI models released in Fall 2023, also support parallel function calling, which allows you to invoke multiple functions (or the same function multiple times) in a single model call. Our previous chain from the multiple tools guides actually already supports this." 
] }, { @@ -27,7 +27,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet langchain langchain-openai" + "%pip install --upgrade --quiet langchain-core" ] }, { @@ -35,7 +35,7 @@ "id": "59d08fd0-ddd9-4c74-bcea-a5ca3a86e542", "metadata": {}, "source": [ - "And set these environment variables:" + "If you'd like to trace your runs in [LangSmith](/docs/langsmith/) uncomment and set the following environment variables:" ] }, { @@ -48,9 +48,6 @@ "import getpass\n", "import os\n", "\n", - "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n", - "\n", - "# If you'd like to use LangSmith, uncomment the below\n", "# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n", "# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()" ] @@ -65,7 +62,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 3, "id": "e13ec98c-8521-4d63-b521-caf92da87b70", "metadata": {}, "outputs": [], @@ -98,67 +95,91 @@ "source": [ "# Chain\n", "\n", - "Notice we use an `-1106` model, which as of this writing is the only kind that supports parallel function calling:" + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```" ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 7, + "id": "f67d91d8-cc38-4065-8f80-901e079954dd", + "metadata": {}, + "outputs": [], + "source": [ + "# | echo: false\n", + "# | output: false\n", + "\n", + "from langchain_openai import ChatOpenAI\n", + "\n", + "llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, "id": "c35359ae-a740-48c5-b5e7-1a377fb25aa2", "metadata": {}, "outputs": [], "source": [ "from operator import itemgetter\n", - "from typing import Union\n", + "from typing import Dict, List, Union\n", "\n", - "from langchain.output_parsers import JsonOutputToolsParser\n", + "from langchain_core.messages import AIMessage\n", "from langchain_core.runnables import (\n", " Runnable,\n", " RunnableLambda,\n", " RunnableMap,\n", " RunnablePassthrough,\n", ")\n", - "from langchain_openai import ChatOpenAI\n", "\n", - "model = ChatOpenAI(model=\"gpt-3.5-turbo-1106\")\n", "tools = [multiply, exponentiate, add]\n", - "model_with_tools = model.bind_tools(tools)\n", + "llm_with_tools = llm.bind_tools(tools)\n", "tool_map = {tool.name: tool for tool in tools}\n", "\n", "\n", - "def call_tool(tool_invocation: dict) -> Union[str, Runnable]:\n", - " \"\"\"Function for dynamically constructing the end of the chain based on the model-selected tool.\"\"\"\n", - " tool = tool_map[tool_invocation[\"type\"]]\n", - " return RunnablePassthrough.assign(output=itemgetter(\"args\") | tool)\n", + "def call_tools(msg: AIMessage) -> Runnable:\n", + " \"\"\"Simple sequential tool calling helper.\"\"\"\n", + " tool_map = {tool.name: tool for tool in tools}\n", + " tool_calls = msg.tool_calls.copy()\n", + " for tool_call in tool_calls:\n", + " tool_call[\"output\"] = tool_map[tool_call[\"name\"]].invoke(tool_call[\"args\"])\n", + " return tool_calls\n", "\n", "\n", - "# .map() allows us to apply a function to a list of inputs.\n", - "call_tool_list = RunnableLambda(call_tool).map()\n", - "chain = model_with_tools | JsonOutputToolsParser() | call_tool_list" + "chain = llm_with_tools | call_tools" ] }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 9, "id": "ea6dbb32-ec9b-4c70-a90f-a2db93978cf1", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "[{'type': 'multiply',\n", + "[{'name': 'multiply',\n", " 'args': 
{'first_int': 23, 'second_int': 7},\n", + " 'id': 'call_22tgOrsVLyLMsl2RLbUhtycw',\n", " 'output': 161},\n", - " {'type': 'add', 'args': {'first_int': 5, 'second_int': 18}, 'output': 23},\n", - " {'type': 'add',\n", + " {'name': 'multiply',\n", + " 'args': {'first_int': 5, 'second_int': 18},\n", + " 'id': 'call_EbKHEG3TjqBhEwb7aoxUtgzf',\n", + " 'output': 90},\n", + " {'name': 'add',\n", " 'args': {'first_int': 1000000, 'second_int': 1000000000},\n", + " 'id': 'call_LUhu2IT3vINxlTc5fCVY6Nhi',\n", " 'output': 1001000000},\n", - " {'type': 'exponentiate',\n", + " {'name': 'exponentiate',\n", " 'args': {'base': 37, 'exponent': 3},\n", + " 'id': 'call_bnCZIXelOKkmcyd4uGXId9Ct',\n", " 'output': 50653}]" ] }, - "execution_count": 12, + "execution_count": 9, "metadata": {}, "output_type": "execute_result" } @@ -172,9 +193,9 @@ ], "metadata": { "kernelspec": { - "display_name": "poetry-venv", + "display_name": "Python 3 (ipykernel)", "language": "python", - "name": "poetry-venv" + "name": "python3" }, "language_info": { "codemirror_mode": { diff --git a/docs/docs/use_cases/tool_use/prompting.ipynb b/docs/docs/use_cases/tool_use/prompting.ipynb index 09dcf0b4607108..6e36db4330d66c 100644 --- a/docs/docs/use_cases/tool_use/prompting.ipynb +++ b/docs/docs/use_cases/tool_use/prompting.ipynb @@ -15,9 +15,9 @@ "id": "14b94240", "metadata": {}, "source": [ - "# Tool use without function calling\n", + "# Using models that don't support tool calling\n", "\n", - "In this guide we'll build a Chain that does not rely on any special model APIs (like function-calling, which we showed in the [Quickstart](/docs/use_cases/tool_use/quickstart)) and instead just prompts the model directly to invoke tools." + "In this guide we'll build a Chain that does not rely on any special model APIs (like tool calling, which we showed in the [Quickstart](/docs/use_cases/tool_use/quickstart)) and instead just prompts the model directly to invoke tools." 
] }, { @@ -393,9 +393,9 @@ ], "metadata": { "kernelspec": { - "display_name": "poetry-venv", + "display_name": "Python 3 (ipykernel)", "language": "python", - "name": "poetry-venv" + "name": "python3" }, "language_info": { "codemirror_mode": { diff --git a/docs/docs/use_cases/tool_use/quickstart.ipynb b/docs/docs/use_cases/tool_use/quickstart.ipynb index d363ab853ae470..3b5a476d483963 100644 --- a/docs/docs/use_cases/tool_use/quickstart.ipynb +++ b/docs/docs/use_cases/tool_use/quickstart.ipynb @@ -37,7 +37,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet langchain langchain-openai" + "%pip install --upgrade --quiet langchain" ] }, { @@ -45,7 +45,7 @@ "id": "36a9c6fc-8264-462f-b8d7-9c7bbec22ef9", "metadata": {}, "source": [ - "And set these environment variables:" + "If you'd like to trace your runs in [LangSmith](/docs/langsmith/) uncomment and set the following environment variables:" ] }, { @@ -58,9 +58,6 @@ "import getpass\n", "import os\n", "\n", - "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n", - "\n", - "# If you'd like to use LangSmith, uncomment the below\n", "# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n", "# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()" ] @@ -77,7 +74,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 6, "id": "90187d07", "metadata": {}, "outputs": [], @@ -145,22 +142,31 @@ "\n", "![chain](../../../static/img/tool_chain.svg)\n", "\n", - "### Function calling\n", - "One of the most reliable ways to use tools with LLMs is with function calling APIs (also sometimes called tool calling or parallel function calling). This only works with models that explicitly support function calling, like OpenAI models. To learn more head to the [function calling guide](/docs/modules/model_io/chat/function_calling).\n", + "### Tool/function calling\n", + "One of the most reliable ways to use tools with LLMs is with tool calling APIs (also sometimes called function calling). This only works with models that explicitly support tool calling. You can see which models support tool calling [here](/docs/integrations/chat/), and learn more about how to use tool calling in [this guide](/docs/modules/model_io/chat/function_calling).\n", + "\n", + "First we'll define our model and tools. We'll start with just a single tool, `multiply`.\n", "\n", - "First we'll define our model and tools. We'll start with just a single tool, `multiply`." + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```" ] }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 7, "id": "9bce8935-1465-45ac-8a93-314222c753c4", "metadata": {}, "outputs": [], "source": [ + "# | echo: false\n", + "# | output: false\n", + "\n", "from langchain_openai.chat_models import ChatOpenAI\n", "\n", - "model = ChatOpenAI(model=\"gpt-3.5-turbo-1106\")" + "llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)" ] }, { @@ -168,131 +174,57 @@ "id": "c22e6f0f-c5ad-4c0f-9514-e626704ea51c", "metadata": {}, "source": [ - "Next we'll convert our LangChain Tool to an OpenAI format JSONSchema function, and bind this as the `tools` argument to be passed to all ChatOpenAI calls. Since we only have a single Tool and in this initial chain we want to make sure it's always used, we'll also specify `tool_choice`. 
See the [OpenAI chat API reference](https://platform.openai.com/docs/api-reference/chat/create#chat-create-tool_choice) for more on these parameters:" + "We'll use `bind_tools` to pass the definition of our tool in as part of each call to the model, so that the model can invoke the tool when appropriate:" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 8, "id": "3bfe2cdc-7d72-457c-a9a1-5fa1e0bcde55", "metadata": {}, "outputs": [], "source": [ - "model_with_tools = model.bind_tools([multiply], tool_choice=\"multiply\")" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "19f6285f-d8b1-432c-8c07-f7aee3fc0fa4", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[{'type': 'function',\n", - " 'function': {'name': 'multiply',\n", - " 'description': 'multiply(first_int: int, second_int: int) -> int - Multiply two integers together.',\n", - " 'parameters': {'type': 'object',\n", - " 'properties': {'first_int': {'type': 'integer'},\n", - " 'second_int': {'type': 'integer'}},\n", - " 'required': ['first_int', 'second_int']}}}]" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "model_with_tools.kwargs[\"tools\"]" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "340c1b04-38cb-4467-83ca-8aa2b59176d8", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'type': 'function', 'function': {'name': 'multiply'}}" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "model_with_tools.kwargs[\"tool_choice\"]" + "llm_with_tools = llm.bind_tools([multiply])" ] }, { "cell_type": "markdown", - "id": "9fa2ba14-9a97-4960-a6c7-422edecdaf4b", + "id": "07fc830e-a6d2-4fac-904b-b94072e64018", "metadata": {}, "source": [ - "Now we'll compose out tool-calling model with a `JsonOutputToolsParser`, a built-in LangChain output parser that converts an OpenAI function-calling response to a list of `{\"type\": \"TOOL_NAME\", \"args\": {...}}` dicts with the tools to invoke and arguments to invoke them with." + "When the model invokes the tool, this will show up in the `AIMessage.tool_calls` attribute of the output:" ] }, { "cell_type": "code", - "execution_count": 7, - "id": "5518aba4-c44d-4896-9b63-fc9d56c245df", + "execution_count": 9, + "id": "68f30343-14ef-48f1-badd-b6a03977316d", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "[{'type': 'multiply', 'args': {'first_int': 4, 'second_int': 23}}]" + "[{'name': 'multiply',\n", + " 'args': {'first_int': 5, 'second_int': 42},\n", + " 'id': 'call_cCP9oA3tRz7HDrjFn1FdmDaG'}]" ] }, - "execution_count": 7, + "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "from langchain.output_parsers import JsonOutputToolsParser\n", - "\n", - "chain = model_with_tools | JsonOutputToolsParser()\n", - "chain.invoke(\"What's four times 23\")" + "msg = llm_with_tools.invoke(\"whats 5 times forty two\")\n", + "msg.tool_calls" ] }, { "cell_type": "markdown", - "id": "7f712d8d-0314-4d3d-b563-378b72fd8bb5", - "metadata": {}, - "source": [ - "Since we know we're always invoking the `multiply` tool, we can simplify our output a bit to return only the args for the `multiply` tool using the `JsonoutputKeyToolsParser`. To further simplify we'll specify `first_tool_only=True`, so that instead of a list of tool invocations our output parser returns only the first tool invocation." 
- ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "cfacfcdc-8a45-4c60-a175-7efe9534f83e", + "id": "330015a3-a5a7-433a-826a-6277766f6c27", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'first_int': 4, 'second_int': 23}" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], "source": [ - "from langchain.output_parsers import JsonOutputKeyToolsParser\n", - "\n", - "chain = model_with_tools | JsonOutputKeyToolsParser(\n", - " key_name=\"multiply\", first_tool_only=True\n", - ")\n", - "chain.invoke(\"What's four times 23\")" + "Check out the [LangSmith trace here](https://smith.langchain.com/public/81ff0cbd-e05b-4720-bf61-2c9807edb708/r)." ] }, { @@ -302,12 +234,12 @@ "source": [ "### Invoking the tool\n", "\n", - "Great! We're able to generate tool invocations. But what if we want to actually call the tool? To do that we just need to pass them to the tool:" + "Great! We're able to generate tool invocations. But what if we want to actually call the tool? To do so we'll need to pass the generated tool args to our tool. As a simple example we'll just extract the arguments of the first tool_call:" ] }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 12, "id": "4f5325ca-e5dc-4d1a-ba36-b085a029c90a", "metadata": {}, "outputs": [ @@ -317,7 +249,7 @@ "92" ] }, - "execution_count": 10, + "execution_count": 12, "metadata": {}, "output_type": "execute_result" } @@ -325,15 +257,18 @@ "source": [ "from operator import itemgetter\n", "\n", - "# Note: the `.map()` at the end of `multiply` allows us to pass in a list of `multiply` arguments instead of a single one.\n", - "chain = (\n", - " model_with_tools\n", - " | JsonOutputKeyToolsParser(key_name=\"multiply\", first_tool_only=True)\n", - " | multiply\n", - ")\n", + "chain = llm_with_tools | (lambda x: x.tool_calls[0][\"args\"]) | multiply\n", "chain.invoke(\"What's four times 23\")" ] }, + { + "cell_type": "markdown", + "id": "79a9eb63-383d-4dd4-a162-08b4a52ef4d9", + "metadata": {}, + "source": [ + "Check out the [LangSmith trace here](https://smith.langchain.com/public/16bbabb9-fc9b-41e5-a33d-487c42df4f85/r)." + ] + }, { "cell_type": "markdown", "id": "0521d3d5", @@ -345,47 +280,54 @@ "\n", "LangChain comes with a number of built-in agents that are optimized for different use cases. 
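The updated chain above only executes the first tool call. As a hedged extension (not part of this PR), here is a small sketch of running every tool call the model emits, assuming the same `multiply` tool and `llm_with_tools` as in the quickstart cells:

```python
# Not part of this PR: execute every tool call on the message, not just the first.
from langchain_core.messages import AIMessage
from langchain_core.runnables import RunnableLambda


def call_all_tools(msg: AIMessage) -> list:
    """Run each requested tool and pair the result with its call id."""
    tool_map = {"multiply": multiply}  # add any other bound tools here
    return [
        {"id": call["id"], "output": tool_map[call["name"]].invoke(call["args"])}
        for call in msg.tool_calls
    ]


chain = llm_with_tools | RunnableLambda(call_all_tools)
chain.invoke("What's four times 23, and 7 times 5?")
```

Keeping each result keyed by its call id makes it straightforward to feed the outputs back to the model as `ToolMessage`s if a follow-up turn is needed.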
Read about all the [agent types here](/docs/modules/agents/agent_types/).\n", "\n", - "As an example, let's try out the OpenAI tools agent, which makes use of the new OpenAI tool-calling API (this is only available in the latest OpenAI models, and differs from function-calling in that the model can return multiple function invocations at once)\n", + "We'll use the [tool calling agent](/docs/modules/agents/agent_types/tool_calling/), which is generally the most reliable kind and the recommended one for most use cases.\n", "\n", "![agent](../../../static/img/tool_agent.svg)" ] }, { "cell_type": "code", - "execution_count": 86, + "execution_count": 13, "id": "21723cf4-9421-4a8d-92a6-eeeb8f4367f1", "metadata": {}, "outputs": [], "source": [ "from langchain import hub\n", - "from langchain.agents import AgentExecutor, create_openai_tools_agent\n", - "from langchain_openai import ChatOpenAI" + "from langchain.agents import AgentExecutor, create_tool_calling_agent" ] }, { "cell_type": "code", - "execution_count": 88, + "execution_count": 14, "id": "6be83879-9da3-4dd9-b147-a79f76affd7a", "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "[SystemMessagePromptTemplate(prompt=PromptTemplate(input_variables=[], template='You are a helpful assistant')),\n", - " MessagesPlaceholder(variable_name='chat_history', optional=True),\n", - " HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['input'], template='{input}')),\n", - " MessagesPlaceholder(variable_name='agent_scratchpad')]" - ] - }, - "execution_count": 88, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "================================\u001b[1m System Message \u001b[0m================================\n", + "\n", + "You are a helpful assistant\n", + "\n", + "=============================\u001b[1m Messages Placeholder \u001b[0m=============================\n", + "\n", + "\u001b[33;1m\u001b[1;3m{chat_history}\u001b[0m\n", + "\n", + "================================\u001b[1m Human Message \u001b[0m=================================\n", + "\n", + "\u001b[33;1m\u001b[1;3m{input}\u001b[0m\n", + "\n", + "=============================\u001b[1m Messages Placeholder \u001b[0m=============================\n", + "\n", + "\u001b[33;1m\u001b[1;3m{agent_scratchpad}\u001b[0m\n" + ] } ], "source": [ - "# Get the prompt to use - you can modify this!\n", + "# Get the prompt to use - can be replaced with any prompt that includes variables \"agent_scratchpad\" and \"input\"!\n", "prompt = hub.pull(\"hwchase17/openai-tools-agent\")\n", - "prompt.messages" + "prompt.pretty_print()" ] }, { @@ -398,7 +340,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 15, "id": "95c86d32-ee45-4c87-a28c-14eff19b49e9", "metadata": {}, "outputs": [], @@ -420,22 +362,18 @@ }, { "cell_type": "code", - "execution_count": 90, + "execution_count": 16, "id": "17b09ac6-c9b7-4340-a8a0-3d3061f7888c", "metadata": {}, "outputs": [], "source": [ - "# Choose the LLM that will drive the agent\n", - "# Only certain models support this\n", - "model = ChatOpenAI(model=\"gpt-3.5-turbo-1106\", temperature=0)\n", - "\n", - "# Construct the OpenAI Tools agent\n", - "agent = create_openai_tools_agent(model, tools, prompt)" + "# Construct the tool calling agent\n", + "agent = create_tool_calling_agent(llm, tools, prompt)" ] }, { "cell_type": "code", - "execution_count": 91, + "execution_count": 17, "id": "675091d2-cac9-45c4-a5d7-b760ee6c1986", "metadata": {}, "outputs": [], @@ -454,7 +392,7 @@ 
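Putting the agent pieces from these cells together, a hedged end-to-end sketch follows. The math tools are defined in earlier, unchanged cells, so their signatures here are assumptions inferred from the invocations shown in the run output, and the `AgentExecutor` wiring follows the standard constructor rather than the hidden cell source.

```python
# Hedged, consolidated sketch of the tool-calling agent these cells construct.
from langchain import hub
from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI


# Tool definitions below are assumptions based on the arguments seen in the output.
@tool
def multiply(first_int: int, second_int: int) -> int:
    """Multiply two integers together."""
    return first_int * second_int


@tool
def add(first_int: int, second_int: int) -> int:
    """Add two integers."""
    return first_int + second_int


@tool
def exponentiate(base: int, exponent: int) -> int:
    """Raise the base to the exponent power."""
    return base**exponent


tools = [multiply, add, exponentiate]

llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)

# Any prompt with "input" and "agent_scratchpad" variables works here.
prompt = hub.pull("hwchase17/openai-tools-agent")

agent = create_tool_calling_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

agent_executor.invoke(
    {
        "input": "Take 3 to the fifth power and multiply that by the sum of twelve "
        "and three, then square the whole result"
    }
)
```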
}, { "cell_type": "code", - "execution_count": 95, + "execution_count": 18, "id": "f7dbb240-809e-4e41-8f63-1a4636e8e26d", "metadata": {}, "outputs": [ @@ -478,10 +416,16 @@ "\n", "\n", "\u001b[0m\u001b[36;1m\u001b[1;3m3645\u001b[0m\u001b[32;1m\u001b[1;3m\n", - "Invoking: `exponentiate` with `{'base': 3645, 'exponent': 2}`\n", + "Invoking: `exponentiate` with `{'base': 405, 'exponent': 2}`\n", + "\n", + "\n", + "\u001b[0m\u001b[38;5;200m\u001b[1;3m164025\u001b[0m\u001b[32;1m\u001b[1;3mThe result of taking 3 to the fifth power is 243. \n", + "\n", + "The sum of twelve and three is 15. \n", "\n", + "Multiplying 243 by 15 gives 3645. \n", "\n", - "\u001b[0m\u001b[38;5;200m\u001b[1;3m13286025\u001b[0m\u001b[32;1m\u001b[1;3mThe result of raising 3 to the fifth power and multiplying that by the sum of twelve and three, then squaring the whole result is 13,286,025.\u001b[0m\n", + "Finally, squaring 3645 gives 164025.\u001b[0m\n", "\n", "\u001b[1m> Finished chain.\u001b[0m\n" ] @@ -490,10 +434,10 @@ "data": { "text/plain": [ "{'input': 'Take 3 to the fifth power and multiply that by the sum of twelve and three, then square the whole result',\n", - " 'output': 'The result of raising 3 to the fifth power and multiplying that by the sum of twelve and three, then squaring the whole result is 13,286,025.'}" + " 'output': 'The result of taking 3 to the fifth power is 243. \\n\\nThe sum of twelve and three is 15. \\n\\nMultiplying 243 by 15 gives 3645. \\n\\nFinally, squaring 3645 gives 164025.'}" ] }, - "execution_count": 95, + "execution_count": 18, "metadata": {}, "output_type": "execute_result" } @@ -506,6 +450,14 @@ ")" ] }, + { + "cell_type": "markdown", + "id": "8fdb0ed9-1763-4778-a7d6-026578cd9585", + "metadata": {}, + "source": [ + "Check out the [LangSmith trace here](https://smith.langchain.com/public/eeeb27a4-a2f8-4f06-a3af-9c983f76146c/r)." + ] + }, { "cell_type": "markdown", "id": "b0e4b7f4-58ce-4ca0-a986-d05a436a7ccf", @@ -524,9 +476,9 @@ ], "metadata": { "kernelspec": { - "display_name": "poetry-venv", + "display_name": "Python 3 (ipykernel)", "language": "python", - "name": "poetry-venv" + "name": "python3" }, "language_info": { "codemirror_mode": { diff --git a/docs/docs/use_cases/tool_use/tool_error_handling.ipynb b/docs/docs/use_cases/tool_use/tool_error_handling.ipynb index c129b0ebeeafb0..db0fe2a1969fe2 100644 --- a/docs/docs/use_cases/tool_use/tool_error_handling.ipynb +++ b/docs/docs/use_cases/tool_use/tool_error_handling.ipynb @@ -5,7 +5,7 @@ "id": "5d60cbb9-2a6a-43ea-a9e9-f67b16ddd2b2", "metadata": {}, "source": [ - "# Tool error handling\n", + "# Handling tool errors\n", "\n", "Using a model to invoke a tool has some obvious potential failure modes. Firstly, the model needs to return a output that can be parsed at all. 
Secondly, the model needs to return tool arguments that are valid.\n", "\n", @@ -29,7 +29,7 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install --upgrade --quiet langchain langchain-openai" + "%pip install --upgrade --quiet langchain-core langchain-openai" ] }, { @@ -37,7 +37,7 @@ "id": "68107597-0c8c-4bb5-8c12-9992fabdf71a", "metadata": {}, "source": [ - "And set these environment variables:" + "If you'd like to trace your runs in [LangSmith](/docs/langsmith/) uncomment and set the following environment variables:" ] }, { @@ -50,9 +50,6 @@ "import getpass\n", "import os\n", "\n", - "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n", - "\n", - "# If you'd like to use LangSmith, uncomment the below:\n", "# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n", "# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()" ] @@ -64,12 +61,33 @@ "source": [ "## Chain\n", "\n", - "Suppose we have the following (dummy) tool and tool-calling chain. We'll make our tool intentionally convoluted to try and trip up the model." + "Suppose we have the following (dummy) tool and tool-calling chain. We'll make our tool intentionally convoluted to try and trip up the model.\n", + "\n", + "```{=mdx}\n", + "import ChatModelTabs from \"@theme/ChatModelTabs\";\n", + "\n", + "\n", + "```" ] }, { "cell_type": "code", "execution_count": 1, + "id": "86258950-5e61-4340-81b9-84a5d26e8773", + "metadata": {}, + "outputs": [], + "source": [ + "# | echo: false\n", + "# | output: false\n", + "\n", + "from langchain_openai import ChatOpenAI\n", + "\n", + "llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)" + ] + }, + { + "cell_type": "code", + "execution_count": 2, "id": "1d20604e-c4d1-4d21-841b-23e4f61aec36", "metadata": {}, "outputs": [], @@ -91,13 +109,8 @@ "metadata": {}, "outputs": [], "source": [ - "# Define model and bind tool\n", - "from langchain_openai import ChatOpenAI\n", - "\n", - "model = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n", - "model_with_tools = model.bind_tools(\n", + "llm_with_tools = llm.bind_tools(\n", " [complex_tool],\n", - " tool_choice=\"complex_tool\",\n", ")" ] }, @@ -109,16 +122,7 @@ "outputs": [], "source": [ "# Define chain\n", - "from operator import itemgetter\n", - "\n", - "from langchain.output_parsers import JsonOutputKeyToolsParser\n", - "from langchain_core.runnables import Runnable, RunnableLambda, RunnablePassthrough\n", - "\n", - "chain = (\n", - " model_with_tools\n", - " | JsonOutputKeyToolsParser(key_name=\"complex_tool\", first_tool_only=True)\n", - " | complex_tool\n", - ")" + "chain = llm_with_tools | (lambda msg: msg.tool_calls[0][\"args\"]) | complex_tool" ] }, { @@ -131,25 +135,26 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 12, "id": "d354664c-ac44-4967-a35f-8912b3ad9477", "metadata": {}, "outputs": [ { "ename": "ValidationError", - "evalue": "1 validation error for complex_toolSchemaSchema\ndict_arg\n field required (type=value_error.missing)", + "evalue": "1 validation error for complex_toolSchema\ndict_arg\n field required (type=value_error.missing)", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mValidationError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[6], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mchain\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2\u001b[0m \u001b[43m 
\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43muse complex tool. the args are 5, 2.1, empty dictionary. don\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mt forget dict_arg\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\n\u001b[1;32m 3\u001b[0m \u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/base.py:1774\u001b[0m, in \u001b[0;36mRunnableSequence.invoke\u001b[0;34m(self, input, config)\u001b[0m\n\u001b[1;32m 1772\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1773\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m i, step \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28menumerate\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msteps):\n\u001b[0;32m-> 1774\u001b[0m \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[43mstep\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1775\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1776\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;66;43;03m# mark each step as a child run\u001b[39;49;00m\n\u001b[1;32m 1777\u001b[0m \u001b[43m \u001b[49m\u001b[43mpatch_config\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1778\u001b[0m \u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget_child\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43mf\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mseq:step:\u001b[39;49m\u001b[38;5;132;43;01m{\u001b[39;49;00m\u001b[43mi\u001b[49m\u001b[38;5;241;43m+\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[38;5;132;43;01m}\u001b[39;49;00m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1779\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1780\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1781\u001b[0m \u001b[38;5;66;03m# finish the root run\u001b[39;00m\n\u001b[1;32m 1782\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n", - "File \u001b[0;32m~/langchain/libs/core/langchain_core/tools.py:210\u001b[0m, in \u001b[0;36mBaseTool.invoke\u001b[0;34m(self, input, config, **kwargs)\u001b[0m\n\u001b[1;32m 203\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21minvoke\u001b[39m(\n\u001b[1;32m 204\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 205\u001b[0m \u001b[38;5;28minput\u001b[39m: Union[\u001b[38;5;28mstr\u001b[39m, Dict],\n\u001b[1;32m 206\u001b[0m config: Optional[RunnableConfig] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[1;32m 207\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs: Any,\n\u001b[1;32m 208\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Any:\n\u001b[1;32m 209\u001b[0m config \u001b[38;5;241m=\u001b[39m ensure_config(config)\n\u001b[0;32m--> 210\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 211\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 212\u001b[0m \u001b[43m 
\u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcallbacks\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 213\u001b[0m \u001b[43m \u001b[49m\u001b[43mtags\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtags\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 214\u001b[0m \u001b[43m \u001b[49m\u001b[43mmetadata\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mmetadata\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 215\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mrun_name\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 216\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 217\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/langchain/libs/core/langchain_core/tools.py:315\u001b[0m, in \u001b[0;36mBaseTool.run\u001b[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, **kwargs)\u001b[0m\n\u001b[1;32m 301\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mrun\u001b[39m(\n\u001b[1;32m 302\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 303\u001b[0m tool_input: Union[\u001b[38;5;28mstr\u001b[39m, Dict],\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 312\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs: Any,\n\u001b[1;32m 313\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Any:\n\u001b[1;32m 314\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Run the tool.\"\"\"\u001b[39;00m\n\u001b[0;32m--> 315\u001b[0m parsed_input \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_parse_input\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtool_input\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 316\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mverbose \u001b[38;5;129;01mand\u001b[39;00m verbose \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 317\u001b[0m verbose_ \u001b[38;5;241m=\u001b[39m verbose\n", - "File \u001b[0;32m~/langchain/libs/core/langchain_core/tools.py:250\u001b[0m, in \u001b[0;36mBaseTool._parse_input\u001b[0;34m(self, tool_input)\u001b[0m\n\u001b[1;32m 248\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 249\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m input_args \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m--> 250\u001b[0m result \u001b[38;5;241m=\u001b[39m 
\u001b[43minput_args\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mparse_obj\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtool_input\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 251\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m {\n\u001b[1;32m 252\u001b[0m k: \u001b[38;5;28mgetattr\u001b[39m(result, k)\n\u001b[1;32m 253\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m k, v \u001b[38;5;129;01min\u001b[39;00m result\u001b[38;5;241m.\u001b[39mdict()\u001b[38;5;241m.\u001b[39mitems()\n\u001b[1;32m 254\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m k \u001b[38;5;129;01min\u001b[39;00m tool_input\n\u001b[1;32m 255\u001b[0m }\n\u001b[1;32m 256\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m tool_input\n", + "Cell \u001b[0;32mIn[12], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mchain\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43muse complex tool. the args are 5, 2.1, empty dictionary. don\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mt forget dict_arg\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\n\u001b[1;32m 3\u001b[0m \u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/base.py:2499\u001b[0m, in \u001b[0;36mRunnableSequence.invoke\u001b[0;34m(self, input, config)\u001b[0m\n\u001b[1;32m 2497\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 2498\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m i, step \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28menumerate\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msteps):\n\u001b[0;32m-> 2499\u001b[0m \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[43mstep\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2500\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2501\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;66;43;03m# mark each step as a child run\u001b[39;49;00m\n\u001b[1;32m 2502\u001b[0m \u001b[43m \u001b[49m\u001b[43mpatch_config\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2503\u001b[0m \u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget_child\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43mf\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mseq:step:\u001b[39;49m\u001b[38;5;132;43;01m{\u001b[39;49;00m\u001b[43mi\u001b[49m\u001b[38;5;241;43m+\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[38;5;132;43;01m}\u001b[39;49;00m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2504\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2505\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2506\u001b[0m \u001b[38;5;66;03m# finish the root run\u001b[39;00m\n\u001b[1;32m 2507\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n", + "File \u001b[0;32m~/langchain/libs/core/langchain_core/tools.py:241\u001b[0m, in \u001b[0;36mBaseTool.invoke\u001b[0;34m(self, input, config, **kwargs)\u001b[0m\n\u001b[1;32m 234\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21minvoke\u001b[39m(\n\u001b[1;32m 235\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 236\u001b[0m 
\u001b[38;5;28minput\u001b[39m: Union[\u001b[38;5;28mstr\u001b[39m, Dict],\n\u001b[1;32m 237\u001b[0m config: Optional[RunnableConfig] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[1;32m 238\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs: Any,\n\u001b[1;32m 239\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Any:\n\u001b[1;32m 240\u001b[0m config \u001b[38;5;241m=\u001b[39m ensure_config(config)\n\u001b[0;32m--> 241\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 242\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 243\u001b[0m \u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcallbacks\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 244\u001b[0m \u001b[43m \u001b[49m\u001b[43mtags\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtags\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 245\u001b[0m \u001b[43m \u001b[49m\u001b[43mmetadata\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mmetadata\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 246\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mrun_name\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 247\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_id\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpop\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mrun_id\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 248\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 249\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/langchain/libs/core/langchain_core/tools.py:387\u001b[0m, in \u001b[0;36mBaseTool.run\u001b[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, run_id, **kwargs)\u001b[0m\n\u001b[1;32m 385\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m ValidationError \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 386\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandle_validation_error:\n\u001b[0;32m--> 387\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[1;32m 388\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m 
\u001b[38;5;28misinstance\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandle_validation_error, \u001b[38;5;28mbool\u001b[39m):\n\u001b[1;32m 389\u001b[0m observation \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mTool input validation error\u001b[39m\u001b[38;5;124m\"\u001b[39m\n", + "File \u001b[0;32m~/langchain/libs/core/langchain_core/tools.py:378\u001b[0m, in \u001b[0;36mBaseTool.run\u001b[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, run_id, **kwargs)\u001b[0m\n\u001b[1;32m 364\u001b[0m run_manager \u001b[38;5;241m=\u001b[39m callback_manager\u001b[38;5;241m.\u001b[39mon_tool_start(\n\u001b[1;32m 365\u001b[0m {\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mname\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mname, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdescription\u001b[39m\u001b[38;5;124m\"\u001b[39m: \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdescription},\n\u001b[1;32m 366\u001b[0m tool_input \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(tool_input, \u001b[38;5;28mstr\u001b[39m) \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mstr\u001b[39m(tool_input),\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 375\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs,\n\u001b[1;32m 376\u001b[0m )\n\u001b[1;32m 377\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 378\u001b[0m parsed_input \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_parse_input\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtool_input\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 379\u001b[0m tool_args, tool_kwargs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_to_args_and_kwargs(parsed_input)\n\u001b[1;32m 380\u001b[0m observation \u001b[38;5;241m=\u001b[39m (\n\u001b[1;32m 381\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_run(\u001b[38;5;241m*\u001b[39mtool_args, run_manager\u001b[38;5;241m=\u001b[39mrun_manager, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mtool_kwargs)\n\u001b[1;32m 382\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m new_arg_supported\n\u001b[1;32m 383\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_run(\u001b[38;5;241m*\u001b[39mtool_args, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mtool_kwargs)\n\u001b[1;32m 384\u001b[0m )\n", + "File \u001b[0;32m~/langchain/libs/core/langchain_core/tools.py:283\u001b[0m, in \u001b[0;36mBaseTool._parse_input\u001b[0;34m(self, tool_input)\u001b[0m\n\u001b[1;32m 281\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 282\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m input_args \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m--> 283\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[43minput_args\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mparse_obj\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtool_input\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 284\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m {\n\u001b[1;32m 285\u001b[0m k: \u001b[38;5;28mgetattr\u001b[39m(result, k)\n\u001b[1;32m 286\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m k, v \u001b[38;5;129;01min\u001b[39;00m result\u001b[38;5;241m.\u001b[39mdict()\u001b[38;5;241m.\u001b[39mitems()\n\u001b[1;32m 287\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m k 
\u001b[38;5;129;01min\u001b[39;00m tool_input\n\u001b[1;32m 288\u001b[0m }\n\u001b[1;32m 289\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m tool_input\n", "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/pydantic/v1/main.py:526\u001b[0m, in \u001b[0;36mBaseModel.parse_obj\u001b[0;34m(cls, obj)\u001b[0m\n\u001b[1;32m 524\u001b[0m exc \u001b[38;5;241m=\u001b[39m \u001b[38;5;167;01mTypeError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mcls\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m expected dict not \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mobj\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__class__\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m)\n\u001b[1;32m 525\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m ValidationError([ErrorWrapper(exc, loc\u001b[38;5;241m=\u001b[39mROOT_KEY)], \u001b[38;5;28mcls\u001b[39m) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01me\u001b[39;00m\n\u001b[0;32m--> 526\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mcls\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mobj\u001b[49m\u001b[43m)\u001b[49m\n", "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/pydantic/v1/main.py:341\u001b[0m, in \u001b[0;36mBaseModel.__init__\u001b[0;34m(__pydantic_self__, **data)\u001b[0m\n\u001b[1;32m 339\u001b[0m values, fields_set, validation_error \u001b[38;5;241m=\u001b[39m validate_model(__pydantic_self__\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__class__\u001b[39m, data)\n\u001b[1;32m 340\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m validation_error:\n\u001b[0;32m--> 341\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m validation_error\n\u001b[1;32m 342\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 343\u001b[0m object_setattr(__pydantic_self__, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124m__dict__\u001b[39m\u001b[38;5;124m'\u001b[39m, values)\n", - "\u001b[0;31mValidationError\u001b[0m: 1 validation error for complex_toolSchemaSchema\ndict_arg\n field required (type=value_error.missing)" + "\u001b[0;31mValidationError\u001b[0m: 1 validation error for complex_toolSchema\ndict_arg\n field required (type=value_error.missing)" ] } ], @@ -171,14 +176,14 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 6, "id": "8fedb550-683d-45ae-8876-ae7acb332019", "metadata": {}, "outputs": [], "source": [ "from typing import Any\n", "\n", - "from langchain_core.runnables import RunnableConfig\n", + "from langchain_core.runnables import Runnable, RunnableConfig\n", "\n", "\n", "def try_except_tool(tool_args: dict, config: RunnableConfig) -> Runnable:\n", @@ -188,16 +193,12 @@ " return f\"Calling tool with arguments:\\n\\n{tool_args}\\n\\nraised the following error:\\n\\n{type(e)}: {e}\"\n", "\n", "\n", - "chain = (\n", - " model_with_tools\n", - " | JsonOutputKeyToolsParser(key_name=\"complex_tool\", first_tool_only=True)\n", - " | try_except_tool\n", - ")" + "chain = llm_with_tools | (lambda msg: msg.tool_calls[0][\"args\"]) | try_except_tool" ] }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 15, "id": "71a2c98d-c0be-4c0a-bb3d-41ad4596526c", "metadata": {}, "outputs": [ @@ -211,7 +212,7 @@ "\n", "raised the following error:\n", "\n", - ": 1 validation error for complex_toolSchemaSchema\n", + ": 1 validation 
error for complex_toolSchema\n", "dict_arg\n", " field required (type=value_error.missing)\n" ] @@ -237,7 +238,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 17, "id": "02cc4223-35fa-4240-976a-012299ca703c", "metadata": {}, "outputs": [ @@ -247,25 +248,17 @@ "10.5" ] }, - "execution_count": 5, + "execution_count": 17, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "chain = (\n", - " model_with_tools\n", - " | JsonOutputKeyToolsParser(key_name=\"complex_tool\", first_tool_only=True)\n", - " | complex_tool\n", - ")\n", + "chain = llm_with_tools | (lambda msg: msg.tool_calls[0][\"args\"]) | complex_tool\n", "better_model = ChatOpenAI(model=\"gpt-4-1106-preview\", temperature=0).bind_tools(\n", " [complex_tool], tool_choice=\"complex_tool\"\n", ")\n", - "better_chain = (\n", - " better_model\n", - " | JsonOutputKeyToolsParser(key_name=\"complex_tool\", first_tool_only=True)\n", - " | complex_tool\n", - ")\n", + "better_chain = better_model | (lambda msg: msg.tool_calls[0][\"args\"]) | complex_tool\n", "\n", "chain_with_fallback = chain.with_fallbacks([better_chain])\n", "chain_with_fallback.invoke(\n", @@ -278,7 +271,7 @@ "id": "412f8c4e-cc83-4d87-84a1-5ba2f8edb1e9", "metadata": {}, "source": [ - "Looking at the [Langsmith trace](https://smith.langchain.com/public/241e1266-8555-4d49-99dc-b8df46109c39/r) for this chain run, we can see that the first chain call fails as expected and it's the fallback that succeeds." + "Looking at the [Langsmith trace](https://smith.langchain.com/public/00e91fc2-e1a4-4b0f-a82e-e6b3119d196c/r) for this chain run, we can see that the first chain call fails as expected and it's the fallback that succeeds." ] }, { @@ -293,7 +286,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 13, "id": "b5659956-9454-468a-9753-a3ff9052b8f5", "metadata": {}, "outputs": [], @@ -301,7 +294,7 @@ "import json\n", "from typing import Any\n", "\n", - "from langchain_core.messages import AIMessage, HumanMessage, ToolMessage\n", + "from langchain_core.messages import AIMessage, HumanMessage, ToolCall, ToolMessage\n", "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n", "from langchain_core.runnables import RunnablePassthrough\n", "\n", @@ -309,36 +302,30 @@ "class CustomToolException(Exception):\n", " \"\"\"Custom LangChain tool exception.\"\"\"\n", "\n", - " def __init__(self, tool_call: dict, exception: Exception) -> None:\n", + " def __init__(self, tool_call: ToolCall, exception: Exception) -> None:\n", " super().__init__()\n", " self.tool_call = tool_call\n", " self.exception = exception\n", "\n", "\n", - "def tool_custom_exception(tool_call: dict, config: RunnableConfig) -> Runnable:\n", + "def tool_custom_exception(msg: AIMessage, config: RunnableConfig) -> Runnable:\n", " try:\n", - " return complex_tool.invoke(tool_call[\"args\"], config=config)\n", + " return complex_tool.invoke(msg.tool_calls[0][\"args\"], config=config)\n", " except Exception as e:\n", - " raise CustomToolException(tool_call, e)\n", + " raise CustomToolException(msg.tool_calls[0], e)\n", "\n", "\n", "def exception_to_messages(inputs: dict) -> dict:\n", " exception = inputs.pop(\"exception\")\n", - " tool_call = {\n", - " \"type\": \"function\",\n", - " \"function\": {\n", - " \"name\": \"complex_tool\",\n", - " \"arguments\": json.dumps(exception.tool_call[\"args\"]),\n", - " },\n", - " \"id\": exception.tool_call[\"id\"],\n", - " }\n", "\n", " # Add historical messages to the original input, so the model knows 
that it made a mistake with the last tool call.\n", " messages = [\n", - " AIMessage(content=\"\", additional_kwargs={\"tool_calls\": [tool_call]}),\n", - " ToolMessage(tool_call_id=tool_call[\"id\"], content=str(exception.exception)),\n", + " AIMessage(content=\"\", tool_calls=[exception.tool_call]),\n", + " ToolMessage(\n", + " tool_call_id=exception.tool_call[\"id\"], content=str(exception.exception)\n", + " ),\n", " HumanMessage(\n", - " content=\"The last tool calls raised exceptions. Try calling the tools again with corrected arguments.\"\n", + " content=\"The last tool call raised an exception. Try calling the tool again with corrected arguments. Do not repeat mistakes.\"\n", " ),\n", " ]\n", " inputs[\"last_output\"] = messages\n", @@ -351,14 +338,7 @@ "prompt = ChatPromptTemplate.from_messages(\n", " [(\"human\", \"{input}\"), MessagesPlaceholder(\"last_output\", optional=True)]\n", ")\n", - "chain = (\n", - " prompt\n", - " | model_with_tools\n", - " | JsonOutputKeyToolsParser(\n", - " key_name=\"complex_tool\", return_id=True, first_tool_only=True\n", - " )\n", - " | tool_custom_exception\n", - ")\n", + "chain = prompt | llm_with_tools | tool_custom_exception\n", "\n", "# If the initial chain call fails, we rerun it withe the exception passed in as a message.\n", "self_correcting_chain = chain.with_fallbacks(\n", @@ -368,7 +348,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 14, "id": "4c45f5bd-cbb4-47d5-b4b6-aec50673c750", "metadata": {}, "outputs": [ @@ -378,7 +358,7 @@ "10.5" ] }, - "execution_count": 10, + "execution_count": 14, "metadata": {}, "output_type": "execute_result" } @@ -396,15 +376,15 @@ "id": "50d269a9-3cab-4a37-ba2f-805296453627", "metadata": {}, "source": [ - "And our chain succeeds! Looking at the [LangSmith trace](https://smith.langchain.com/public/b780b740-daf5-43aa-a217-6d4600aba41b/r), we can see that indeed our initial chain still fails, and it's only on retrying that the chain succeeds." + "And our chain succeeds! Looking at the [LangSmith trace](https://smith.langchain.com/public/c11e804c-e14f-4059-bd09-64766f999c14/r), we can see that indeed our initial chain still fails, and it's only on retrying that the chain succeeds." ] } ], "metadata": { "kernelspec": { - "display_name": "poetry-venv", + "display_name": "poetry-venv-2", "language": "python", - "name": "poetry-venv" + "name": "poetry-venv-2" }, "language_info": { "codemirror_mode": { diff --git a/libs/partners/anthropic/langchain_anthropic/chat_models.py b/libs/partners/anthropic/langchain_anthropic/chat_models.py index 20394227420617..faca14a49e0327 100644 --- a/libs/partners/anthropic/langchain_anthropic/chat_models.py +++ b/libs/partners/anthropic/langchain_anthropic/chat_models.py @@ -344,7 +344,6 @@ def _stream( ) -> Iterator[ChatGenerationChunk]: params = self._format_params(messages=messages, stop=stop, **kwargs) if _tools_in_params(params): - warnings.warn("stream: Tool use is not yet supported in streaming mode.") result = self._generate( messages, stop=stop, run_manager=run_manager, **kwargs )
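Finally, a hedged, end-to-end sketch of the self-correcting retry pattern from the error-handling notebook above, consolidated into one runnable block. `complex_tool`'s body and the exact `with_fallbacks(...)` wiring are not visible in this diff, so both are assumptions and are marked as such in the comments.

```python
# Not part of this PR: consolidated sketch of the "retry with the exception" pattern.
from langchain_core.messages import AIMessage, HumanMessage, ToolCall, ToolMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import Runnable, RunnableConfig
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI


@tool
def complex_tool(int_arg: int, float_arg: float, dict_arg: dict) -> float:
    """Do something complex with a complex tool."""  # assumed signature and body
    return int_arg * float_arg


llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
llm_with_tools = llm.bind_tools([complex_tool])


class CustomToolException(Exception):
    """Custom LangChain tool exception."""

    def __init__(self, tool_call: ToolCall, exception: Exception) -> None:
        super().__init__()
        self.tool_call = tool_call
        self.exception = exception


def tool_custom_exception(msg: AIMessage, config: RunnableConfig) -> Runnable:
    try:
        return complex_tool.invoke(msg.tool_calls[0]["args"], config=config)
    except Exception as e:
        raise CustomToolException(msg.tool_calls[0], e)


def exception_to_messages(inputs: dict) -> dict:
    exception = inputs.pop("exception")
    # Replay the failed call and its error so the model can correct itself.
    inputs["last_output"] = [
        AIMessage(content="", tool_calls=[exception.tool_call]),
        ToolMessage(
            tool_call_id=exception.tool_call["id"], content=str(exception.exception)
        ),
        HumanMessage(
            content="The last tool call raised an exception. Try calling the tool "
            "again with corrected arguments. Do not repeat mistakes."
        ),
    ]
    return inputs


prompt = ChatPromptTemplate.from_messages(
    [("human", "{input}"), MessagesPlaceholder("last_output", optional=True)]
)
chain = prompt | llm_with_tools | tool_custom_exception

# Assumed wiring: route the raised exception into the fallback via exception_key.
self_correcting_chain = chain.with_fallbacks(
    [exception_to_messages | chain], exception_key="exception"
)

self_correcting_chain.invoke(
    {
        "input": "use complex tool. the args are 5, 2.1, empty dictionary. "
        "don't forget dict_arg"
    }
)
```

On the first pass the model typically omits `dict_arg` and the tool raises a validation error; the fallback then replays the failed call together with the error text, so the second attempt can supply the missing argument, as the notebook's final output (10.5) shows.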