diff --git a/libs/langchain/langchain/agents/json_chat/base.py b/libs/langchain/langchain/agents/json_chat/base.py index 7a70c0bf4c6..70228ce2735 100644 --- a/libs/langchain/langchain/agents/json_chat/base.py +++ b/libs/langchain/langchain/agents/json_chat/base.py @@ -1,4 +1,4 @@ -from typing import Sequence +from typing import List, Sequence, Union from langchain_core.language_models import BaseLanguageModel from langchain_core.prompts.chat import ChatPromptTemplate @@ -15,7 +15,7 @@ def create_json_chat_agent( llm: BaseLanguageModel, tools: Sequence[BaseTool], prompt: ChatPromptTemplate, - stop_sequence: bool = True, + stop_sequence: Union[bool, List[str]] = True, tools_renderer: ToolsRenderer = render_text_description, ) -> Runnable: """Create an agent that uses JSON to format its logic, build for Chat Models. @@ -24,7 +24,11 @@ def create_json_chat_agent( llm: LLM to use as the agent. tools: Tools this agent has access to. prompt: The prompt to use. See Prompt section below for more. - stop_sequence: Adds a stop token of "Observation:" to avoid hallucinates. + stop_sequence: bool or list of str. + If True, adds a stop token of "Observation:" to avoid hallucination. + If False, does not add a stop token. + If a list of str, uses the provided list as the stop tokens. + Default is True. You may want to set this to False if the LLM you are using does not support stop sequences. 
tools_renderer: This controls how the tools are converted into a string and @@ -158,7 +162,8 @@ def create_json_chat_agent( tool_names=", ".join([t.name for t in tools]), ) if stop_sequence: - llm_to_use = llm.bind(stop=["\nObservation"]) + stop = ["\nObservation"] if stop_sequence is True else stop_sequence + llm_to_use = llm.bind(stop=stop) else: llm_to_use = llm diff --git a/libs/langchain/langchain/agents/react/agent.py b/libs/langchain/langchain/agents/react/agent.py index 7c0f70bbd4a..531b4c74a05 100644 --- a/libs/langchain/langchain/agents/react/agent.py +++ b/libs/langchain/langchain/agents/react/agent.py @@ -1,6 +1,6 @@ from __future__ import annotations -from typing import Optional, Sequence +from typing import List, Optional, Sequence, Union from langchain_core.language_models import BaseLanguageModel from langchain_core.prompts import BasePromptTemplate @@ -19,6 +19,8 @@ def create_react_agent( prompt: BasePromptTemplate, output_parser: Optional[AgentOutputParser] = None, tools_renderer: ToolsRenderer = render_text_description, + *, + stop_sequence: Union[bool, List[str]] = True, ) -> Runnable: """Create an agent that uses ReAct prompting. @@ -29,6 +31,13 @@ def create_react_agent( output_parser: AgentOutputParser for parse the LLM output. tools_renderer: This controls how the tools are converted into a string and then passed into the LLM. Default is `render_text_description`. + stop_sequence: bool or list of str. + If True, adds a stop token of "Observation:" to avoid hallucination. + If False, does not add a stop token. + If a list of str, uses the provided list as the stop tokens. + + Default is True. You may want to set this to False if the LLM you are using + does not support stop sequences. Returns: A Runnable sequence representing an agent. 
It takes as input all the same input @@ -108,7 +117,11 @@ def create_react_agent( tools=tools_renderer(list(tools)), tool_names=", ".join([t.name for t in tools]), ) - llm_with_stop = llm.bind(stop=["\nObservation"]) + if stop_sequence: + stop = ["\nObservation"] if stop_sequence is True else stop_sequence + llm_with_stop = llm.bind(stop=stop) + else: + llm_with_stop = llm output_parser = output_parser or ReActSingleInputOutputParser() agent = ( RunnablePassthrough.assign( diff --git a/libs/langchain/langchain/agents/structured_chat/base.py b/libs/langchain/langchain/agents/structured_chat/base.py index 735160f639d..8eaf409490c 100644 --- a/libs/langchain/langchain/agents/structured_chat/base.py +++ b/libs/langchain/langchain/agents/structured_chat/base.py @@ -1,5 +1,5 @@ import re -from typing import Any, List, Optional, Sequence, Tuple +from typing import Any, List, Optional, Sequence, Tuple, Union from langchain_core._api import deprecated from langchain_core.agents import AgentAction @@ -155,6 +155,8 @@ def create_structured_chat_agent( tools: Sequence[BaseTool], prompt: ChatPromptTemplate, tools_renderer: ToolsRenderer = render_text_description_and_args, + *, + stop_sequence: Union[bool, List[str]] = True, ) -> Runnable: """Create an agent aimed at supporting tools with multiple inputs. @@ -162,6 +164,13 @@ def create_structured_chat_agent( llm: LLM to use as the agent. tools: Tools this agent has access to. prompt: The prompt to use. See Prompt section below for more. + stop_sequence: bool or list of str. + If True, adds a stop token of "Observation:" to avoid hallucination. + If False, does not add a stop token. + If a list of str, uses the provided list as the stop tokens. + + Default is True. You may want to set this to False if the LLM you are using + does not support stop sequences. tools_renderer: This controls how the tools are converted into a string and then passed into the LLM. Default is `render_text_description`. 
@@ -273,7 +282,11 @@ def create_structured_chat_agent( tools=tools_renderer(list(tools)), tool_names=", ".join([t.name for t in tools]), ) - llm_with_stop = llm.bind(stop=["Observation"]) + if stop_sequence: + stop = ["\nObservation"] if stop_sequence is True else stop_sequence + llm_with_stop = llm.bind(stop=stop) + else: + llm_with_stop = llm agent = ( RunnablePassthrough.assign( diff --git a/libs/langchain/langchain/agents/xml/base.py b/libs/langchain/langchain/agents/xml/base.py index 572b50a8ca7..370c48ba022 100644 --- a/libs/langchain/langchain/agents/xml/base.py +++ b/libs/langchain/langchain/agents/xml/base.py @@ -112,6 +112,8 @@ def create_xml_agent( tools: Sequence[BaseTool], prompt: BasePromptTemplate, tools_renderer: ToolsRenderer = render_text_description, + *, + stop_sequence: Union[bool, List[str]] = True, ) -> Runnable: """Create an agent that uses XML to format its logic. @@ -123,6 +125,13 @@ def create_xml_agent( `agent_scratchpad`: contains previous agent actions and tool outputs. tools_renderer: This controls how the tools are converted into a string and then passed into the LLM. Default is `render_text_description`. + stop_sequence: bool or list of str. + If True, adds a stop token of "</tool_input>" to avoid hallucination. + If False, does not add a stop token. + If a list of str, uses the provided list as the stop tokens. + + Default is True. You may want to set this to False if the LLM you are using + does not support stop sequences. Returns: A Runnable sequence representing an agent. It takes as input all the same input @@ -201,7 +210,12 @@ def create_xml_agent( prompt = prompt.partial( tools=tools_renderer(list(tools)), ) - llm_with_stop = llm.bind(stop=["</tool_input>"]) + + if stop_sequence: + stop = ["</tool_input>"] if stop_sequence is True else stop_sequence + llm_with_stop = llm.bind(stop=stop) + else: + llm_with_stop = llm agent = ( RunnablePassthrough.assign(