diff --git a/.scripts/community_split/libs/community/langchain_community/agent_toolkits/conversational_retrieval/openai_functions.py b/.scripts/community_split/libs/community/langchain_community/agent_toolkits/conversational_retrieval/openai_functions.py
deleted file mode 100644
index e1c2c40bcfb88e..00000000000000
--- a/.scripts/community_split/libs/community/langchain_community/agent_toolkits/conversational_retrieval/openai_functions.py
+++ /dev/null
@@ -1,88 +0,0 @@
-from __future__ import annotations
-
-from typing import Any, List, Optional, TYPE_CHECKING
-
-from langchain_core.language_models import BaseLanguageModel
-from langchain_core.memory import BaseMemory
-from langchain_core.messages import SystemMessage
-from langchain_core.prompts.chat import MessagesPlaceholder
-from langchain_core.tools import BaseTool
-
-if TYPE_CHECKING:
-    from langchain.agents.agent import AgentExecutor
-
-
-def _get_default_system_message() -> SystemMessage:
-    return SystemMessage(
-        content=(
-            "Do your best to answer the questions. "
-            "Feel free to use any tools available to look up "
-            "relevant information, only if necessary"
-        )
-    )
-
-def create_conversational_retrieval_agent(
-    llm: BaseLanguageModel,
-    tools: List[BaseTool],
-    remember_intermediate_steps: bool = True,
-    memory_key: str = "chat_history",
-    system_message: Optional[SystemMessage] = None,
-    verbose: bool = False,
-    max_token_limit: int = 2000,
-    **kwargs: Any,
-) -> AgentExecutor:
-    """A convenience method for creating a conversational retrieval agent.
-
-    Args:
-        llm: The language model to use, should be ChatOpenAI
-        tools: A list of tools the agent has access to
-        remember_intermediate_steps: Whether the agent should remember intermediate
-            steps or not. Intermediate steps refer to prior action/observation
-            pairs from previous questions. The benefit of remembering these is if
-            there is relevant information in there, the agent can use it to answer
-            follow up questions. The downside is it will take up more tokens.
-        memory_key: The name of the memory key in the prompt.
-        system_message: The system message to use. By default, a basic one will
-            be used.
-        verbose: Whether or not the final AgentExecutor should be verbose or not,
-            defaults to False.
-        max_token_limit: The max number of tokens to keep around in memory.
-            Defaults to 2000.
-
-    Returns:
-        An agent executor initialized appropriately
-    """
-    from langchain.agents.agent import AgentExecutor
-    from langchain.agents.openai_functions_agent.agent_token_buffer_memory import (
-        AgentTokenBufferMemory,
-    )
-    from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
-    from langchain.memory.token_buffer import ConversationTokenBufferMemory
-
-    if remember_intermediate_steps:
-        memory: BaseMemory = AgentTokenBufferMemory(
-            memory_key=memory_key, llm=llm, max_token_limit=max_token_limit
-        )
-    else:
-        memory = ConversationTokenBufferMemory(
-            memory_key=memory_key,
-            return_messages=True,
-            output_key="output",
-            llm=llm,
-            max_token_limit=max_token_limit,
-        )
-
-    _system_message = system_message or _get_default_system_message()
-    prompt = OpenAIFunctionsAgent.create_prompt(
-        system_message=_system_message,
-        extra_prompt_messages=[MessagesPlaceholder(variable_name=memory_key)],
-    )
-    agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt)
-    return AgentExecutor(
-        agent=agent,
-        tools=tools,
-        memory=memory,
-        verbose=verbose,
-        return_intermediate_steps=remember_intermediate_steps,
-        **kwargs,
-    )
diff --git a/.scripts/community_split/libs/community/langchain_community/agent_toolkits/vectorstore/base.py b/.scripts/community_split/libs/community/langchain_community/agent_toolkits/vectorstore/base.py
deleted file mode 100644
index 3e25a06b3065ba..00000000000000
--- a/.scripts/community_split/libs/community/langchain_community/agent_toolkits/vectorstore/base.py
+++ /dev/null
@@ -1,103 +0,0 @@
-"""VectorStore agent."""
-from __future__ import annotations
-from typing import Any, Dict, Optional, TYPE_CHECKING
-
-from langchain_core.callbacks import BaseCallbackManager
-from langchain_core.language_models import BaseLanguageModel
-
-from langchain_community.agent_toolkits.vectorstore.prompt import PREFIX, ROUTER_PREFIX
-from langchain_community.agent_toolkits.vectorstore.toolkit import (
-    VectorStoreRouterToolkit,
-    VectorStoreToolkit,
-)
-
-if TYPE_CHECKING:
-    from langchain.agents.agent import AgentExecutor
-
-
-def create_vectorstore_agent(
-    llm: BaseLanguageModel,
-    toolkit: VectorStoreToolkit,
-    callback_manager: Optional[BaseCallbackManager] = None,
-    prefix: str = PREFIX,
-    verbose: bool = False,
-    agent_executor_kwargs: Optional[Dict[str, Any]] = None,
-    **kwargs: Any,
-) -> AgentExecutor:
-    """Construct a VectorStore agent from an LLM and tools.
-
-    Args:
-        llm (BaseLanguageModel): LLM that will be used by the agent
-        toolkit (VectorStoreToolkit): Set of tools for the agent
-        callback_manager (Optional[BaseCallbackManager], optional): Object to handle the callback [ Defaults to None. ]
-        prefix (str, optional): The prefix prompt for the agent. If not provided uses default PREFIX.
-        verbose (bool, optional): If you want to see the content of the scratchpad. [ Defaults to False ]
-        agent_executor_kwargs (Optional[Dict[str, Any]], optional): If there is any other parameter you want to send to the agent. [ Defaults to None ]
-        **kwargs: Additional named parameters to pass to the ZeroShotAgent.
-
-    Returns:
-        AgentExecutor: Returns a callable AgentExecutor object.
-            Either you can call it or use run method with the query to get the response
-    """  # noqa: E501
-    from langchain.agents.agent import AgentExecutor
-    from langchain.agents.mrkl.base import ZeroShotAgent
-    from langchain.chains.llm import LLMChain
-    tools = toolkit.get_tools()
-    prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix)
-    llm_chain = LLMChain(
-        llm=llm,
-        prompt=prompt,
-        callback_manager=callback_manager,
-    )
-    tool_names = [tool.name for tool in tools]
-    agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs)
-    return AgentExecutor.from_agent_and_tools(
-        agent=agent,
-        tools=tools,
-        callback_manager=callback_manager,
-        verbose=verbose,
-        **(agent_executor_kwargs or {}),
-    )
-
-
-def create_vectorstore_router_agent(
-    llm: BaseLanguageModel,
-    toolkit: VectorStoreRouterToolkit,
-    callback_manager: Optional[BaseCallbackManager] = None,
-    prefix: str = ROUTER_PREFIX,
-    verbose: bool = False,
-    agent_executor_kwargs: Optional[Dict[str, Any]] = None,
-    **kwargs: Any,
-) -> AgentExecutor:
-    """Construct a VectorStore router agent from an LLM and tools.
-
-    Args:
-        llm (BaseLanguageModel): LLM that will be used by the agent
-        toolkit (VectorStoreRouterToolkit): Set of tools for the agent which have routing capability with multiple vector stores
-        callback_manager (Optional[BaseCallbackManager], optional): Object to handle the callback [ Defaults to None. ]
-        prefix (str, optional): The prefix prompt for the router agent. If not provided uses default ROUTER_PREFIX.
-        verbose (bool, optional): If you want to see the content of the scratchpad. [ Defaults to False ]
-        agent_executor_kwargs (Optional[Dict[str, Any]], optional): If there is any other parameter you want to send to the agent. [ Defaults to None ]
-        **kwargs: Additional named parameters to pass to the ZeroShotAgent.
-
-    Returns:
-        AgentExecutor: Returns a callable AgentExecutor object. Either you can call it or use run method with the query to get the response.
- """ # noqa: E501 - from langchain.agents.agent import AgentExecutor - from langchain.agents.mrkl.base import ZeroShotAgent - from langchain.chains.llm import LLMChain - tools = toolkit.get_tools() - prompt = ZeroShotAgent.create_prompt(tools, prefix=prefix) - llm_chain = LLMChain( - llm=llm, - prompt=prompt, - callback_manager=callback_manager, - ) - tool_names = [tool.name for tool in tools] - agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs) - return AgentExecutor.from_agent_and_tools( - agent=agent, - tools=tools, - callback_manager=callback_manager, - verbose=verbose, - **(agent_executor_kwargs or {}), - ) diff --git a/libs/community/langchain_community/agent_toolkits/__init__.py b/libs/community/langchain_community/agent_toolkits/__init__.py index 9501eb5db14655..b39e2751f7e9dd 100644 --- a/libs/community/langchain_community/agent_toolkits/__init__.py +++ b/libs/community/langchain_community/agent_toolkits/__init__.py @@ -18,9 +18,6 @@ from langchain_community.agent_toolkits.azure_cognitive_services import ( AzureCognitiveServicesToolkit, ) -from langchain_community.agent_toolkits.conversational_retrieval.openai_functions import ( # noqa: E501 - create_conversational_retrieval_agent, -) from langchain_community.agent_toolkits.file_management.toolkit import ( FileManagementToolkit, ) @@ -74,5 +71,4 @@ "create_pbi_chat_agent", "create_spark_sql_agent", "create_sql_agent", - "create_conversational_retrieval_agent", ] diff --git a/libs/community/tests/unit_tests/agent_toolkits/__init__.py b/libs/community/tests/unit_tests/agent_toolkits/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/libs/community/tests/unit_tests/agent_toolkits/test_imports.py b/libs/community/tests/unit_tests/agent_toolkits/test_imports.py new file mode 100644 index 00000000000000..c6557b7391ee80 --- /dev/null +++ b/libs/community/tests/unit_tests/agent_toolkits/test_imports.py @@ -0,0 +1,33 @@ +from langchain_community.agent_toolkits import __all__ + +EXPECTED_ALL = [ + "AINetworkToolkit", + "AmadeusToolkit", + "AzureCognitiveServicesToolkit", + "FileManagementToolkit", + "GmailToolkit", + "JiraToolkit", + "JsonToolkit", + "MultionToolkit", + "NasaToolkit", + "NLAToolkit", + "O365Toolkit", + "OpenAPIToolkit", + "PlayWrightBrowserToolkit", + "PowerBIToolkit", + "SlackToolkit", + "SteamToolkit", + "SQLDatabaseToolkit", + "SparkSQLToolkit", + "ZapierToolkit", + "create_json_agent", + "create_openapi_agent", + "create_pbi_agent", + "create_pbi_chat_agent", + "create_spark_sql_agent", + "create_sql_agent", +] + + +def test_all_imports() -> None: + assert set(__all__) == set(EXPECTED_ALL)