From ae66bcbc104cfbe3c44d99c2097e5070f891b0ea Mon Sep 17 00:00:00 2001 From: Leonid Ganeline Date: Fri, 9 Feb 2024 12:47:41 -0800 Subject: [PATCH] core[patch]: docstring update (#16813) - added missed docstrings - formatted docstrings to consistent form --- libs/core/langchain_core/messages/__init__.py | 10 ++- libs/core/langchain_core/messages/ai.py | 4 +- libs/core/langchain_core/messages/base.py | 13 ++- libs/core/langchain_core/messages/chat.py | 4 +- libs/core/langchain_core/messages/function.py | 4 +- libs/core/langchain_core/messages/human.py | 4 +- libs/core/langchain_core/messages/system.py | 4 +- libs/core/langchain_core/messages/tool.py | 4 +- .../langchain_core/outputs/chat_generation.py | 2 +- .../core/langchain_core/outputs/generation.py | 2 +- libs/core/langchain_core/prompts/chat.py | 2 +- libs/core/langchain_core/prompts/pipeline.py | 2 +- libs/core/langchain_core/runnables/base.py | 22 ++--- libs/core/langchain_core/runnables/branch.py | 6 +- .../langchain_core/runnables/configurable.py | 10 +-- .../langchain_core/runnables/fallbacks.py | 12 +-- libs/core/langchain_core/runnables/graph.py | 6 ++ libs/core/langchain_core/runnables/history.py | 8 +- .../langchain_core/runnables/passthrough.py | 10 +-- libs/core/langchain_core/runnables/retry.py | 2 +- libs/core/langchain_core/runnables/router.py | 6 +- libs/core/langchain_core/runnables/schema.py | 20 ++--- libs/core/langchain_core/runnables/utils.py | 18 ++-- libs/core/langchain_core/tools.py | 4 +- .../core/langchain_core/tracers/evaluation.py | 2 +- libs/core/langchain_core/tracers/langchain.py | 2 +- .../langchain_core/tracers/langchain_v1.py | 2 +- .../core/langchain_core/tracers/log_stream.py | 6 +- .../langchain_core/tracers/root_listeners.py | 2 +- .../langchain_core/tracers/run_collector.py | 2 +- libs/core/langchain_core/utils/formatting.py | 2 +- .../__snapshots__/test_runnable.ambr | 84 +++++++++---------- .../unit_tests/runnables/test_runnable.py | 12 +-- 33 files changed, 162 
insertions(+), 131 deletions(-) diff --git a/libs/core/langchain_core/messages/__init__.py b/libs/core/langchain_core/messages/__init__.py index 86bea028051fb1..e51a835aee26bc 100644 --- a/libs/core/langchain_core/messages/__init__.py +++ b/libs/core/langchain_core/messages/__init__.py @@ -22,7 +22,7 @@ def get_buffer_string( messages: Sequence[BaseMessage], human_prefix: str = "Human", ai_prefix: str = "AI" ) -> str: - """Convert sequence of Messages to strings and concatenate them into one string. + """Convert a sequence of Messages to strings and concatenate them into one string. Args: messages: Messages to be converted to strings. @@ -111,6 +111,14 @@ def messages_from_dict(messages: Sequence[dict]) -> List[BaseMessage]: def message_chunk_to_message(chunk: BaseMessageChunk) -> BaseMessage: + """Convert a message chunk to a message. + + Args: + chunk: Message chunk to convert. + + Returns: + Message. + """ if not isinstance(chunk, BaseMessageChunk): return chunk # chunk classes always have the equivalent non-chunk class as their first parent diff --git a/libs/core/langchain_core/messages/ai.py b/libs/core/langchain_core/messages/ai.py index 6c9633f02e42bc..c667aa54e9526c 100644 --- a/libs/core/langchain_core/messages/ai.py +++ b/libs/core/langchain_core/messages/ai.py @@ -8,7 +8,7 @@ class AIMessage(BaseMessage): - """A Message from an AI.""" + """Message from an AI.""" example: bool = False """Whether this Message is being passed in to the model as part of an example @@ -27,7 +27,7 @@ def get_lc_namespace(cls) -> List[str]: class AIMessageChunk(AIMessage, BaseMessageChunk): - """A Message chunk from an AI.""" + """Message chunk from an AI.""" # Ignoring mypy re-assignment here since we're overriding the value # to make sure that the chunk variant can be discriminated from the diff --git a/libs/core/langchain_core/messages/base.py b/libs/core/langchain_core/messages/base.py index bf1c74f490ddba..a4af8e558f35fe 100644 --- 
a/libs/core/langchain_core/messages/base.py +++ b/libs/core/langchain_core/messages/base.py @@ -12,7 +12,7 @@ class BaseMessage(Serializable): - """The base abstract Message class. + """Base abstract Message class. Messages are the inputs and outputs of ChatModels. """ @@ -96,7 +96,7 @@ def merge_content( class BaseMessageChunk(BaseMessage): - """A Message chunk, which can be concatenated with other Message chunks.""" + """Message chunk, which can be concatenated with other Message chunks.""" @classmethod def get_lc_namespace(cls) -> List[str]: @@ -195,6 +195,15 @@ def messages_to_dict(messages: Sequence[BaseMessage]) -> List[dict]: def get_msg_title_repr(title: str, *, bold: bool = False) -> str: + """Get a title representation for a message. + + Args: + title: The title. + bold: Whether to bold the title. + + Returns: + The title representation. + """ padded = " " + title + " " sep_len = (80 - len(padded)) // 2 sep = "=" * sep_len diff --git a/libs/core/langchain_core/messages/chat.py b/libs/core/langchain_core/messages/chat.py index bd89094ca4eb7a..3c7ed975b65db3 100644 --- a/libs/core/langchain_core/messages/chat.py +++ b/libs/core/langchain_core/messages/chat.py @@ -8,7 +8,7 @@ class ChatMessage(BaseMessage): - """A Message that can be assigned an arbitrary speaker (i.e. role).""" + """Message that can be assigned an arbitrary speaker (i.e. 
role).""" role: str """The speaker / role of the Message.""" @@ -25,7 +25,7 @@ def get_lc_namespace(cls) -> List[str]: class ChatMessageChunk(ChatMessage, BaseMessageChunk): - """A Chat Message chunk.""" + """Chat Message chunk.""" # Ignoring mypy re-assignment here since we're overriding the value # to make sure that the chunk variant can be discriminated from the diff --git a/libs/core/langchain_core/messages/function.py b/libs/core/langchain_core/messages/function.py index 32f89f99f1f29b..e852aa37276776 100644 --- a/libs/core/langchain_core/messages/function.py +++ b/libs/core/langchain_core/messages/function.py @@ -8,7 +8,7 @@ class FunctionMessage(BaseMessage): - """A Message for passing the result of executing a function back to a model.""" + """Message for passing the result of executing a function back to a model.""" name: str """The name of the function that was executed.""" @@ -25,7 +25,7 @@ def get_lc_namespace(cls) -> List[str]: class FunctionMessageChunk(FunctionMessage, BaseMessageChunk): - """A Function Message chunk.""" + """Function Message chunk.""" # Ignoring mypy re-assignment here since we're overriding the value # to make sure that the chunk variant can be discriminated from the diff --git a/libs/core/langchain_core/messages/human.py b/libs/core/langchain_core/messages/human.py index 976dc7afc54388..b89860a562853c 100644 --- a/libs/core/langchain_core/messages/human.py +++ b/libs/core/langchain_core/messages/human.py @@ -4,7 +4,7 @@ class HumanMessage(BaseMessage): - """A Message from a human.""" + """Message from a human.""" example: bool = False """Whether this Message is being passed in to the model as part of an example @@ -23,7 +23,7 @@ def get_lc_namespace(cls) -> List[str]: class HumanMessageChunk(HumanMessage, BaseMessageChunk): - """A Human Message chunk.""" + """Human Message chunk.""" # Ignoring mypy re-assignment here since we're overriding the value # to make sure that the chunk variant can be discriminated from the diff --git 
a/libs/core/langchain_core/messages/system.py b/libs/core/langchain_core/messages/system.py index 18e3753296cee0..c86a60f91ffd07 100644 --- a/libs/core/langchain_core/messages/system.py +++ b/libs/core/langchain_core/messages/system.py @@ -4,7 +4,7 @@ class SystemMessage(BaseMessage): - """A Message for priming AI behavior, usually passed in as the first of a sequence + """Message for priming AI behavior, usually passed in as the first of a sequence of input messages. """ @@ -20,7 +20,7 @@ def get_lc_namespace(cls) -> List[str]: class SystemMessageChunk(SystemMessage, BaseMessageChunk): - """A System Message chunk.""" + """System Message chunk.""" # Ignoring mypy re-assignment here since we're overriding the value # to make sure that the chunk variant can be discriminated from the diff --git a/libs/core/langchain_core/messages/tool.py b/libs/core/langchain_core/messages/tool.py index 27d15739c74d87..a83894a10f73cc 100644 --- a/libs/core/langchain_core/messages/tool.py +++ b/libs/core/langchain_core/messages/tool.py @@ -8,7 +8,7 @@ class ToolMessage(BaseMessage): - """A Message for passing the result of executing a tool back to a model.""" + """Message for passing the result of executing a tool back to a model.""" tool_call_id: str """Tool call that this message is responding to.""" @@ -25,7 +25,7 @@ def get_lc_namespace(cls) -> List[str]: class ToolMessageChunk(ToolMessage, BaseMessageChunk): - """A Tool Message chunk.""" + """Tool Message chunk.""" # Ignoring mypy re-assignment here since we're overriding the value # to make sure that the chunk variant can be discriminated from the diff --git a/libs/core/langchain_core/outputs/chat_generation.py b/libs/core/langchain_core/outputs/chat_generation.py index b7bd6042a2c1eb..49dc96b38197ca 100644 --- a/libs/core/langchain_core/outputs/chat_generation.py +++ b/libs/core/langchain_core/outputs/chat_generation.py @@ -35,7 +35,7 @@ def get_lc_namespace(cls) -> List[str]: class ChatGenerationChunk(ChatGeneration): - """A 
ChatGeneration chunk, which can be concatenated with other + """ChatGeneration chunk, which can be concatenated with other ChatGeneration chunks. Attributes: diff --git a/libs/core/langchain_core/outputs/generation.py b/libs/core/langchain_core/outputs/generation.py index 3f0a79ecb10b0c..34e219771d125a 100644 --- a/libs/core/langchain_core/outputs/generation.py +++ b/libs/core/langchain_core/outputs/generation.py @@ -32,7 +32,7 @@ def get_lc_namespace(cls) -> List[str]: class GenerationChunk(Generation): - """A Generation chunk, which can be concatenated with other Generation chunks.""" + """Generation chunk, which can be concatenated with other Generation chunks.""" @classmethod def get_lc_namespace(cls) -> List[str]: diff --git a/libs/core/langchain_core/prompts/chat.py b/libs/core/langchain_core/prompts/chat.py index 105daba2dfb476..605470c0236be5 100644 --- a/libs/core/langchain_core/prompts/chat.py +++ b/libs/core/langchain_core/prompts/chat.py @@ -556,7 +556,7 @@ def pretty_print(self) -> None: class ChatPromptTemplate(BaseChatPromptTemplate): - """A prompt template for chat models. + """Prompt template for chat models. Use to create flexible templated prompts for chat models. diff --git a/libs/core/langchain_core/prompts/pipeline.py b/libs/core/langchain_core/prompts/pipeline.py index 48f5ec45821e5f..5c0cc00402f613 100644 --- a/libs/core/langchain_core/prompts/pipeline.py +++ b/libs/core/langchain_core/prompts/pipeline.py @@ -11,7 +11,7 @@ def _get_inputs(inputs: dict, input_variables: List[str]) -> dict: class PipelinePromptTemplate(BasePromptTemplate): - """A prompt template for composing multiple prompt templates together. + """Prompt template for composing multiple prompt templates together. This can be useful when you want to reuse parts of prompts. 
A PipelinePrompt consists of two main parts: diff --git a/libs/core/langchain_core/runnables/base.py b/libs/core/langchain_core/runnables/base.py index 42f5f33c31cdd2..783d8bbbf950fc 100644 --- a/libs/core/langchain_core/runnables/base.py +++ b/libs/core/langchain_core/runnables/base.py @@ -1632,7 +1632,7 @@ async def _atransform_stream_with_config( class RunnableSerializable(Serializable, Runnable[Input, Output]): - """A Runnable that can be serialized to JSON.""" + """Runnable that can be serialized to JSON.""" name: Optional[str] = None """The name of the runnable. Used for debugging and tracing.""" @@ -1752,7 +1752,7 @@ def _seq_output_schema( class RunnableSequence(RunnableSerializable[Input, Output]): - """A sequence of runnables, where the output of each is the input of the next. + """Sequence of Runnables, where the output of each is the input of the next. RunnableSequence is the most important composition operator in LangChain as it is used in virtually every chain. @@ -1764,7 +1764,7 @@ class RunnableSequence(RunnableSerializable[Input, Output]): The default implementations of `batch` and `abatch` utilize threadpools and asyncio gather and will be faster than naive invocation of invoke or ainvoke - for IO bound runnables. + for IO bound Runnables. Batching is implemented by invoking the batch method on each component of the RunnableSequence in order. @@ -2451,11 +2451,11 @@ async def input_aiter() -> AsyncIterator[Input]: class RunnableParallel(RunnableSerializable[Input, Dict[str, Any]]): - """A runnable that runs a mapping of runnables in parallel, and returns a mapping + """Runnable that runs a mapping of Runnables in parallel, and returns a mapping of their outputs. RunnableParallel is one of the two main composition primitives for the LCEL, - alongside RunnableSequence. It invokes runnables concurrently, providing the same + alongside RunnableSequence. It invokes Runnables concurrently, providing the same input to each. 
A RunnableParallel can be instantiated directly or by using a dict literal within a @@ -2882,7 +2882,7 @@ async def input_aiter() -> AsyncIterator[Input]: class RunnableGenerator(Runnable[Input, Output]): - """A runnable that runs a generator function. + """Runnable that runs a generator function. RunnableGenerators can be instantiated directly or by using a generator within a sequence. @@ -3730,7 +3730,7 @@ async def input_aiter() -> AsyncIterator[Input]: class RunnableEachBase(RunnableSerializable[List[Input], List[Output]]): - """A runnable that delegates calls to another runnable + """Runnable that delegates calls to another Runnable with each element of the input sequence. Use only if creating a new RunnableEach subclass with different __init__ args. @@ -3838,13 +3838,13 @@ async def astream_events( class RunnableEach(RunnableEachBase[Input, Output]): - """A runnable that delegates calls to another runnable + """Runnable that delegates calls to another Runnable with each element of the input sequence. It allows you to call multiple inputs with the bounded Runnable. RunnableEach makes it easy to run multiple inputs for the runnable. - In the below example, we associate and run three three inputs + In the below example, we associate and run three inputs with a Runnable: .. code-block:: python @@ -3910,7 +3910,7 @@ def with_listeners( class RunnableBindingBase(RunnableSerializable[Input, Output]): - """A runnable that delegates calls to another runnable with a set of kwargs. + """Runnable that delegates calls to another Runnable with a set of kwargs. Use only if creating a new RunnableBinding subclass with different __init__ args. @@ -4189,7 +4189,7 @@ async def atransform( class RunnableBinding(RunnableBindingBase[Input, Output]): - """Wrap a runnable with additional functionality. + """Wrap a Runnable with additional functionality. 
A RunnableBinding can be thought of as a "runnable decorator" that preserves the essential features of Runnable; i.e., batching, streaming, diff --git a/libs/core/langchain_core/runnables/branch.py b/libs/core/langchain_core/runnables/branch.py index fa60a58f16055e..ef0e3eec64d1dd 100644 --- a/libs/core/langchain_core/runnables/branch.py +++ b/libs/core/langchain_core/runnables/branch.py @@ -38,13 +38,13 @@ class RunnableBranch(RunnableSerializable[Input, Output]): - """A Runnable that selects which branch to run based on a condition. + """Runnable that selects which branch to run based on a condition. - The runnable is initialized with a list of (condition, runnable) pairs and + The Runnable is initialized with a list of (condition, Runnable) pairs and a default branch. When operating on an input, the first condition that evaluates to True is - selected, and the corresponding runnable is run on the input. + selected, and the corresponding Runnable is run on the input. If no condition evaluates to True, the default branch is run on the input. 
diff --git a/libs/core/langchain_core/runnables/configurable.py b/libs/core/langchain_core/runnables/configurable.py index 4c795a7b5bda15..ea4ed30e66c154 100644 --- a/libs/core/langchain_core/runnables/configurable.py +++ b/libs/core/langchain_core/runnables/configurable.py @@ -42,7 +42,7 @@ class DynamicRunnable(RunnableSerializable[Input, Output]): - """A Serializable Runnable that can be dynamically configured.""" + """Serializable Runnable that can be dynamically configured.""" default: RunnableSerializable[Input, Output] @@ -220,7 +220,7 @@ async def atransform( class RunnableConfigurableFields(DynamicRunnable[Input, Output]): - """A Runnable that can be dynamically configured.""" + """Runnable that can be dynamically configured.""" fields: Dict[str, AnyConfigurableField] @@ -297,7 +297,7 @@ def _prepare( # Before Python 3.11 native StrEnum is not available class StrEnum(str, enum.Enum): - """A string enum.""" + """String enum.""" pass @@ -313,10 +313,10 @@ class StrEnum(str, enum.Enum): class RunnableConfigurableAlternatives(DynamicRunnable[Input, Output]): - """A Runnable that can be dynamically configured. + """Runnable that can be dynamically configured. A RunnableConfigurableAlternatives should be initiated using the - `configurable_alternatives` method of a runnable or can be + `configurable_alternatives` method of a Runnable or can be initiated directly as well. Here is an example of using a RunnableConfigurableAlternatives that uses diff --git a/libs/core/langchain_core/runnables/fallbacks.py b/libs/core/langchain_core/runnables/fallbacks.py index bc7128c1bf3f24..9ecbe6cdfc6448 100644 --- a/libs/core/langchain_core/runnables/fallbacks.py +++ b/libs/core/langchain_core/runnables/fallbacks.py @@ -39,20 +39,20 @@ class RunnableWithFallbacks(RunnableSerializable[Input, Output]): - """A Runnable that can fallback to other Runnables if it fails. + """Runnable that can fallback to other Runnables if it fails. 
External APIs (e.g., APIs for a language model) may at times experience degraded performance or even downtime. - In these cases, it can be useful to have a fallback runnable that can be - used in place of the original runnable (e.g., fallback to another LLM provider). + In these cases, it can be useful to have a fallback Runnable that can be + used in place of the original Runnable (e.g., fallback to another LLM provider). - Fallbacks can be defined at the level of a single runnable, or at the level - of a chain of runnables. Fallbacks are tried in order until one succeeds or + Fallbacks can be defined at the level of a single Runnable, or at the level + of a chain of Runnables. Fallbacks are tried in order until one succeeds or all fail. While you can instantiate a ``RunnableWithFallbacks`` directly, it is usually - more convenient to use the ``with_fallbacks`` method on a runnable. + more convenient to use the ``with_fallbacks`` method on a Runnable. Example: diff --git a/libs/core/langchain_core/runnables/graph.py b/libs/core/langchain_core/runnables/graph.py index b172b71239c9de..f6b6b8b3ba32da 100644 --- a/libs/core/langchain_core/runnables/graph.py +++ b/libs/core/langchain_core/runnables/graph.py @@ -12,17 +12,23 @@ class Edge(NamedTuple): + """Edge in a graph.""" + source: str target: str class Node(NamedTuple): + """Node in a graph.""" + id: str data: Union[Type[BaseModel], RunnableType] @dataclass class Graph: + """Graph of nodes and edges.""" + nodes: Dict[str, Node] = field(default_factory=dict) edges: List[Edge] = field(default_factory=list) diff --git a/libs/core/langchain_core/runnables/history.py b/libs/core/langchain_core/runnables/history.py index f83de39cee1bf3..2134f2f88f005f 100644 --- a/libs/core/langchain_core/runnables/history.py +++ b/libs/core/langchain_core/runnables/history.py @@ -35,21 +35,21 @@ class RunnableWithMessageHistory(RunnableBindingBase): - """A runnable that manages chat message history for another runnable. 
+ """Runnable that manages chat message history for another Runnable. A chat message history is a sequence of messages that represent a conversation. - RunnableWithMessageHistory wraps another runnable and manages the chat message + RunnableWithMessageHistory wraps another Runnable and manages the chat message history for it; it is responsible for reading and updating the chat message history. - The formats supports for the inputs and outputs of the wrapped runnable + The formats supports for the inputs and outputs of the wrapped Runnable are described below. RunnableWithMessageHistory must always be called with a config that contains the appropriate parameters for the chat message history factory. - By default the runnable is expected to take a single configuration parameter + By default the Runnable is expected to take a single configuration parameter called `session_id` which is a string. This parameter is used to create a new or look up an existing chat message history that matches the given session_id. diff --git a/libs/core/langchain_core/runnables/passthrough.py b/libs/core/langchain_core/runnables/passthrough.py index 1f4367d9d5ead0..3e9277a7a2d88d 100644 --- a/libs/core/langchain_core/runnables/passthrough.py +++ b/libs/core/langchain_core/runnables/passthrough.py @@ -48,23 +48,23 @@ def identity(x: Other) -> Other: - """An identity function""" + """Identity function""" return x async def aidentity(x: Other) -> Other: - """An async identity function""" + """Async identity function""" return x class RunnablePassthrough(RunnableSerializable[Other, Other]): - """A runnable to passthrough inputs unchanged or with additional keys. + """Runnable to passthrough inputs unchanged or with additional keys. This runnable behaves almost like the identity function, except that it can be configured to add additional keys to the output, if the input is a dict. 
- The examples below demonstrate this runnable works using a few simple + The examples below demonstrate this Runnable works using a few simple chains. The chains rely on simple lambdas to make the examples easy to execute and experiment with. @@ -572,7 +572,7 @@ async def input_aiter() -> AsyncIterator[Dict[str, Any]]: class RunnablePick(RunnableSerializable[Dict[str, Any], Dict[str, Any]]): """ - A runnable that picks keys from Dict[str, Any] inputs. + Runnable that picks keys from Dict[str, Any] inputs. """ keys: Union[str, List[str]] diff --git a/libs/core/langchain_core/runnables/retry.py b/libs/core/langchain_core/runnables/retry.py index 36a508776ce62d..bb1d08eaf1bc1f 100644 --- a/libs/core/langchain_core/runnables/retry.py +++ b/libs/core/langchain_core/runnables/retry.py @@ -37,7 +37,7 @@ class RunnableRetry(RunnableBindingBase[Input, Output]): """Retry a Runnable if it fails. - A RunnableRetry helps can be used to add retry logic to any object + RunnableRetry can be used to add retry logic to any object that subclasses the base Runnable. Such retries are especially useful for network calls that may fail diff --git a/libs/core/langchain_core/runnables/router.py b/libs/core/langchain_core/runnables/router.py index eacdfb378139f3..6277792b9e3eb2 100644 --- a/libs/core/langchain_core/runnables/router.py +++ b/libs/core/langchain_core/runnables/router.py @@ -34,7 +34,7 @@ class RouterInput(TypedDict): - """A Router input. + """Router input. Attributes: key: The key to route on. @@ -47,8 +47,8 @@ class RouterInput(TypedDict): class RouterRunnable(RunnableSerializable[RouterInput, Output]): """ - A runnable that routes to a set of runnables based on Input['key']. - Returns the output of the selected runnable. + Runnable that routes to a set of Runnables based on Input['key']. + Returns the output of the selected Runnable. 
""" runnables: Mapping[str, Runnable[Any, Output]] diff --git a/libs/core/langchain_core/runnables/schema.py b/libs/core/langchain_core/runnables/schema.py index b2891b10b11c62..db4ad13a34ac63 100644 --- a/libs/core/langchain_core/runnables/schema.py +++ b/libs/core/langchain_core/runnables/schema.py @@ -12,32 +12,32 @@ class EventData(TypedDict, total=False): input: Any """The input passed to the runnable that generated the event. - Inputs will sometimes be available at the *START* of the runnable, and - sometimes at the *END* of the runnable. + Inputs will sometimes be available at the *START* of the Runnable, and + sometimes at the *END* of the Runnable. - If a runnable is able to stream its inputs, then its input by definition - won't be known until the *END* of the runnable when it has finished streaming + If a Runnable is able to stream its inputs, then its input by definition + won't be known until the *END* of the Runnable when it has finished streaming its inputs. """ output: Any - """The output of the runnable that generated the event. + """The output of the Runnable that generated the event. - Outputs will only be available at the *END* of the runnable. + Outputs will only be available at the *END* of the Runnable. - For most runnables, this field can be inferred from the `chunk` field, - though there might be some exceptions for special cased runnables (e.g., like + For most Runnables, this field can be inferred from the `chunk` field, + though there might be some exceptions for special cased Runnables (e.g., like chat models), which may return more information. """ chunk: Any """A streaming chunk from the output that generated the event. chunks support addition in general, and adding them up should result - in the output of the runnable that generated the event. + in the output of the Runnable that generated the event. """ class StreamEvent(TypedDict): - """A streaming event. + """Streaming event. 
Schema of a streaming event which is produced from the astream_events method. diff --git a/libs/core/langchain_core/runnables/utils.py b/libs/core/langchain_core/runnables/utils.py index cb8f21e29ce0fc..59d2c862037016 100644 --- a/libs/core/langchain_core/runnables/utils.py +++ b/libs/core/langchain_core/runnables/utils.py @@ -46,7 +46,15 @@ async def gated_coro(semaphore: asyncio.Semaphore, coro: Coroutine) -> Any: async def gather_with_concurrency(n: Union[int, None], *coros: Coroutine) -> list: - """Gather coroutines with a limit on the number of concurrent coroutines.""" + """Gather coroutines with a limit on the number of concurrent coroutines. + + Args: + n: The number of coroutines to run concurrently. + coros: The coroutines to run. + + Returns: + The results of the coroutines. + """ if n is None: return await asyncio.gather(*coros) @@ -344,7 +352,7 @@ async def aadd(addables: AsyncIterable[Addable]) -> Optional[Addable]: class ConfigurableField(NamedTuple): - """A field that can be configured by the user.""" + """Field that can be configured by the user.""" id: str @@ -358,7 +366,7 @@ def __hash__(self) -> int: class ConfigurableFieldSingleOption(NamedTuple): - """A field that can be configured by the user with a default value.""" + """Field that can be configured by the user with a default value.""" id: str options: Mapping[str, Any] @@ -373,7 +381,7 @@ def __hash__(self) -> int: class ConfigurableFieldMultiOption(NamedTuple): - """A field that can be configured by the user with multiple default values.""" + """Field that can be configured by the user with multiple default values.""" id: str options: Mapping[str, Any] @@ -393,7 +401,7 @@ def __hash__(self) -> int: class ConfigurableFieldSpec(NamedTuple): - """A field that can be configured by the user. It is a specification of a field.""" + """Field that can be configured by the user. 
It is a specification of a field.""" id: str annotation: Any diff --git a/libs/core/langchain_core/tools.py b/libs/core/langchain_core/tools.py index 7e1d82e226ed3a..baee390ab79eb6 100644 --- a/libs/core/langchain_core/tools.py +++ b/libs/core/langchain_core/tools.py @@ -99,10 +99,10 @@ def create_schema_from_function( class ToolException(Exception): - """An optional exception that tool throws when execution error occurs. + """Optional exception that tool throws when execution error occurs. When this exception is thrown, the agent will not stop working, - but will handle the exception according to the handle_tool_error + but it will handle the exception according to the handle_tool_error variable of the tool, and the processing result will be returned to the agent as observation, and printed in red on the console. """ diff --git a/libs/core/langchain_core/tracers/evaluation.py b/libs/core/langchain_core/tracers/evaluation.py index b7e5dc8bc0d76b..3caeda2b0c0f49 100644 --- a/libs/core/langchain_core/tracers/evaluation.py +++ b/libs/core/langchain_core/tracers/evaluation.py @@ -31,7 +31,7 @@ def wait_for_all_evaluators() -> None: class EvaluatorCallbackHandler(BaseTracer): - """A tracer that runs a run evaluator whenever a run is persisted. + """Tracer that runs a run evaluator whenever a run is persisted. 
Parameters ---------- diff --git a/libs/core/langchain_core/tracers/langchain.py b/libs/core/langchain_core/tracers/langchain.py index 91b54224bd2c5b..080946d379482e 100644 --- a/libs/core/langchain_core/tracers/langchain.py +++ b/libs/core/langchain_core/tracers/langchain.py @@ -63,7 +63,7 @@ def _get_executor() -> ThreadPoolExecutor: class LangChainTracer(BaseTracer): - """An implementation of the SharedTracer that POSTS to the langchain endpoint.""" + """Implementation of the SharedTracer that POSTS to the LangChain endpoint.""" def __init__( self, diff --git a/libs/core/langchain_core/tracers/langchain_v1.py b/libs/core/langchain_core/tracers/langchain_v1.py index 38a876b44537f6..a5b1fcbf5e042c 100644 --- a/libs/core/langchain_core/tracers/langchain_v1.py +++ b/libs/core/langchain_core/tracers/langchain_v1.py @@ -37,7 +37,7 @@ def _get_endpoint() -> str: @deprecated("0.1.0", alternative="LangChainTracer", removal="0.2.0") class LangChainTracerV1(BaseTracer): - """An implementation of the SharedTracer that POSTS to the langchain endpoint.""" + """Implementation of the SharedTracer that POSTS to the langchain endpoint.""" def __init__(self, **kwargs: Any) -> None: """Initialize the LangChain tracer.""" diff --git a/libs/core/langchain_core/tracers/log_stream.py b/libs/core/langchain_core/tracers/log_stream.py index db56626451649e..b3cadb55a6af7b 100644 --- a/libs/core/langchain_core/tracers/log_stream.py +++ b/libs/core/langchain_core/tracers/log_stream.py @@ -88,7 +88,7 @@ class RunState(TypedDict): class RunLogPatch: - """A patch to the run log.""" + """Patch to the run log.""" ops: List[Dict[str, Any]] """List of jsonpatch operations, which describe how to create the run state @@ -121,7 +121,7 @@ def __eq__(self, other: object) -> bool: class RunLog(RunLogPatch): - """A run log.""" + """Run log.""" state: RunState """Current state of the log, obtained from applying all ops in sequence.""" @@ -159,7 +159,7 @@ def __eq__(self, other: object) -> bool: class 
LogStreamCallbackHandler(BaseTracer): - """A tracer that streams run logs to a stream.""" + """Tracer that streams run logs to a stream.""" def __init__( self, diff --git a/libs/core/langchain_core/tracers/root_listeners.py b/libs/core/langchain_core/tracers/root_listeners.py index a81db8af8f9cf6..7db7407d268f23 100644 --- a/libs/core/langchain_core/tracers/root_listeners.py +++ b/libs/core/langchain_core/tracers/root_listeners.py @@ -12,7 +12,7 @@ class RootListenersTracer(BaseTracer): - """A tracer that calls listeners on run start, end, and error.""" + """Tracer that calls listeners on run start, end, and error.""" def __init__( self, diff --git a/libs/core/langchain_core/tracers/run_collector.py b/libs/core/langchain_core/tracers/run_collector.py index 995ac7cc683dcd..9442daa413201b 100644 --- a/libs/core/langchain_core/tracers/run_collector.py +++ b/libs/core/langchain_core/tracers/run_collector.py @@ -9,7 +9,7 @@ class RunCollectorCallbackHandler(BaseTracer): """ - A tracer that collects all nested runs in a list. + Tracer that collects all nested runs in a list. This tracer is useful for inspection and evaluation purposes. 
diff --git a/libs/core/langchain_core/utils/formatting.py b/libs/core/langchain_core/utils/formatting.py index 83fe2aba9277b9..d2e5376b6837c6 100644 --- a/libs/core/langchain_core/utils/formatting.py +++ b/libs/core/langchain_core/utils/formatting.py @@ -4,7 +4,7 @@ class StrictFormatter(Formatter): - """A subclass of formatter that checks for extra keys.""" + """Formatter that checks for extra keys.""" def vformat( self, format_string: str, args: Sequence, kwargs: Mapping[str, Any] diff --git a/libs/core/tests/unit_tests/runnables/__snapshots__/test_runnable.ambr b/libs/core/tests/unit_tests/runnables/__snapshots__/test_runnable.ambr index b149765e88765c..a717b95eecae2e 100644 --- a/libs/core/tests/unit_tests/runnables/__snapshots__/test_runnable.ambr +++ b/libs/core/tests/unit_tests/runnables/__snapshots__/test_runnable.ambr @@ -1686,7 +1686,7 @@ ]), 'definitions': dict({ 'AIMessage': dict({ - 'description': 'A Message from an AI.', + 'description': 'Message from an AI.', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -1734,7 +1734,7 @@ 'type': 'object', }), 'ChatMessage': dict({ - 'description': 'A Message that can be assigned an arbitrary speaker (i.e. role).', + 'description': 'Message that can be assigned an arbitrary speaker (i.e. 
role).', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -1829,7 +1829,7 @@ 'type': 'object', }), 'FunctionMessage': dict({ - 'description': 'A Message for passing the result of executing a function back to a model.', + 'description': 'Message for passing the result of executing a function back to a model.', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -1877,7 +1877,7 @@ 'type': 'object', }), 'HumanMessage': dict({ - 'description': 'A Message from a human.', + 'description': 'Message from a human.', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -1948,7 +1948,7 @@ }), 'SystemMessage': dict({ 'description': ''' - A Message for priming AI behavior, usually passed in as the first of a sequence + Message for priming AI behavior, usually passed in as the first of a sequence of input messages. ''', 'properties': dict({ @@ -1993,7 +1993,7 @@ 'type': 'object', }), 'ToolMessage': dict({ - 'description': 'A Message for passing the result of executing a tool back to a model.', + 'description': 'Message for passing the result of executing a tool back to a model.', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -2084,7 +2084,7 @@ ]), 'definitions': dict({ 'AIMessage': dict({ - 'description': 'A Message from an AI.', + 'description': 'Message from an AI.', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -2132,7 +2132,7 @@ 'type': 'object', }), 'ChatMessage': dict({ - 'description': 'A Message that can be assigned an arbitrary speaker (i.e. role).', + 'description': 'Message that can be assigned an arbitrary speaker (i.e. 
role).', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -2227,7 +2227,7 @@ 'type': 'object', }), 'FunctionMessage': dict({ - 'description': 'A Message for passing the result of executing a function back to a model.', + 'description': 'Message for passing the result of executing a function back to a model.', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -2275,7 +2275,7 @@ 'type': 'object', }), 'HumanMessage': dict({ - 'description': 'A Message from a human.', + 'description': 'Message from a human.', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -2346,7 +2346,7 @@ }), 'SystemMessage': dict({ 'description': ''' - A Message for priming AI behavior, usually passed in as the first of a sequence + Message for priming AI behavior, usually passed in as the first of a sequence of input messages. ''', 'properties': dict({ @@ -2391,7 +2391,7 @@ 'type': 'object', }), 'ToolMessage': dict({ - 'description': 'A Message for passing the result of executing a tool back to a model.', + 'description': 'Message for passing the result of executing a tool back to a model.', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -2466,7 +2466,7 @@ ]), 'definitions': dict({ 'AIMessage': dict({ - 'description': 'A Message from an AI.', + 'description': 'Message from an AI.', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -2514,7 +2514,7 @@ 'type': 'object', }), 'ChatMessage': dict({ - 'description': 'A Message that can be assigned an arbitrary speaker (i.e. role).', + 'description': 'Message that can be assigned an arbitrary speaker (i.e. 
role).', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -2562,7 +2562,7 @@ 'type': 'object', }), 'FunctionMessage': dict({ - 'description': 'A Message for passing the result of executing a function back to a model.', + 'description': 'Message for passing the result of executing a function back to a model.', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -2610,7 +2610,7 @@ 'type': 'object', }), 'HumanMessage': dict({ - 'description': 'A Message from a human.', + 'description': 'Message from a human.', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -2659,7 +2659,7 @@ }), 'SystemMessage': dict({ 'description': ''' - A Message for priming AI behavior, usually passed in as the first of a sequence + Message for priming AI behavior, usually passed in as the first of a sequence of input messages. ''', 'properties': dict({ @@ -2704,7 +2704,7 @@ 'type': 'object', }), 'ToolMessage': dict({ - 'description': 'A Message for passing the result of executing a tool back to a model.', + 'description': 'Message for passing the result of executing a tool back to a model.', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -2767,7 +2767,7 @@ ]), 'definitions': dict({ 'AIMessage': dict({ - 'description': 'A Message from an AI.', + 'description': 'Message from an AI.', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -2815,7 +2815,7 @@ 'type': 'object', }), 'ChatMessage': dict({ - 'description': 'A Message that can be assigned an arbitrary speaker (i.e. role).', + 'description': 'Message that can be assigned an arbitrary speaker (i.e. 
role).', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -2910,7 +2910,7 @@ 'type': 'object', }), 'FunctionMessage': dict({ - 'description': 'A Message for passing the result of executing a function back to a model.', + 'description': 'Message for passing the result of executing a function back to a model.', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -2958,7 +2958,7 @@ 'type': 'object', }), 'HumanMessage': dict({ - 'description': 'A Message from a human.', + 'description': 'Message from a human.', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -3029,7 +3029,7 @@ }), 'SystemMessage': dict({ 'description': ''' - A Message for priming AI behavior, usually passed in as the first of a sequence + Message for priming AI behavior, usually passed in as the first of a sequence of input messages. ''', 'properties': dict({ @@ -3074,7 +3074,7 @@ 'type': 'object', }), 'ToolMessage': dict({ - 'description': 'A Message for passing the result of executing a tool back to a model.', + 'description': 'Message for passing the result of executing a tool back to a model.', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -3137,7 +3137,7 @@ ]), 'definitions': dict({ 'AIMessage': dict({ - 'description': 'A Message from an AI.', + 'description': 'Message from an AI.', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -3185,7 +3185,7 @@ 'type': 'object', }), 'ChatMessage': dict({ - 'description': 'A Message that can be assigned an arbitrary speaker (i.e. role).', + 'description': 'Message that can be assigned an arbitrary speaker (i.e. 
role).', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -3280,7 +3280,7 @@ 'type': 'object', }), 'FunctionMessage': dict({ - 'description': 'A Message for passing the result of executing a function back to a model.', + 'description': 'Message for passing the result of executing a function back to a model.', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -3328,7 +3328,7 @@ 'type': 'object', }), 'HumanMessage': dict({ - 'description': 'A Message from a human.', + 'description': 'Message from a human.', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -3399,7 +3399,7 @@ }), 'SystemMessage': dict({ 'description': ''' - A Message for priming AI behavior, usually passed in as the first of a sequence + Message for priming AI behavior, usually passed in as the first of a sequence of input messages. ''', 'properties': dict({ @@ -3444,7 +3444,7 @@ 'type': 'object', }), 'ToolMessage': dict({ - 'description': 'A Message for passing the result of executing a tool back to a model.', + 'description': 'Message for passing the result of executing a tool back to a model.', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -3499,7 +3499,7 @@ dict({ 'definitions': dict({ 'AIMessage': dict({ - 'description': 'A Message from an AI.', + 'description': 'Message from an AI.', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -3547,7 +3547,7 @@ 'type': 'object', }), 'ChatMessage': dict({ - 'description': 'A Message that can be assigned an arbitrary speaker (i.e. role).', + 'description': 'Message that can be assigned an arbitrary speaker (i.e. 
role).', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -3642,7 +3642,7 @@ 'type': 'object', }), 'FunctionMessage': dict({ - 'description': 'A Message for passing the result of executing a function back to a model.', + 'description': 'Message for passing the result of executing a function back to a model.', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -3690,7 +3690,7 @@ 'type': 'object', }), 'HumanMessage': dict({ - 'description': 'A Message from a human.', + 'description': 'Message from a human.', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -3772,7 +3772,7 @@ }), 'SystemMessage': dict({ 'description': ''' - A Message for priming AI behavior, usually passed in as the first of a sequence + Message for priming AI behavior, usually passed in as the first of a sequence of input messages. ''', 'properties': dict({ @@ -3817,7 +3817,7 @@ 'type': 'object', }), 'ToolMessage': dict({ - 'description': 'A Message for passing the result of executing a tool back to a model.', + 'description': 'Message for passing the result of executing a tool back to a model.', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -3899,7 +3899,7 @@ ]), 'definitions': dict({ 'AIMessage': dict({ - 'description': 'A Message from an AI.', + 'description': 'Message from an AI.', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -3947,7 +3947,7 @@ 'type': 'object', }), 'ChatMessage': dict({ - 'description': 'A Message that can be assigned an arbitrary speaker (i.e. role).', + 'description': 'Message that can be assigned an arbitrary speaker (i.e. 
role).', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -3995,7 +3995,7 @@ 'type': 'object', }), 'FunctionMessage': dict({ - 'description': 'A Message for passing the result of executing a function back to a model.', + 'description': 'Message for passing the result of executing a function back to a model.', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -4043,7 +4043,7 @@ 'type': 'object', }), 'HumanMessage': dict({ - 'description': 'A Message from a human.', + 'description': 'Message from a human.', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', @@ -4092,7 +4092,7 @@ }), 'SystemMessage': dict({ 'description': ''' - A Message for priming AI behavior, usually passed in as the first of a sequence + Message for priming AI behavior, usually passed in as the first of a sequence of input messages. ''', 'properties': dict({ @@ -4137,7 +4137,7 @@ 'type': 'object', }), 'ToolMessage': dict({ - 'description': 'A Message for passing the result of executing a tool back to a model.', + 'description': 'Message for passing the result of executing a tool back to a model.', 'properties': dict({ 'additional_kwargs': dict({ 'title': 'Additional Kwargs', diff --git a/libs/core/tests/unit_tests/runnables/test_runnable.py b/libs/core/tests/unit_tests/runnables/test_runnable.py index a6af8ada3a22c9..9272744bce1c5e 100644 --- a/libs/core/tests/unit_tests/runnables/test_runnable.py +++ b/libs/core/tests/unit_tests/runnables/test_runnable.py @@ -320,7 +320,7 @@ async def typed_async_lambda_impl(x: str) -> int: "definitions": { "AIMessage": { "title": "AIMessage", - "description": "A Message from an AI.", + "description": "Message from an AI.", "type": "object", "properties": { "content": { @@ -355,7 +355,7 @@ async def typed_async_lambda_impl(x: str) -> int: }, "HumanMessage": { "title": "HumanMessage", - "description": "A Message from a human.", + "description": "Message from a human.", 
"type": "object", "properties": { "content": { @@ -390,7 +390,7 @@ async def typed_async_lambda_impl(x: str) -> int: }, "ChatMessage": { "title": "ChatMessage", - "description": "A Message that can be assigned an arbitrary speaker (i.e. role).", # noqa + "description": "Message that can be assigned an arbitrary speaker (i.e. role).", # noqa "type": "object", "properties": { "content": { @@ -421,7 +421,7 @@ async def typed_async_lambda_impl(x: str) -> int: }, "SystemMessage": { "title": "SystemMessage", - "description": "A Message for priming AI behavior, usually passed in as the first of a sequence\nof input messages.", # noqa + "description": "Message for priming AI behavior, usually passed in as the first of a sequence\nof input messages.", # noqa "type": "object", "properties": { "content": { @@ -451,7 +451,7 @@ async def typed_async_lambda_impl(x: str) -> int: }, "FunctionMessage": { "title": "FunctionMessage", - "description": "A Message for passing the result of executing a function back to a model.", # noqa + "description": "Message for passing the result of executing a function back to a model.", # noqa "type": "object", "properties": { "content": { @@ -482,7 +482,7 @@ async def typed_async_lambda_impl(x: str) -> int: }, "ToolMessage": { "title": "ToolMessage", - "description": "A Message for passing the result of executing a tool back to a model.", # noqa + "description": "Message for passing the result of executing a tool back to a model.", # noqa "type": "object", "properties": { "content": {