2 changes: 1 addition & 1 deletion src/oss/deepagents/human-in-the-loop.mdx
@@ -15,7 +15,7 @@ The `interrupt_on` parameter accepts a dictionary mapping tool names to interrup

:::python
```python
-from langchain_core.tools import tool
+from langchain.tools import tool
from deepagents import create_deep_agent
from langgraph.checkpoint.memory import MemorySaver

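For reference, a minimal sketch of the pattern the surrounding example on this page documents, updated to the `langchain.tools` import. The `interrupt_on` value shape and the `checkpointer` keyword are assumptions here; the page's full example is authoritative.

```python
from langchain.tools import tool  # v1-style import replacing langchain_core.tools
from deepagents import create_deep_agent
from langgraph.checkpoint.memory import MemorySaver


@tool
def delete_file(path: str) -> str:
    """Delete the file at the given path."""
    return f"Deleted {path}"


# Assumptions: a bare `True` enables the default interrupt behavior for a tool,
# and the checkpointer is passed directly so the paused run can be resumed.
agent = create_deep_agent(
    tools=[delete_file],
    interrupt_on={"delete_file": True},
    checkpointer=MemorySaver(),
)
```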
2 changes: 1 addition & 1 deletion src/oss/deepagents/middleware.mdx
@@ -186,7 +186,7 @@ The subagents middleware allows you to supply subagents through a `task` tool.

:::python
```python
-from langchain_core.tools import tool
+from langchain.tools import tool
from langchain.agents import create_agent
from deepagents.middleware.subagents import SubAgentMiddleware

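A rough sketch of how the updated import fits the subagents example, assuming `SubAgentMiddleware` accepts a list of declarative subagent specs via a `subagents` keyword and that `create_agent` accepts a provider-prefixed model string; both are assumptions for illustration, and the page's own example defines the real constructor arguments.

```python
from langchain.tools import tool  # v1-style import replacing langchain_core.tools
from langchain.agents import create_agent
from deepagents.middleware.subagents import SubAgentMiddleware


@tool
def search_docs(query: str) -> str:
    """Search the documentation for a query."""
    return f"Results for: {query}"


# Assumed shape: a subagent is described declaratively and exposed to the main
# agent through the `task` tool that the middleware adds.
researcher = {
    "name": "researcher",
    "description": "Looks up background information in the docs.",
    "prompt": "You are a focused research assistant.",
    "tools": [search_docs],
}

agent = create_agent(
    model="openai:gpt-4o-mini",  # illustrative model id
    tools=[],
    middleware=[SubAgentMiddleware(subagents=[researcher])],  # assumed keyword
)
```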
2 changes: 1 addition & 1 deletion src/oss/langchain/agents.mdx
@@ -303,7 +303,7 @@ To customize how tool errors are handled, use the @[`@wrap_tool_call`] decorator
```python wrap
from langchain.agents import create_agent
from langchain.agents.middleware import wrap_tool_call
-from langchain_core.messages import ToolMessage
+from langchain.messages import ToolMessage


@wrap_tool_call
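As context for this hunk, a sketch of the error-handling pattern the section describes, assuming a `@wrap_tool_call` handler receives the tool call request plus a callable that executes it, and that the request exposes the original tool-call dict:

```python
from langchain.agents import create_agent
from langchain.agents.middleware import wrap_tool_call
from langchain.messages import ToolMessage  # v1-style import replacing langchain_core.messages


@wrap_tool_call
def handle_tool_errors(request, handler):
    """Convert tool failures into a ToolMessage the model can recover from."""
    try:
        return handler(request)
    except Exception as exc:
        return ToolMessage(
            content=f"Tool call failed: {exc}",
            tool_call_id=request.tool_call["id"],  # assumption: request carries the original tool call
        )


# The decorated handler is attached like any other middleware (model id is illustrative).
agent = create_agent(model="openai:gpt-4o-mini", tools=[], middleware=[handle_tool_errors])
```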
4 changes: 2 additions & 2 deletions src/oss/langchain/guardrails.mdx
@@ -454,7 +454,7 @@ Use "after agent" hooks to validate final outputs once before returning to the u
```python title="Class syntax"
from langchain.agents.middleware import AgentMiddleware, AgentState, hook_config
from langgraph.runtime import Runtime
-from langchain_core.messages import AIMessage
+from langchain.messages import AIMessage
from langchain.chat_models import init_chat_model
from typing import Any

@@ -511,7 +511,7 @@ result = agent.invoke({
```python title="Decorator syntax"
from langchain.agents.middleware import after_agent, AgentState, hook_config
from langgraph.runtime import Runtime
-from langchain_core.messages import AIMessage
+from langchain.messages import AIMessage
from langchain.chat_models import init_chat_model
from typing import Any

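For this file, a sketch of the decorator-syntax "after agent" hook the section documents, assuming the hook receives `(state, runtime)` and that returning a `messages` update appends a replacement answer; both assumptions should be checked against the page's full example.

```python
from langchain.agents import create_agent
from langchain.agents.middleware import after_agent, AgentState
from langchain.messages import AIMessage  # v1-style import replacing langchain_core.messages
from langgraph.runtime import Runtime
from typing import Any


@after_agent
def validate_final_output(state: AgentState, runtime: Runtime) -> dict[str, Any] | None:
    """Inspect the agent's final message once, just before it is returned to the user."""
    last = state["messages"][-1]
    if isinstance(last, AIMessage) and "internal-only" in str(last.content):
        # Assumption: appending a corrective AIMessage is enough to mask the violation.
        return {"messages": [AIMessage("I can't share that information.")]}
    return None


agent = create_agent(
    model="openai:gpt-4o-mini",  # illustrative model id
    tools=[],
    middleware=[validate_final_output],
)
```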
6 changes: 3 additions & 3 deletions src/oss/langchain/middleware/built-in.mdx
@@ -1473,7 +1473,7 @@ The middleware uses an LLM to generate plausible responses for tool calls instea
```python
from langchain.agents import create_agent
from langchain.agents.middleware import LLMToolEmulator
-from langchain_core.tools import tool
+from langchain.tools import tool


@tool
@@ -1974,7 +1974,7 @@ The middleware adds two search tools to agents:
```python
from langchain.agents import create_agent
from langchain.agents.middleware import FilesystemFileSearchMiddleware
-from langchain_core.messages import HumanMessage
+from langchain.messages import HumanMessage


agent = create_agent(
@@ -2113,7 +2113,7 @@ The middleware caches content up to and including the latest message in each req
from langchain_anthropic import ChatAnthropic
from langchain_anthropic.middleware import AnthropicPromptCachingMiddleware
from langchain.agents import create_agent
-from langchain_core.messages import HumanMessage
+from langchain.messages import HumanMessage


LONG_PROMPT = """
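The three hunks in this file all touch import lines in built-in middleware examples. As one consolidated, hedged sketch of how such middleware is attached, using `LLMToolEmulator` and assuming its no-argument constructor emulates every tool (the page's own examples are authoritative for constructor options):

```python
from langchain.agents import create_agent
from langchain.agents.middleware import LLMToolEmulator
from langchain.tools import tool  # v1-style imports replacing langchain_core
from langchain.messages import HumanMessage


@tool
def get_weather(city: str) -> str:
    """Return the current weather for a city."""
    return f"It is sunny in {city}."


# Assumption: with no arguments the emulator intercepts every tool call and has
# an LLM fabricate a plausible result instead of executing the real tool.
agent = create_agent(
    model="openai:gpt-4o-mini",  # illustrative model id
    tools=[get_weather],
    middleware=[LLMToolEmulator()],
)

result = agent.invoke({"messages": [HumanMessage("What's the weather in Paris?")]})
```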
4 changes: 2 additions & 2 deletions src/oss/langchain/middleware/custom.mdx
@@ -736,7 +736,7 @@ const dynamicModelMiddleware = createMiddleware({
```python
from langchain.agents.middleware import wrap_tool_call
from langchain.tools.tool_node import ToolCallRequest
-from langchain_core.messages import ToolMessage
+from langchain.messages import ToolMessage
from langgraph.types import Command
from typing import Callable

@@ -764,7 +764,7 @@ def monitor_tool(
```python
from langchain.tools.tool_node import ToolCallRequest
from langchain.agents.middleware import AgentMiddleware
-from langchain_core.messages import ToolMessage
+from langchain.messages import ToolMessage
from langgraph.types import Command
from typing import Callable

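As context for the `monitor_tool` hunk, a sketch of the decorator-based wrapper it belongs to, timing a tool call without changing its result, assuming the handler signature suggested by these imports:

```python
import time
from typing import Callable

from langchain.agents.middleware import wrap_tool_call
from langchain.tools.tool_node import ToolCallRequest
from langchain.messages import ToolMessage  # v1-style import replacing langchain_core.messages
from langgraph.types import Command


@wrap_tool_call
def monitor_tool(
    request: ToolCallRequest,
    handler: Callable[[ToolCallRequest], ToolMessage | Command],
) -> ToolMessage | Command:
    """Time each tool call and pass the result through unchanged."""
    start = time.perf_counter()
    result = handler(request)
    elapsed = time.perf_counter() - start
    # Assumption: request.tool_call is the original tool-call dict with a "name" key.
    print(f"{request.tool_call['name']} finished in {elapsed:.3f}s")
    return result
```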
2 changes: 1 addition & 1 deletion src/oss/langchain/models.mdx
@@ -187,7 +187,7 @@ response = model.invoke(conversation)
print(response) # AIMessage("J'adore créer des applications.")
```
```python Message objects
-from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
+from langchain.messages import HumanMessage, AIMessage, SystemMessage

conversation = [
SystemMessage("You are a helpful assistant that translates English to French."),
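For the message-objects example being touched here, a short self-contained sketch with the updated import (the model id is illustrative):

```python
from langchain.chat_models import init_chat_model
from langchain.messages import HumanMessage, AIMessage, SystemMessage  # v1-style import

model = init_chat_model("gpt-4o-mini")  # illustrative model id

conversation = [
    SystemMessage("You are a helpful assistant that translates English to French."),
    HumanMessage("I love building applications."),
    AIMessage("J'adore créer des applications."),
    HumanMessage("I love shipping them too."),
]

response = model.invoke(conversation)
print(response.content)
```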
4 changes: 2 additions & 2 deletions src/oss/langchain/supervisor.mdx
@@ -112,7 +112,7 @@ Start by defining the tools that require structured inputs. In real applications

:::python
```python
-from langchain_core.tools import tool
+from langchain.tools import tool

@tool
def create_calendar_event(
@@ -708,7 +708,7 @@ A supervisor agent coordinates specialized sub-agents (calendar and email)
that are wrapped as tools.
"""

-from langchain_core.tools import tool
+from langchain.tools import tool
from langchain.agents import create_agent
from langchain.chat_models import init_chat_model

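Pulling the two supervisor hunks together, a hedged sketch of the pattern: a structured-input tool owned by a calendar sub-agent, which is itself wrapped as a tool for the supervisor. The `system_prompt` keyword, model ids, and the way the sub-agent's reply is read back out are assumptions; the page's full example is the reference.

```python
from langchain.tools import tool  # v1-style import replacing langchain_core.tools
from langchain.agents import create_agent


@tool
def create_calendar_event(title: str, date: str, start_time: str, duration_minutes: int) -> str:
    """Create a calendar event from structured inputs."""
    return f"Created '{title}' on {date} at {start_time} for {duration_minutes} minutes."


calendar_agent = create_agent(
    model="openai:gpt-4o-mini",  # illustrative model id
    tools=[create_calendar_event],
    system_prompt="You schedule calendar events.",  # assumed keyword
)


@tool
def schedule_event(request: str) -> str:
    """Hand a natural-language scheduling request to the calendar sub-agent."""
    result = calendar_agent.invoke({"messages": [{"role": "user", "content": request}]})
    return result["messages"][-1].content  # assumption: the last message holds the reply


supervisor = create_agent(
    model="openai:gpt-4o-mini",
    tools=[schedule_event],
    system_prompt="You are a supervisor that delegates to specialist agents.",
)
```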
2 changes: 1 addition & 1 deletion src/oss/langgraph/thinking-in-langgraph.mdx
@@ -478,7 +478,7 @@ We'll implement each node as a simple function. Remember: nodes take state, do w
from langgraph.graph import StateGraph, START, END
from langgraph.types import interrupt, Command, RetryPolicy
from langchain_openai import ChatOpenAI
-from langchain_core.messages import HumanMessage
+from langchain.messages import HumanMessage

llm = ChatOpenAI(model="gpt-4")

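For the node example this hunk edits, a compact sketch of the idea the page is explaining — a node takes state, does work, and returns a partial update — using only APIs visible in the hunk plus a `TypedDict` state:

```python
from typing import TypedDict

from langgraph.graph import StateGraph, START, END
from langchain_openai import ChatOpenAI
from langchain.messages import HumanMessage  # v1-style import replacing langchain_core.messages

llm = ChatOpenAI(model="gpt-4")


class State(TypedDict):
    question: str
    answer: str


def draft_answer(state: State) -> dict:
    """A node: read the current state, call the model, return only the keys that changed."""
    response = llm.invoke([HumanMessage(state["question"])])
    return {"answer": response.content}


builder = StateGraph(State)
builder.add_node("draft_answer", draft_answer)
builder.add_edge(START, "draft_answer")
builder.add_edge("draft_answer", END)
graph = builder.compile()

result = graph.invoke({"question": "What does a LangGraph node return?", "answer": ""})
```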
2 changes: 1 addition & 1 deletion src/oss/python/migrate/langchain-v1.mdx
@@ -458,7 +458,7 @@ from typing_extensions import Annotated
from pydantic import BaseModel
from langgraph.graph import StateGraph
from langgraph.graph.messages import add_messages
-from langchain_core.messages import AnyMessage
+from langchain.messages import AnyMessage


class AgentState(BaseModel): # [!code highlight]
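To round out the migration hunk above, a sketch of a Pydantic state model wired into a graph, assuming `add_messages` can be imported from `langgraph.graph` (the hunk's own context line imports it from `langgraph.graph.messages`):

```python
from typing_extensions import Annotated
from pydantic import BaseModel

from langgraph.graph import StateGraph, START, END, add_messages  # import path assumed
from langchain.messages import AnyMessage, AIMessage, HumanMessage  # v1-style import


class AgentState(BaseModel):
    messages: Annotated[list[AnyMessage], add_messages]


def respond(state: AgentState) -> dict:
    """Return a messages update; the add_messages reducer appends it to the list."""
    return {"messages": [AIMessage("Hello from the graph!")]}


builder = StateGraph(AgentState)
builder.add_node("respond", respond)
builder.add_edge(START, "respond")
builder.add_edge("respond", END)
graph = builder.compile()

result = graph.invoke({"messages": [HumanMessage("Hi!")]})
```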