diff --git a/pipeline/preprocessors/handle_auto_links.py b/pipeline/preprocessors/handle_auto_links.py
index ea07494ed..1df76bbf6 100644
--- a/pipeline/preprocessors/handle_auto_links.py
+++ b/pipeline/preprocessors/handle_auto_links.py
@@ -104,11 +104,15 @@ def _transform_link(
(?P<title>
[^\]]+) # Custom title - one or more non-bracket characters
\] # Closing bracket for title
\[ # Opening bracket for link name
- (?P<link_name_with_title>[^\]]+) # Link name - non-bracket chars
+ (?P<backtick_with_title>`)? # Optional backtick before link name
+ (?P<link_name_with_title>[^`\]]+) # Link name - non-backtick/bracket chars
+ (?(backtick_with_title)`|) # Closing backtick if opening backtick present
\] # Closing bracket for link name
| # OR
@\[ # @ symbol followed by opening bracket
- (?P<link_name>[^\]]+) # Link name - one or more non-bracket characters
+ (?P<backtick>`)? # Optional backtick before link name
+ (?P<link_name>[^`\]]+) # Link name - non-backtick/bracket characters
+ (?(backtick)`|) # Closing backtick if opening backtick present
\] # Closing bracket
)
""",
@@ -128,12 +132,18 @@ def replace_cross_reference(match: re.Match[str]) -> str:
if title is not None:
# This is @[title][ref] format
link_name = match.group("link_name_with_title")
+ has_backticks = match.group("backtick_with_title") is not None
custom_title = title
else:
# This is @[ref] format
link_name = match.group("link_name")
+ has_backticks = match.group("backtick") is not None
custom_title = None
+ # If no custom title and backticks are present, add them to the title
+ if custom_title is None and has_backticks:
+ custom_title = f"`{link_name}`"
+
transformed = _transform_link(
link_name, scope, file_path, line_number, custom_title
)
diff --git a/pipeline/preprocessors/link_map.py b/pipeline/preprocessors/link_map.py
index fe858a3f3..ab29ac892 100644
--- a/pipeline/preprocessors/link_map.py
+++ b/pipeline/preprocessors/link_map.py
@@ -130,26 +130,6 @@ class LinkMap(TypedDict):
"update_state": "reference/classes/langgraph.CompiledStateGraph.html#updateState",
},
},
- {
- # Python LangChain reference
- "host": "https://python.langchain.com/api_reference/",
- "scope": "python",
- "links": {
- "AIMessage": "core/messages/langchain_core.messages.ai.AIMessage.html",
- "AIMessageChunk": "core/messages/langchain_core.messages.ai.AIMessageChunk.html",
- "BaseChatModel.invoke": "core/language_models/langchain_core.language_models.chat_models.BaseChatModel.html#langchain_core.language_models.chat_models.BaseChatModel.invoke",
- "BaseChatModel.stream": "core/language_models/langchain_core.language_models.chat_models.BaseChatModel.html#langchain_core.language_models.chat_models.BaseChatModel.stream",
- "BaseChatModel.astream_events": "core/language_models/langchain_core.language_models.chat_models.BaseChatModel.html#langchain_core.language_models.chat_models.BaseChatModel.astream_events",
- "BaseChatModel.batch": "core/language_models/langchain_core.language_models.chat_models.BaseChatModel.html#langchain_core.language_models.chat_models.BaseChatModel.batch",
- "BaseChatModel.batch_as_completed": "core/language_models/langchain_core.language_models.chat_models.BaseChatModel.html#langchain_core.language_models.chat_models.BaseChatModel.batch_as_completed",
- "BaseChatModel.bind_tools": "core/language_models/langchain_core.language_models.chat_models.BaseChatModel.html#langchain_core.language_models.chat_models.BaseChatModel.bind_tools",
- "Document": "core/documents/langchain_core.documents.base.Document.html",
- "init_chat_model": "langchain/chat_models/langchain.chat_models.base.init_chat_model.html",
- "RunnableConfig": "core/runnables/langchain_core.runnables.config.RunnableConfig.html",
- "@tool": "core/tools/langchain_core.tools.convert.tool.html",
- "Embeddings": "core/embeddings/langchain_core.embeddings.embeddings.Embeddings.html",
- },
- },
{
"host": "https://v03.api.js.langchain.com/",
"scope": "js",
@@ -172,9 +152,73 @@ class LinkMap(TypedDict):
"host": "https://reference.langchain.com/python/",
"scope": "python",
"links": {
+ # Module pages
+ "langchain": "langchain/langchain",
+ "langchain.agents": "langchain/agents",
+ "langchain.messages": "langchain/messages",
+ "langchain.tools": "langchain/tools",
+ "langchain.chat_models": "langchain/models",
+ "langchain.embeddings": "langchain/embeddings",
+ "langchain_core": "langchain_core/",
+ # Agents
+ "create_agent": "langchain/agents/#langchain.agents.create_agent",
+ "create_agent(tools)": "langchain/agents/#langchain.agents.create_agent(tools)",
+ "system_prompt": "langchain/agents/#langchain.agents.create_agent(system_prompt)",
+ "AgentState": "langchain/agents/#langchain.agents.AgentState",
+ # Middleware
+ "AgentMiddleware": "langchain/middleware/#langchain.agents.middleware.AgentMiddleware",
+ "state_schema": "langchain/middleware/#langchain.agents.middleware.AgentMiddleware.state_schema",
+ "PIIMiddleware": "langchain/middleware/#langchain.agents.middleware.PIIMiddleware",
+ "SummarizationMiddleware": "langchain/middleware/#langchain.agents.middleware.SummarizationMiddleware",
+ "HumanInTheLoopMiddleware": "langchain/middleware/#langchain.agents.middleware.HumanInTheLoopMiddleware",
+ # Messages
+ "AIMessage": "langchain/messages/#langchain.messages.AIMessage",
+ "AIMessageChunk": "langchain/messages/#langchain.messages.AIMessageChunk",
+ "ToolMessage": "langchain/messages/#langchain.messages.ToolMessage",
+ "SystemMessage": "langchain/messages/#langchain.messages.SystemMessage",
+ "trim_messages": "langchain/messages/#langchain.messages.trim_messages",
+ # Content blocks
+ "BaseMessage": "langchain_core/language_models/#langchain_core.messages.BaseMessage",
+ "BaseMessage(content)": "langchain_core/language_models/#langchain_core.messages.BaseMessage.content",
+ "BaseMessage(content_blocks)": "langchain_core/language_models/#langchain_core.messages.BaseMessage.content_blocks",
+ "ContentBlock": "langchain/messages/#langchain.messages.ContentBlock",
+ "TextContentBlock": "langchain/messages/#langchain.messages.TextContentBlock",
+ "ReasoningContentBlock": "langchain/messages/#langchain.messages.ReasoningContentBlock",
+ "NonStandardContentBlock": "langchain/messages/#langchain.messages.NonStandardContentBlock",
+ "ImageContentBlock": "langchain/messages/#langchain.messages.ImageContentBlock",
+ "VideoContentBlock": "langchain/messages/#langchain.messages.VideoContentBlock",
+ "AudioContentBlock": "langchain/messages/#langchain.messages.AudioContentBlock",
+ "PlainTextContentBlock": "langchain/messages/#langchain.messages.PlainTextContentBlock",
+ "FileContentBlock": "langchain/messages/#langchain.messages.FileContentBlock",
+ "ToolCall": "langchain/messages/#langchain.messages.ToolCall",
+ "ToolCallChunk": "langchain/messages/#langchain.messages.ToolCallChunk",
+ "ServerToolCall": "langchain/messages/#langchain.messages.ServerToolCall",
+ "ServerToolCallChunk": "langchain/messages/#langchain.messages.ServerToolCallChunk",
+ "ServerToolResult": "langchain/messages/#langchain.messages.ServerToolResult",
+ # Integrations
"langchain-openai": "integrations/langchain_openai",
"ChatOpenAI": "integrations/langchain_openai/#langchain_openai.ChatOpenAI",
"AzureChatOpenAI": "integrations/langchain_openai/#langchain_openai.AzureChatOpenAI",
+ # Models
+ "init_chat_model": "langchain/models/#langchain.chat_models.init_chat_model",
+ "init_chat_model(model_provider)": "langchain/models/#langchain.chat_models.init_chat_model(model_provider)",
+ "BaseChatModel": "langchain_core/language_models/#langchain_core.language_models.chat_models.BaseChatModel",
+ "BaseChatModel.invoke": "langchain_core/language_models/#langchain_core.language_models.chat_models.BaseChatModel.invoke",
+ "BaseChatModel.stream": "langchain_core/language_models/#langchain_core.language_models.chat_models.BaseChatModel.stream",
+ "BaseChatModel.astream_events": "langchain_core/language_models/#langchain_core.language_models.chat_models.BaseChatModel.astream_events",
+ "BaseChatModel.batch": "langchain_core.language_models.chat_models.BaseChatModel.batch",
+ "BaseChatModel.batch_as_completed": "langchain_core.language_models.chat_models.BaseChatModel.batch_as_completed",
+ "BaseChatModel.bind_tools": "langchain_core/language_models/#langchain_core.language_models.chat_models.BaseChatModel.bind_tools",
+ # Tools
+ "@tool": "langchain/tools/#langchain.tools.tool",
+ "BaseTool": "langchain/tools/#langchain.tools.BaseTool",
+ # Embeddings
+ "init_embeddings": "langchain_core/embeddings/#langchain_core.embeddings.embeddings.Embeddings",
+ "Embeddings": "langchain_core/embeddings/#langchain_core.embeddings.embeddings.Embeddings",
+ # Documents
+ "Document": "langchain_core/documents/#langchain_core.documents.base.Document",
+ # Runnables
+ "RunnableConfig": "langchain_core/runnables/#langchain_core.runnables.RunnableConfig",
},
},
{
diff --git a/reference/python/README.md b/reference/python/README.md
index b19c3ca3a..3699ea3f2 100644
--- a/reference/python/README.md
+++ b/reference/python/README.md
@@ -158,3 +158,228 @@ The `pyproject.dev.toml` file expects repositories to be cloned in this structur
If you only need to work on specific packages, you can comment out the others in `pyproject.dev.toml`.
+---
+
+## MkDocs/mkdocstrings Python Cross-Reference Linking Syntax
+
+### Basic Syntax
+
+The general format for cross-references in mkdocstrings is:
+
+```markdown
+[display text][python.path.to.object]
+```
+
+If you want the object name as the display text, use backticks:
+
+```markdown
+[`object_name`][python.path.to.object]
+```
+
+### Linking to Different Python Objects
+
+#### Modules
+
+```markdown
+[`langchain.agents`][langchain.agents]
+
+# or
+
+[agents module][langchain.agents]
+```
+
+#### Classes
+
+```markdown
+[`ChatOpenAI`][langchain_openai.ChatOpenAI]
+
+# or
+
+[the ChatOpenAI class][langchain_openai.ChatOpenAI]
+```
+
+#### Functions
+
+```markdown
+[`init_chat_model`][langchain.chat_models.init_chat_model]
+
+# or
+
+[initialization function][langchain.chat_models.init_chat_model]
+```
+
+#### Methods
+
+```markdown
+[`invoke`][langchain_openai.ChatOpenAI.invoke]
+
+# or
+
+[the invoke method][langchain_openai.ChatOpenAI.invoke]
+```
+
+#### Class Attributes
+
+```markdown
+[`temperature`][langchain_openai.ChatOpenAI.temperature]
+
+# or
+
+[the temperature attribute][langchain_openai.ChatOpenAI.temperature]
+```
+
+#### Function/Method Parameters
+
+**Note:** Parameter linking requires the `parameter_headings` option to be enabled in the `mkdocstrings` config (in `mkdocs.yml`). This generates permalinks and TOC entries for each parameter, so don't disable it.
+
+Use `(parameter_name)` syntax to link to specific parameters:
+
+```markdown
+[`model_provider`][langchain.chat_models.init_chat_model(model_provider)]
+
+# or
+
+[the model_provider parameter][langchain.chat_models.init_chat_model(model_provider)]
+```
+
+For method parameters:
+
+```markdown
+[`max_tokens`][langchain_openai.ChatOpenAI.invoke(max_tokens)]
+```
+
+For class `__init__` parameters (when using `merge_init_into_class`):
+
+```markdown
+[`temperature`][langchain_openai.ChatOpenAI(temperature)]
+```
+
+For variadic parameters:
+
+```markdown
+[`*args`][package.module.function(*args)]
+[`**kwargs`][package.module.function(**kwargs)]
+```
+
+#### Return Values
+
+Not directly linkable, but you can link to the return type class:
+
+```markdown
+Returns a [`ChatResult`][langchain_core.outputs.ChatResult] object.
+```
+
+#### Nested Classes
+
+```markdown
+[`Config`][langchain_core.runnables.Runnable.Config]
+```
+
+### Advanced Patterns
+
+#### Linking Within Same Module
+
+If you're documenting within the same module, you can use relative paths:
+
+```markdown
+See also [`.other_method`][.other_method]
+```
+
+#### Linking to Exceptions
+
+```markdown
+Raises [`ValueError`][ValueError] if input is invalid.
+Raises [`CustomError`][my_package.exceptions.CustomError]
+```
+
+#### Linking to Type Aliases
+
+```markdown
+[`RunnableConfig`][langchain_core.runnables.config.RunnableConfig]
+```
+
+#### Multiple Links in Args Documentation
+
+```python
+def create_agent(
+ model: BaseChatModel,
+ tools: Sequence[BaseTool],
+) -> AgentExecutor:
+ """
+ Create an agent executor.
+
+ Args:
+ model: A [`BaseChatModel`][langchain_core.language_models.BaseChatModel]
+ instance. You can use [`init_chat_model`][langchain.chat_models.init_chat_model]
+ to initialize from a string identifier (see the
+ [`model_provider`][langchain.chat_models.init_chat_model(model_provider)]
+ parameter for available providers).
+ tools: A sequence of [`BaseTool`][langchain_core.tools.BaseTool] instances.
+ Use the [`@tool`][langchain_core.tools.tool] decorator to create tools.
+
+ Returns:
+ An [`AgentExecutor`][langchain.agents.AgentExecutor] instance.
+ """
+```
+
+### Best Practices
+
+#### 1. Use Backticks for Code Identifiers
+
+```markdown
+✅ [`init_chat_model`][langchain.chat_models.init_chat_model]
+❌ [init_chat_model][langchain.chat_models.init_chat_model]
+```
+
+#### 2. Use Full Paths for Clarity
+
+```markdown
+✅ [`BaseChatModel`][langchain_core.language_models.BaseChatModel]
+❌ [`BaseChatModel`][BaseChatModel] # May not resolve correctly
+```
+
+#### 3. Link to Public APIs Only
+
+Only link to public, exported APIs that users should interact with. Avoid linking to internal implementation details (e.g., objects prefixed with `_`).
+
+#### 4. Use Descriptive Text for Complex References
+
+```markdown
+✅ See the [`model_provider`][langchain.chat_models.init_chat_model(model_provider)]
+ parameter for available providers.
+❌ See [`model_provider`][langchain.chat_models.init_chat_model(model_provider)].
+```
+
+#### 5. Verify Links Build Correctly
+
+Build and manually check the generated HTML to ensure links resolve correctly.
+
+### Quick Reference Table
+
+| Object Type | Syntax | Example |
+|------------|--------|---------|
+| Module | `[text][module.path]` | ``[`agents`][langchain.agents]`` |
+| Class | `[text][module.Class]` | ``[`ChatOpenAI`][langchain_openai.ChatOpenAI]`` |
+| Function | `[text][module.function]` | ``[`init_chat_model`][langchain.chat_models.init_chat_model]`` |
+| Method | `[text][module.Class.method]` | ``[`invoke`][langchain_openai.ChatOpenAI.invoke]`` |
+| Attribute | `[text][module.Class.attr]` | ``[`temperature`][langchain_openai.ChatOpenAI.temperature]`` |
+| Function Param | `[text][module.function(param)]` | ``[`model_provider`][langchain.chat_models.init_chat_model(model_provider)]`` |
+| Method Param | `[text][module.Class.method(param)]` | ``[`max_tokens`][langchain_openai.ChatOpenAI.invoke(max_tokens)]`` |
+| Class Param | `[text][module.Class(param)]` | ``[`temperature`][langchain_openai.ChatOpenAI(temperature)]`` |
+
+### Testing Links
+
+To test if a link will work:
+
+1. Check the object is in `__init__.py` exports
+2. Verify the import path: `from module.path import Object`
+3. Build docs with `--strict` mode
+4. Check the generated HTML for broken links
+
+```bash
+mkdocs build --strict
+mkdocs serve # Preview at http://127.0.0.1:8000/
+```
+
+This syntax works with the `mkdocstrings` plugin for MkDocs using the Python handler. Adjust paths according to your package structure and exports.
+
diff --git a/reference/python/docs/agents.md/index.md b/reference/python/docs/agents.md/index.md
deleted file mode 100644
index e03168c49..000000000
--- a/reference/python/docs/agents.md/index.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Agents
-
-::: langchain.agents
diff --git a/reference/python/docs/index.md b/reference/python/docs/index.md
index 2713ff5f1..27645b099 100644
--- a/reference/python/docs/index.md
+++ b/reference/python/docs/index.md
@@ -2,7 +2,9 @@
title: LangChain Python Reference
---
-Welcome to the [LangChain](https://langchain.com) Python reference documentation! These pages detail the core interfaces you will use when building applications with LangChain and LangGraph. Each section covers a different part of the ecosystem. Use the navigation header to view documentation for specific packages.
+Welcome to the [LangChain](https://langchain.com) Python reference documentation!
+
+These pages detail the core interfaces you will use when building applications with LangChain and LangGraph. Each section covers a different part of the ecosystem. Use the navigation header to view documentation for specific packages.
!!! warning "Work in progress"
This site is a work in progress. If you have any suggestions or find any issues, please [open an issue on GitHub](https://github.com/langchain-ai/docs/issues/new?template=04-reference-docs.yml).
diff --git a/reference/python/docs/langchain/embeddings.md b/reference/python/docs/langchain/embeddings.md
index 1dafb17fc..cce8d3d99 100644
--- a/reference/python/docs/langchain/embeddings.md
+++ b/reference/python/docs/langchain/embeddings.md
@@ -6,5 +6,5 @@
options:
group_by_category: false
members:
- - init_embedding
+ - init_embeddings
- Embeddings
diff --git a/reference/python/docs/langchain/messages.md b/reference/python/docs/langchain/messages.md
index c8eb086ef..dc594aa67 100644
--- a/reference/python/docs/langchain/messages.md
+++ b/reference/python/docs/langchain/messages.md
@@ -1,3 +1,35 @@
# Messages
::: langchain.messages
+ options:
+ summary: true
+ inherited_members: false
+ group_by_category: false
+ members:
+ - AIMessage
+ - AIMessageChunk
+ - HumanMessage
+ - SystemMessage
+ - AnyMessage
+ - MessageLikeRepresentation
+ - ToolMessage
+ - ToolCall
+ - InvalidToolCall
+ - ToolCallChunk
+ - ServerToolCall
+ - ServerToolCallChunk
+ - ServerToolResult
+ - ContentBlock
+ - TextContentBlock
+ - Annotation
+ - Citation
+ - NonStandardAnnotation
+ - ReasoningContentBlock
+ - DataContentBlock
+ - ImageContentBlock
+ - VideoContentBlock
+ - AudioContentBlock
+ - PlainTextContentBlock
+ - FileContentBlock
+ - NonStandardContentBlock
+ - trim_messages
diff --git a/reference/python/docs/langchain/middleware.md b/reference/python/docs/langchain/middleware.md
index 494d828ac..c32739a74 100644
--- a/reference/python/docs/langchain/middleware.md
+++ b/reference/python/docs/langchain/middleware.md
@@ -1,3 +1,27 @@
# Middleware
+
::: langchain.agents.middleware
+ options:
+ summary:
+ # Only render summary sections for classes
+ classes: true
+ group_by_category: false
+ members:
+ - ContextEditingMiddleware
+ - HumanInTheLoopMiddleware
+ - LLMToolSelectorMiddleware
+ - LLMToolEmulator
+ - ModelCallLimitMiddleware
+ - ModelFallbackMiddleware
+ - PIIMiddleware
+ - PIIDetectionError
+ - SummarizationMiddleware
+ - TodoListMiddleware
+ - ToolCallLimitMiddleware
+ - AgentMiddleware
+ - AgentState
+ - ClearToolUsesEdit
+ - InterruptOnConfig
+ - ModelRequest
+ - ModelResponse
diff --git a/reference/python/docs/langchain_core/language_models.md b/reference/python/docs/langchain_core/language_models.md
index 0ddb51c61..bc1fed183 100644
--- a/reference/python/docs/langchain_core/language_models.md
+++ b/reference/python/docs/langchain_core/language_models.md
@@ -8,8 +8,21 @@
options:
members:
- BaseChatModel
+::: langchain_core.messages
+ options:
+ members:
+ - BaseMessage
+ - BaseMessageChunk
::: langchain_core.language_models.fake_chat_models
options:
members:
- GenericFakeChatModel
- ParrotFakeChatModel
+::: langchain_core.language_models.base
+ options:
+ members:
+ - BaseLanguageModel
+ - LanguageModelInput
+ - LanguageModelOutput
+ - LanguageModelLike
+ - LangSmithParams
diff --git a/reference/python/docs/langchain_core/runnables.md b/reference/python/docs/langchain_core/runnables.md
index 880c2de9a..7e63302a1 100644
--- a/reference/python/docs/langchain_core/runnables.md
+++ b/reference/python/docs/langchain_core/runnables.md
@@ -7,3 +7,4 @@
::: langchain_core.runnables.base.RunnableParallel
::: langchain_core.runnables.base.RunnableSequence
::: langchain_core.runnables.base.RunnableSerializable
+::: langchain_core.runnables.RunnableConfig
diff --git a/reference/python/docs/langgraph/index.md b/reference/python/docs/langgraph/index.md
index 96b5dbcea..c28ec4558 100644
--- a/reference/python/docs/langgraph/index.md
+++ b/reference/python/docs/langgraph/index.md
@@ -8,7 +8,9 @@ hide:
[](https://opensource.org/licenses/MIT)
[](https://pypistats.org/packages/langgraph)
-Welcome to the LangGraph reference docs! These pages detail the core interfaces you will use when building with LangGraph. Each section covers a different part of the ecosystem.
+Welcome to the LangGraph reference docs!
+
+These pages detail the core interfaces you will use when building with LangGraph. Each section covers a different part of the ecosystem.
## :simple-langgraph:{ .lg .middle } `langgraph`
diff --git a/reference/python/mkdocs.yml b/reference/python/mkdocs.yml
index 19e8919ac..386e221f2 100644
--- a/reference/python/mkdocs.yml
+++ b/reference/python/mkdocs.yml
@@ -193,6 +193,7 @@ plugins:
# Import external inventories for cross-referencing documentation
- https://docs.python.org/3/objects.inv
- https://docs.pydantic.dev/latest/objects.inv
+ - https://typing-extensions.readthedocs.io/en/latest/objects.inv
options:
# # https://mkdocstrings.github.io/python/usage/#load_external_modules
# load_external_modules: true
@@ -249,7 +250,7 @@ plugins:
# https://mkdocstrings.github.io/python/usage/configuration/docstrings/#merge_init_into_class
# Merges init method with class signature & docstring
- merge_init_into_class: true
+ # merge_init_into_class: true
# https://mkdocstrings.github.io/python/usage/configuration/signatures/#show_signature_annotations
# Show type annotations in signatures
@@ -292,6 +293,10 @@ plugins:
# Uses our custom `deprecated` admonition
# Title and label can be customized if desired
kind: deprecated
+ - griffe_inherited_docstrings:
+ # https://mkdocstrings.github.io/griffe/extensions/official/inherited-docstrings/
+ # Inherit docstrings from parent classes when a method doesn't have its own
+ enabled: true
diff --git a/reference/python/pyproject.dev.toml b/reference/python/pyproject.dev.toml
index 612cfdadc..f0f597d95 100644
--- a/reference/python/pyproject.dev.toml
+++ b/reference/python/pyproject.dev.toml
@@ -22,6 +22,7 @@ dependencies = [
# I copied them over because they were in LangGraph's
# Not sure if each were used/apply to this project
"griffe-warnings-deprecated>=1.1.0,<2.0.0",
+ "griffe-inherited-docstrings>=1.1.2,<2.0.0",
# Python
"mkdocstrings-python>=1.7.0,<2.0.0",
# Format / Lint
diff --git a/reference/python/pyproject.prod.toml b/reference/python/pyproject.prod.toml
index c97e6d965..2da042072 100644
--- a/reference/python/pyproject.prod.toml
+++ b/reference/python/pyproject.prod.toml
@@ -22,6 +22,7 @@ dependencies = [
# I copied them over because they were in LangGraph's
# Not sure if each were used/apply to this project
"griffe-warnings-deprecated>=1.1.0,<2.0.0",
+ "griffe-inherited-docstrings>=1.1.2,<2.0.0",
# Python
"mkdocstrings-python>=1.7.0,<2.0.0",
# Format / Lint
diff --git a/reference/python/pyproject.toml b/reference/python/pyproject.toml
index c97e6d965..2da042072 100644
--- a/reference/python/pyproject.toml
+++ b/reference/python/pyproject.toml
@@ -22,6 +22,7 @@ dependencies = [
# I copied them over because they were in LangGraph's
# Not sure if each were used/apply to this project
"griffe-warnings-deprecated>=1.1.0,<2.0.0",
+ "griffe-inherited-docstrings>=1.1.2,<2.0.0",
# Python
"mkdocstrings-python>=1.7.0,<2.0.0",
# Format / Lint
diff --git a/reference/python/uv.lock b/reference/python/uv.lock
index df4e679a0..ec0ef9a16 100644
--- a/reference/python/uv.lock
+++ b/reference/python/uv.lock
@@ -1198,6 +1198,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/2a/b1/9ff6578d789a89812ff21e4e0f80ffae20a65d5dd84e7a17873fe3b365be/griffe-1.14.0-py3-none-any.whl", hash = "sha256:0e9d52832cccf0f7188cfe585ba962d2674b241c01916d780925df34873bceb0", size = 144439, upload-time = "2025-09-05T15:02:27.511Z" },
]
+[[package]]
+name = "griffe-inherited-docstrings"
+version = "1.1.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "griffe" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/28/02/36d9929bb8ad929941b27117aba4d850b8a9f2c12f982e2b59ab4bc4d80b/griffe_inherited_docstrings-1.1.2.tar.gz", hash = "sha256:0a489ac4bb6093a7789d014b23083b4cbb1ab139f0b8dd878c8f3a4f8e892624", size = 27541, upload-time = "2025-09-05T15:17:13.081Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/ad/12/4c67b644dc5965000874908dfa89d05ba878d5ca22a9b4ebfbfadc41467b/griffe_inherited_docstrings-1.1.2-py3-none-any.whl", hash = "sha256:b1cf61fff6e12a769db75de5718ddbbb5361b2cc4155af1f1ad86c13f56c197b", size = 6709, upload-time = "2025-09-05T15:17:11.853Z" },
+]
+
[[package]]
name = "griffe-warnings-deprecated"
version = "1.1.0"
@@ -1671,7 +1683,7 @@ wheels = [
[[package]]
name = "langchain"
version = "1.0.0a15"
-source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Flangchain_v1#26e0a00c4c5ffb1fd9a53bd71ce489b4664a8583" }
+source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Flangchain_v1#707e96c5411cadaedb0e4c8bbcc5f88bd26684ca" }
dependencies = [
{ name = "langchain-core" },
{ name = "langgraph" },
@@ -1681,7 +1693,7 @@ dependencies = [
[[package]]
name = "langchain-anthropic"
version = "1.0.0a5"
-source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fanthropic#26e0a00c4c5ffb1fd9a53bd71ce489b4664a8583" }
+source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fanthropic#707e96c5411cadaedb0e4c8bbcc5f88bd26684ca" }
dependencies = [
{ name = "anthropic" },
{ name = "langchain-core" },
@@ -1701,7 +1713,7 @@ dependencies = [
[[package]]
name = "langchain-aws"
version = "1.0.0a1"
-source = { git = "https://github.com/langchain-ai/langchain-aws.git?subdirectory=libs%2Faws#f36bec98f64b65ad4c884b29e5b79469df08b3c3" }
+source = { git = "https://github.com/langchain-ai/langchain-aws.git?subdirectory=libs%2Faws#53298392adfbe2c02fcc14cf75383920b7935757" }
dependencies = [
{ name = "boto3" },
{ name = "langchain-core" },
@@ -1712,7 +1724,7 @@ dependencies = [
[[package]]
name = "langchain-chroma"
version = "1.0.0a1"
-source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fchroma#26e0a00c4c5ffb1fd9a53bd71ce489b4664a8583" }
+source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fchroma#707e96c5411cadaedb0e4c8bbcc5f88bd26684ca" }
dependencies = [
{ name = "chromadb" },
{ name = "langchain-core" },
@@ -1722,7 +1734,7 @@ dependencies = [
[[package]]
name = "langchain-classic"
version = "1.0.0a1"
-source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Flangchain#26e0a00c4c5ffb1fd9a53bd71ce489b4664a8583" }
+source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Flangchain#707e96c5411cadaedb0e4c8bbcc5f88bd26684ca" }
dependencies = [
{ name = "langchain-core" },
{ name = "langchain-text-splitters" },
@@ -1759,7 +1771,7 @@ wheels = [
[[package]]
name = "langchain-core"
version = "1.0.0rc1"
-source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fcore#26e0a00c4c5ffb1fd9a53bd71ce489b4664a8583" }
+source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fcore#707e96c5411cadaedb0e4c8bbcc5f88bd26684ca" }
dependencies = [
{ name = "jsonpatch" },
{ name = "langsmith" },
@@ -1773,7 +1785,7 @@ dependencies = [
[[package]]
name = "langchain-deepseek"
version = "1.0.0a1"
-source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fdeepseek#26e0a00c4c5ffb1fd9a53bd71ce489b4664a8583" }
+source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fdeepseek#707e96c5411cadaedb0e4c8bbcc5f88bd26684ca" }
dependencies = [
{ name = "langchain-core" },
{ name = "langchain-openai" },
@@ -1782,7 +1794,7 @@ dependencies = [
[[package]]
name = "langchain-exa"
version = "1.0.0a1"
-source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fexa#26e0a00c4c5ffb1fd9a53bd71ce489b4664a8583" }
+source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fexa#707e96c5411cadaedb0e4c8bbcc5f88bd26684ca" }
dependencies = [
{ name = "exa-py" },
{ name = "langchain-core" },
@@ -1791,7 +1803,7 @@ dependencies = [
[[package]]
name = "langchain-fireworks"
version = "1.0.0a1"
-source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Ffireworks#26e0a00c4c5ffb1fd9a53bd71ce489b4664a8583" }
+source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Ffireworks#707e96c5411cadaedb0e4c8bbcc5f88bd26684ca" }
dependencies = [
{ name = "aiohttp" },
{ name = "fireworks-ai" },
@@ -1845,7 +1857,7 @@ dependencies = [
[[package]]
name = "langchain-groq"
version = "1.0.0a1"
-source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fgroq#26e0a00c4c5ffb1fd9a53bd71ce489b4664a8583" }
+source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fgroq#707e96c5411cadaedb0e4c8bbcc5f88bd26684ca" }
dependencies = [
{ name = "groq" },
{ name = "langchain-core" },
@@ -1877,7 +1889,7 @@ dependencies = [
[[package]]
name = "langchain-nomic"
version = "1.0.0a1"
-source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fnomic#26e0a00c4c5ffb1fd9a53bd71ce489b4664a8583" }
+source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fnomic#707e96c5411cadaedb0e4c8bbcc5f88bd26684ca" }
dependencies = [
{ name = "langchain-core" },
{ name = "nomic" },
@@ -1887,7 +1899,7 @@ dependencies = [
[[package]]
name = "langchain-ollama"
version = "1.0.0a1"
-source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Follama#26e0a00c4c5ffb1fd9a53bd71ce489b4664a8583" }
+source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Follama#707e96c5411cadaedb0e4c8bbcc5f88bd26684ca" }
dependencies = [
{ name = "langchain-core" },
{ name = "ollama" },
@@ -1896,7 +1908,7 @@ dependencies = [
[[package]]
name = "langchain-openai"
version = "1.0.0a4"
-source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fopenai#26e0a00c4c5ffb1fd9a53bd71ce489b4664a8583" }
+source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fopenai#707e96c5411cadaedb0e4c8bbcc5f88bd26684ca" }
dependencies = [
{ name = "langchain-core" },
{ name = "openai" },
@@ -1906,7 +1918,7 @@ dependencies = [
[[package]]
name = "langchain-perplexity"
version = "1.0.0a1"
-source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fperplexity#26e0a00c4c5ffb1fd9a53bd71ce489b4664a8583" }
+source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fperplexity#707e96c5411cadaedb0e4c8bbcc5f88bd26684ca" }
dependencies = [
{ name = "langchain-core" },
{ name = "openai" },
@@ -1915,7 +1927,7 @@ dependencies = [
[[package]]
name = "langchain-prompty"
version = "1.0.0a1"
-source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fprompty#26e0a00c4c5ffb1fd9a53bd71ce489b4664a8583" }
+source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fprompty#707e96c5411cadaedb0e4c8bbcc5f88bd26684ca" }
dependencies = [
{ name = "langchain-core" },
{ name = "pyyaml" },
@@ -1924,7 +1936,7 @@ dependencies = [
[[package]]
name = "langchain-qdrant"
version = "1.0.0a1"
-source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fqdrant#26e0a00c4c5ffb1fd9a53bd71ce489b4664a8583" }
+source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fqdrant#707e96c5411cadaedb0e4c8bbcc5f88bd26684ca" }
dependencies = [
{ name = "langchain-core" },
{ name = "pydantic" },
@@ -1936,6 +1948,7 @@ name = "langchain-reference-docs"
version = "0.1.0"
source = { virtual = "." }
dependencies = [
+ { name = "griffe-inherited-docstrings" },
{ name = "griffe-warnings-deprecated" },
{ name = "langchain" },
{ name = "langchain-anthropic" },
@@ -1984,6 +1997,7 @@ dependencies = [
[package.metadata]
requires-dist = [
+ { name = "griffe-inherited-docstrings", specifier = ">=1.1.2,<2.0.0" },
{ name = "griffe-warnings-deprecated", specifier = ">=1.1.0,<2.0.0" },
{ name = "langchain", git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Flangchain_v1" },
{ name = "langchain-anthropic", git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fanthropic" },
@@ -2044,7 +2058,7 @@ dependencies = [
[[package]]
name = "langchain-text-splitters"
version = "1.0.0a1"
-source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Ftext-splitters#26e0a00c4c5ffb1fd9a53bd71ce489b4664a8583" }
+source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Ftext-splitters#707e96c5411cadaedb0e4c8bbcc5f88bd26684ca" }
dependencies = [
{ name = "langchain-core" },
]
@@ -2052,7 +2066,7 @@ dependencies = [
[[package]]
name = "langchain-xai"
version = "1.0.0a1"
-source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fxai#26e0a00c4c5ffb1fd9a53bd71ce489b4664a8583" }
+source = { git = "https://github.com/langchain-ai/langchain.git?subdirectory=libs%2Fpartners%2Fxai#707e96c5411cadaedb0e4c8bbcc5f88bd26684ca" }
dependencies = [
{ name = "aiohttp" },
{ name = "langchain-core" },
diff --git a/src/langsmith/human-in-the-loop-time-travel.mdx b/src/langsmith/human-in-the-loop-time-travel.mdx
index 78f4ed78f..2122f2954 100644
--- a/src/langsmith/human-in-the-loop-time-travel.mdx
+++ b/src/langsmith/human-in-the-loop-time-travel.mdx
@@ -27,7 +27,7 @@ To time travel using the LangGraph Server API (via the LangGraph SDK):
joke: NotRequired[str]
llm = init_chat_model(
- "anthropic:claude-3-7-sonnet-latest",
+ "anthropic:claude-sonnet-4-5-20250929",
temperature=0,
)
diff --git a/src/langsmith/observability-studio.mdx b/src/langsmith/observability-studio.mdx
index afe65ce4e..1d67aa5f9 100644
--- a/src/langsmith/observability-studio.mdx
+++ b/src/langsmith/observability-studio.mdx
@@ -78,7 +78,7 @@ class Configuration(BaseModel):
model: Annotated[
Literal[
- "anthropic/claude-3-7-sonnet-latest",
+ "anthropic/claude-sonnet-4-5-20250929",
"anthropic/claude-3-5-haiku-latest",
"openai/o1",
"openai/gpt-4o-mini",
diff --git a/src/langsmith/streaming.mdx b/src/langsmith/streaming.mdx
index b35c1d32f..6022aa118 100644
--- a/src/langsmith/streaming.mdx
+++ b/src/langsmith/streaming.mdx
@@ -668,7 +668,7 @@ The streamed output from [`messages-tuple` mode](#supported-stream-modes) is a t
)
```
- 1. Note that the message events are emitted even when the LLM is run using `.invoke` rather than `.stream`.
+ 1. Note that the message events are emitted even when the LLM is run using `invoke` rather than `stream`.
diff --git a/src/oss/concepts/context.mdx b/src/oss/concepts/context.mdx
index e98b1d10c..77e50b4d6 100644
--- a/src/oss/concepts/context.mdx
+++ b/src/oss/concepts/context.mdx
@@ -70,7 +70,7 @@ graph.invoke(
return [{"role": "system", "content": system_msg}] + state["messages"]
agent = create_react_agent(
- model="anthropic:claude-3-7-sonnet-latest",
+ model="anthropic:claude-sonnet-4-5-20250929",
tools=[get_weather],
prompt=prompt,
context_schema=ContextSchema
@@ -166,7 +166,7 @@ await graph.invoke(
return f"You are a helpful assistant. User's name is {user_name}"
agent = create_agent(
- model="anthropic:claude-3-7-sonnet-latest",
+ model="anthropic:claude-sonnet-4-5-20250929",
tools=[...],
state_schema=CustomState, # [!code highlight]
middleware=[personalized_prompt], # [!code highlight]
@@ -200,7 +200,7 @@ await graph.invoke(
});
const agent = createAgent({ // [!code highlight]
- model: "anthropic:claude-3-7-sonnet-latest",
+ model: "anthropic:claude-sonnet-4-5-20250929",
tools: [/* your tools here */],
middleware: [personalizedPrompt] as const, // [!code highlight]
});
diff --git a/src/oss/javascript/integrations/chat/openai.mdx b/src/oss/javascript/integrations/chat/openai.mdx
index 6472e2022..45a9f5b3d 100644
--- a/src/oss/javascript/integrations/chat/openai.mdx
+++ b/src/oss/javascript/integrations/chat/openai.mdx
@@ -198,7 +198,7 @@ await fineTunedLlm.invoke("Hi there!");
## Generation metadata
-If you need additional information like logprobs or token usage, these will be returned directly in the `.invoke` response within the `response_metadata` field on the message.
+If you need additional information like logprobs or token usage, these will be returned directly in the `invoke` response within the `response_metadata` field on the message.
**Requires `@langchain/core` version >=0.1.48.**
diff --git a/src/oss/langchain/agents.mdx b/src/oss/langchain/agents.mdx
index 51d271a6f..505b1c897 100644
--- a/src/oss/langchain/agents.mdx
+++ b/src/oss/langchain/agents.mdx
@@ -15,7 +15,7 @@ import AlphaCalloutJS from '/snippets/alpha-lc-callout-js.mdx';
Agents combine language models with tools to create systems that can reason about tasks, decide which tools to use, and iteratively work towards solutions.
:::python
-`create_agent()` provides a production-ready agent implementation.
+@[`create_agent`] provides a production-ready agent implementation.
:::
:::js
`createAgent()` provides a production-ready agent implementation.
@@ -56,13 +56,13 @@ graph TD
:::python
-`create_agent()` builds a **graph**-based agent runtime using [LangGraph](/oss/langgraph/overview). A graph consists of nodes (steps) and edges (connections) that define how your agent processes information. The agent moves through this graph, executing nodes like the model node (which calls the model), the tools node (which executes tools), or middleware.
+@[`create_agent`] builds a **graph**-based agent runtime using [LangGraph](/oss/langgraph/overview). A graph consists of nodes (steps) and edges (connections) that define how your agent processes information. The agent moves through this graph, executing nodes like the model node (which calls the model), the tools node (which executes tools), or middleware.
:::
:::js
`createAgent()` builds a **graph**-based agent runtime using [LangGraph](/oss/langgraph/overview). A graph consists of nodes (steps) and edges (connections) that define how your agent processes information. The agent moves through this graph, executing nodes like the model node (which calls the model), the tools node (which executes tools), or middleware.
:::
-Learn more about the [graph API](/oss/langgraph/graph-api).
+Learn more about the [Graph API](/oss/langgraph/graph-api).
@@ -99,7 +99,7 @@ const agent = createAgent({
:::python
- Model identifier strings support automatic inference (e.g., `"gpt-5"` will be inferred as `"openai:gpt-5"`).
+ Model identifier strings support automatic inference (e.g., `"gpt-5"` will be inferred as `"openai:gpt-5"`). Refer to the @[reference][init_chat_model(model_provider)] to see a full list of model identifier strings.
For more control over the model configuration, initialize a model instance directly using the provider package:
@@ -181,7 +181,7 @@ agent = create_agent(
```
-Pre-bound models (models with `.bind_tools()` already called) are not supported when using structured output. If you need dynamic model selection with structured output, ensure the models passed to the middleware are not pre-bound.
+Pre-bound models (models with @[`bind_tools`][BaseChatModel.bind_tools] already called) are not supported when using structured output. If you need dynamic model selection with structured output, ensure the models passed to the middleware are not pre-bound.
:::
@@ -321,7 +321,7 @@ agent = create_agent(
)
```
-The agent will return a `ToolMessage` with the custom error message when a tool fails:
+The agent will return a @[`ToolMessage`] with the custom error message when a tool fails:
```python
# result["messages"]
@@ -376,7 +376,7 @@ Learn more about tools in [tools](/oss/langchain/tools).
#### Tool use in the ReAct loop
-Agents follow the ReAct (*Reasoning* + *Acting*) pattern, alternating between brief reasoning steps with targeted tool calls and feeding the resulting observations into subsequent decisions until they can deliver a final answer.
+Agents follow the ReAct ("Reasoning + Acting") pattern, alternating between brief reasoning steps with targeted tool calls and feeding the resulting observations into subsequent decisions until they can deliver a final answer.
Prompt: Identify the current most popular wireless headphones and verify availability.
@@ -437,7 +437,7 @@ To learn more about tools, see [Tools](/oss/langchain/tools).
### System prompt
-You can shape how your agent approaches tasks by providing a prompt. The `system_prompt` parameter can be provided as a string:
+You can shape how your agent approaches tasks by providing a prompt. The @[`system_prompt`] parameter can be provided as a string:
:::python
```python wrap
@@ -669,7 +669,7 @@ Information stored in the state can be thought of as the [short-term memory](/os
:::python
-Custom state schemas must extend `AgentState` as a `TypedDict`. Define them in middleware using the `state_schema` attribute:
+Custom state schemas must extend @[`AgentState`] as a `TypedDict`. Define them in middleware using the `state_schema` attribute:
```python wrap
from typing import Annotated, TypedDict
@@ -728,7 +728,7 @@ To learn more about memory, see [Memory](/oss/concepts/memory). For information
### Streaming
-We've seen how the agent can be called with `.invoke` to get a final response. If the agent executes multiple steps, this may take a while. To show intermediate progress, we can stream back messages as they occur.
+We've seen how the agent can be called with `invoke` to get a final response. If the agent executes multiple steps, this may take a while. To show intermediate progress, we can stream back messages as they occur.
:::python
```python wrap
diff --git a/src/oss/langchain/long-term-memory.mdx b/src/oss/langchain/long-term-memory.mdx
index d7a362f6c..b1a429fc0 100644
--- a/src/oss/langchain/long-term-memory.mdx
+++ b/src/oss/langchain/long-term-memory.mdx
@@ -142,7 +142,7 @@ def get_user_info() -> str:
return str(user_info.value) if user_info else "Unknown user"
agent = create_agent(
- model="anthropic:claude-3-7-sonnet-latest",
+ model="anthropic:claude-sonnet-4-5-20250929",
tools=[get_user_info],
# Pass store to agent - enables agent to access store when running tools
store=store, # [!code highlight]
@@ -259,7 +259,7 @@ def save_user_info(user_info: UserInfo) -> str:
return "Successfully saved user info."
agent = create_agent(
- model="anthropic:claude-3-7-sonnet-latest",
+ model="anthropic:claude-sonnet-4-5-20250929",
tools=[save_user_info],
store=store, # [!code highlight]
context_schema=Context
diff --git a/src/oss/langchain/mcp.mdx b/src/oss/langchain/mcp.mdx
index 6490d5894..ab8d7350c 100644
--- a/src/oss/langchain/mcp.mdx
+++ b/src/oss/langchain/mcp.mdx
@@ -92,7 +92,7 @@ client = MultiServerMCPClient( # [!code highlight]
tools = await client.get_tools() # [!code highlight]
agent = create_agent(
- "anthropic:claude-3-7-sonnet-latest",
+ "anthropic:claude-sonnet-4-5-20250929",
tools # [!code highlight]
)
math_response = await agent.ainvoke(
diff --git a/src/oss/langchain/messages.mdx b/src/oss/langchain/messages.mdx
index c5ad28382..6c4ac8c0c 100644
--- a/src/oss/langchain/messages.mdx
+++ b/src/oss/langchain/messages.mdx
@@ -1788,8 +1788,10 @@ const imageBlock: ContentBlock.Multimodal.Image = {
```
:::
+View the canonical type definitions in the @[API reference][langchain.messages].
+
- Content blocks were introduced as a new property on messages in LangChain v1 to standardize content formats across providers while maintaining backward compatibility with existing code. Content blocks are not a replacement for the `content` property, but rather a new property that can be used to access the content of a message in a standardized format.
+ Content blocks were introduced as a new property on messages in LangChain v1 to standardize content formats across providers while maintaining backward compatibility with existing code. Content blocks are not a replacement for the @[`content`][BaseMessage(content)] property, but rather a new property that can be used to access the content of a message in a standardized format.
## Use with chat models
diff --git a/src/oss/langchain/observability.mdx b/src/oss/langchain/observability.mdx
index 75d549fbc..0d0361b17 100644
--- a/src/oss/langchain/observability.mdx
+++ b/src/oss/langchain/observability.mdx
@@ -13,7 +13,7 @@ import observability from '/snippets/oss/observability.mdx';
:::
-Observability is crucial for understanding how your agents behave in production. With LangChain's `create_agent()`, you get built-in observability through [LangSmith](https://smith.langchain.com/) - a powerful platform for tracing, debugging, evaluating, and monitoring your LLM applications.
+Observability is crucial for understanding how your agents behave in production. With LangChain's @[`create_agent`], you get built-in observability through [LangSmith](https://smith.langchain.com/) - a powerful platform for tracing, debugging, evaluating, and monitoring your LLM applications.
Traces capture every step your agent takes, from the initial user input to the final response, including all tool calls, model interactions, and decision points. This enables you to debug your agents, evaluate performance, and monitor usage.
diff --git a/src/oss/langchain/overview.mdx b/src/oss/langchain/overview.mdx
index a37b9a89c..4e428f153 100644
--- a/src/oss/langchain/overview.mdx
+++ b/src/oss/langchain/overview.mdx
@@ -75,7 +75,7 @@ def get_weather(city: str) -> str:
return f"It's always sunny in {city}!"
agent = create_agent(
- model="anthropic:claude-3-7-sonnet-latest",
+ model="anthropic:claude-sonnet-4-5-20250929",
tools=[get_weather],
system_prompt="You are a helpful assistant",
)
@@ -105,7 +105,7 @@ const getWeather = tool(
);
const agent = createAgent({
- model: "anthropic:claude-3-7-sonnet-latest",
+ model: "anthropic:claude-sonnet-4-5-20250929",
tools: [getWeather],
});
diff --git a/src/oss/langchain/quickstart.mdx b/src/oss/langchain/quickstart.mdx
index 50f7fff62..c8c2b16b9 100644
--- a/src/oss/langchain/quickstart.mdx
+++ b/src/oss/langchain/quickstart.mdx
@@ -144,7 +144,7 @@ Let's walk through each step:
Tools should be well-documented: their name, description, and argument names become part of the model's prompt.
- We've defined them here as plain Python functions, but LangChain's @[@tool decorator][@tool] decorator is often used to add extra metadata.
+ We've defined them here as plain Python functions, but LangChain's @[`@tool` decorator][@tool] is often used to add extra metadata.
:::
diff --git a/src/oss/langchain/short-term-memory.mdx b/src/oss/langchain/short-term-memory.mdx
index a5843c7d6..fb4b1b66a 100644
--- a/src/oss/langchain/short-term-memory.mdx
+++ b/src/oss/langchain/short-term-memory.mdx
@@ -67,7 +67,7 @@ import { MemorySaver } from "@langchain/langgraph";
const checkpointer = new MemorySaver();
const agent = createAgent({
- model: "anthropic:claude-3-7-sonnet-latest",
+ model: "anthropic:claude-sonnet-4-5-20250929",
tools: [],
checkpointer,
});
diff --git a/src/oss/langchain/streaming.mdx b/src/oss/langchain/streaming.mdx
index b3168c0d5..e8bf36bb0 100644
--- a/src/oss/langchain/streaming.mdx
+++ b/src/oss/langchain/streaming.mdx
@@ -335,7 +335,7 @@ def get_weather(city: str) -> str:
return f"It's always sunny in {city}!"
agent = create_agent(
- model="anthropic:claude-3-7-sonnet-latest",
+ model="anthropic:claude-sonnet-4-5-20250929",
tools=[get_weather],
)
diff --git a/src/oss/langchain/structured-output.mdx b/src/oss/langchain/structured-output.mdx
index f67fb47e7..b9cf0d42a 100644
--- a/src/oss/langchain/structured-output.mdx
+++ b/src/oss/langchain/structured-output.mdx
@@ -16,7 +16,7 @@ import AlphaCalloutJS from '/snippets/alpha-lc-callout-js.mdx';
Structured output allows agents to return data in a specific, predictable format. Instead of parsing natural language responses, you get structured data in the form of JSON objects, Pydantic models, or dataclasses that your application can directly use.
-LangChain's `create_agent()` handles structured output automatically. The user sets their desired structured output schema, and when the model generates the structured data, it's captured, validated, and returned in the `'structured_response'` key of the agent's state.
+LangChain's @[`create_agent`] handles structured output automatically. The user sets their desired structured output schema, and when the model generates the structured data, it's captured, validated, and returned in the `'structured_response'` key of the agent's state.
```python
def create_agent(
diff --git a/src/oss/langgraph/add-memory.mdx b/src/oss/langgraph/add-memory.mdx
index f6317451e..b939ca7d6 100644
--- a/src/oss/langgraph/add-memory.mdx
+++ b/src/oss/langgraph/add-memory.mdx
@@ -1257,7 +1257,7 @@ const builder = new StateGraph(MessagesZodState)
from langchain.chat_models import init_chat_model
from langgraph.graph import StateGraph, START, MessagesState
- model = init_chat_model("anthropic:claude-3-7-sonnet-latest")
+ model = init_chat_model("anthropic:claude-sonnet-4-5-20250929")
summarization_model = model.bind(max_tokens=128)
def call_model(state: MessagesState):
@@ -1628,7 +1628,7 @@ const summarizeConversation = async (state: z.infer) => {
from langgraph.checkpoint.memory import InMemorySaver
from langmem.short_term import SummarizationNode, RunningSummary # [!code highlight]
- model = init_chat_model("anthropic:claude-3-7-sonnet-latest")
+ model = init_chat_model("anthropic:claude-sonnet-4-5-20250929")
summarization_model = model.bind(max_tokens=128)
class State(MessagesState):
diff --git a/src/oss/langgraph/graph-api.mdx b/src/oss/langgraph/graph-api.mdx
index faca8381b..ab29bc357 100644
--- a/src/oss/langgraph/graph-api.mdx
+++ b/src/oss/langgraph/graph-api.mdx
@@ -1101,7 +1101,7 @@ graph.addNode("myNode", (state, runtime) => {
### Recursion Limit
:::python
-The recursion limit sets the maximum number of [super-steps](#graphs) the graph can execute during a single execution. Once the limit is reached, LangGraph will raise `GraphRecursionError`. By default this value is set to 25 steps. The recursion limit can be set on any graph at runtime, and is passed to `.invoke`/`.stream` via the config dictionary. Importantly, `recursion_limit` is a standalone `config` key and should not be passed inside the `configurable` key as all other user-defined configuration. See the example below:
+The recursion limit sets the maximum number of [super-steps](#graphs) the graph can execute during a single execution. Once the limit is reached, LangGraph will raise `GraphRecursionError`. By default this value is set to 25 steps. The recursion limit can be set on any graph at runtime, and is passed to `invoke`/`stream` via the config dictionary. Importantly, `recursion_limit` is a standalone `config` key and should not be passed inside the `configurable` key as all other user-defined configuration. See the example below:
```python
graph.invoke(inputs, config={"recursion_limit": 5}, context={"llm": "anthropic"})
@@ -1111,7 +1111,7 @@ Read [this how-to](/oss/langgraph/graph-api#impose-a-recursion-limit) to learn m
:::
:::js
-The recursion limit sets the maximum number of [super-steps](#graphs) the graph can execute during a single execution. Once the limit is reached, LangGraph will raise `GraphRecursionError`. By default this value is set to 25 steps. The recursion limit can be set on any graph at runtime, and is passed to `.invoke`/`.stream` via the config object. Importantly, `recursionLimit` is a standalone `config` key and should not be passed inside the `configurable` key as all other user-defined configuration. See the example below:
+The recursion limit sets the maximum number of [super-steps](#graphs) the graph can execute during a single execution. Once the limit is reached, LangGraph will raise `GraphRecursionError`. By default this value is set to 25 steps. The recursion limit can be set on any graph at runtime, and is passed to `invoke`/`stream` via the config object. Importantly, `recursionLimit` is a standalone `config` key and should not be passed inside the `configurable` key as all other user-defined configuration. See the example below:
```typescript
await graph.invoke(inputs, {
diff --git a/src/oss/langgraph/quickstart.mdx b/src/oss/langgraph/quickstart.mdx
index a353bc4c9..ffbad550b 100644
--- a/src/oss/langgraph/quickstart.mdx
+++ b/src/oss/langgraph/quickstart.mdx
@@ -38,7 +38,7 @@ from langchain.tools import tool
from langchain.chat_models import init_chat_model
llm = init_chat_model(
- "anthropic:claude-3-7-sonnet-latest",
+ "anthropic:claude-sonnet-4-5-20250929",
temperature=0
)
@@ -92,7 +92,7 @@ import { tool } from "@langchain/core/tools";
import * as z from "zod";
const llm = new ChatAnthropic({
- model: "claude-3-7-sonnet-latest",
+ model: "claude-sonnet-4-5-20250929",
temperature: 0,
});
@@ -378,7 +378,7 @@ from langchain.tools import tool
from langchain.chat_models import init_chat_model
llm = init_chat_model(
- "anthropic:claude-3-7-sonnet-latest",
+ "anthropic:claude-sonnet-4-5-20250929",
temperature=0
)
@@ -529,7 +529,7 @@ import { tool } from "@langchain/core/tools";
import * as z from "zod";
const llm = new ChatAnthropic({
- model: "claude-3-7-sonnet-latest",
+ model: "claude-sonnet-4-5-20250929",
temperature: 0,
});
@@ -666,7 +666,7 @@ from langchain.tools import tool
from langchain.chat_models import init_chat_model
llm = init_chat_model(
- "anthropic:claude-3-7-sonnet-latest",
+ "anthropic:claude-sonnet-4-5-20250929",
temperature=0
)
@@ -729,7 +729,7 @@ import { tool } from "@langchain/core/tools";
import * as z from "zod";
const llm = new ChatAnthropic({
- model: "claude-3-7-sonnet-latest",
+ model: "claude-sonnet-4-5-20250929",
temperature: 0,
});
@@ -925,7 +925,7 @@ from langchain.tools import tool
from langchain.chat_models import init_chat_model
llm = init_chat_model(
- "anthropic:claude-3-7-sonnet-latest",
+ "anthropic:claude-sonnet-4-5-20250929",
temperature=0
)
@@ -1038,7 +1038,7 @@ import { tool } from "@langchain/core/tools";
import * as z from "zod";
const llm = new ChatAnthropic({
- model: "claude-3-7-sonnet-latest",
+ model: "claude-sonnet-4-5-20250929",
temperature: 0,
});
diff --git a/src/oss/langgraph/streaming.mdx b/src/oss/langgraph/streaming.mdx
index b54582412..c038f62cb 100644
--- a/src/oss/langgraph/streaming.mdx
+++ b/src/oss/langgraph/streaming.mdx
@@ -1371,7 +1371,7 @@ Set `disable_streaming=True` when initializing the model.
from langchain.chat_models import init_chat_model
model = init_chat_model(
- "anthropic:claude-3-7-sonnet-latest",
+ "anthropic:claude-sonnet-4-5-20250929",
# Set disable_streaming=True to disable streaming for the chat model
disable_streaming=True # [!code highlight]
diff --git a/src/oss/langgraph/test.mdx b/src/oss/langgraph/test.mdx
index a9454fe1b..c66a83c2f 100644
--- a/src/oss/langgraph/test.mdx
+++ b/src/oss/langgraph/test.mdx
@@ -14,7 +14,7 @@ import AlphaCalloutJS from '/snippets/alpha-lc-callout-js.mdx';
After you've prototyped your LangGraph agent, a natural next step is to add tests. This guide covers some useful patterns you can use when writing unit tests.
-Note that this guide is LangGraph-specific and covers scenarios around graphs with custom structures - if you are just getting started, check out [this section](/oss/langchain/test/) that uses LangChain's built-in `create_agent()` instead.
+Note that this guide is LangGraph-specific and covers scenarios around graphs with custom structures - if you are just getting started, check out [this section](/oss/langchain/test/) that uses LangChain's built-in @[`create_agent`] instead.
## Prerequisites
diff --git a/src/oss/langgraph/use-time-travel.mdx b/src/oss/langgraph/use-time-travel.mdx
index 82fc9a84d..086b73cee 100644
--- a/src/oss/langgraph/use-time-travel.mdx
+++ b/src/oss/langgraph/use-time-travel.mdx
@@ -107,7 +107,7 @@ class State(TypedDict):
llm = init_chat_model(
- "anthropic:claude-3-7-sonnet-latest",
+ "anthropic:claude-sonnet-4-5-20250929",
temperature=0,
)
diff --git a/src/oss/python/integrations/chat/anthropic.mdx b/src/oss/python/integrations/chat/anthropic.mdx
index 25c15417a..dd4205469 100644
--- a/src/oss/python/integrations/chat/anthropic.mdx
+++ b/src/oss/python/integrations/chat/anthropic.mdx
@@ -278,7 +278,7 @@ import json
from langchain_anthropic import ChatAnthropic
llm = ChatAnthropic(
- model="claude-3-7-sonnet-latest",
+ model="claude-sonnet-4-5-20250929",
max_tokens=5000,
thinking={"type": "enabled", "budget_tokens": 2000},
)
diff --git a/src/oss/python/integrations/chat/nvidia_ai_endpoints.mdx b/src/oss/python/integrations/chat/nvidia_ai_endpoints.mdx
index bc6a4d57f..d85de618b 100644
--- a/src/oss/python/integrations/chat/nvidia_ai_endpoints.mdx
+++ b/src/oss/python/integrations/chat/nvidia_ai_endpoints.mdx
@@ -6,8 +6,7 @@ This will help you get started with NVIDIA [chat models](/oss/langchain/models).
## Overview
-The `langchain-nvidia-ai-endpoints` package contains LangChain integrations building applications with models on
-NVIDIA NIM inference microservice. NIM supports models across domains like chat, embedding, and re-ranking models
+The `langchain-nvidia-ai-endpoints` package contains LangChain integrations building applications with models on NVIDIA NIM inference microservice. NIM supports models across domains like chat, embedding, and re-ranking models
from the community as well as NVIDIA. These models are optimized by NVIDIA to deliver the best performance on NVIDIA
accelerated infrastructure and deployed as a NIM, an easy-to-use, prebuilt containers that deploy anywhere using a single
command on NVIDIA accelerated infrastructure.
diff --git a/src/oss/python/integrations/chat/reka.mdx b/src/oss/python/integrations/chat/reka.mdx
index 6868e1366..b4794f206 100644
--- a/src/oss/python/integrations/chat/reka.mdx
+++ b/src/oss/python/integrations/chat/reka.mdx
@@ -279,7 +279,7 @@ We can check out the LangSmith trace to make sure it's calling the search tool e
[smith.langchain.com/public/013ef704-654b-4447-8428-637b343d646e/r](https://smith.langchain.com/public/013ef704-654b-4447-8428-637b343d646e/r)
-We've seen how the agent can be called with `.invoke` to get a final response. If the agent executes multiple steps, this may take a while. To show intermediate progress, we can stream back messages as they occur.
+We've seen how the agent can be called with `invoke` to get a final response. If the agent executes multiple steps, this may take a while. To show intermediate progress, we can stream back messages as they occur.
```python
for chunk in agent_executor.stream(
diff --git a/src/oss/python/integrations/providers/all_providers.mdx b/src/oss/python/integrations/providers/all_providers.mdx
index 63e8ceb76..2eacd9baa 100644
--- a/src/oss/python/integrations/providers/all_providers.mdx
+++ b/src/oss/python/integrations/providers/all_providers.mdx
@@ -1067,7 +1067,7 @@ Browse the complete collection of integrations available for Python. LangChain P
Fast inference platform for open-source models.
@@ -1267,7 +1267,7 @@ Browse the complete collection of integrations available for Python. LangChain P
Ultra-fast inference with specialized hardware.
@@ -1755,7 +1755,7 @@ Browse the complete collection of integrations available for Python. LangChain P
Efficient open-source language models.
@@ -1971,7 +1971,7 @@ Browse the complete collection of integrations available for Python. LangChain P
Run large language models locally.
@@ -2651,7 +2651,7 @@ Browse the complete collection of integrations available for Python. LangChain P
Fast inference for open-source models.
@@ -3396,7 +3396,6 @@ Browse the complete collection of integrations available for Python. LangChain P
Mistral's efficient language models.
@@ -3474,7 +3473,6 @@ Browse the complete collection of integrations available for Python. LangChain P
Run large language models locally.
@@ -3609,7 +3607,6 @@ Browse the complete collection of integrations available for Python. LangChain P
Fast inference for open-source models.
diff --git a/src/oss/python/integrations/providers/aws.mdx b/src/oss/python/integrations/providers/aws.mdx
index f079d5a88..bdca4c5fc 100644
--- a/src/oss/python/integrations/providers/aws.mdx
+++ b/src/oss/python/integrations/providers/aws.mdx
@@ -1,6 +1,6 @@
---
title: Overview
-sidebarTitle: AWS
+sidebarTitle: AWS (Amazon)
mode: wide
---
diff --git a/src/oss/python/integrations/retrievers/elasticsearch_retriever.mdx b/src/oss/python/integrations/retrievers/elasticsearch_retriever.mdx
index 78de485ac..65fbf8fd3 100644
--- a/src/oss/python/integrations/retrievers/elasticsearch_retriever.mdx
+++ b/src/oss/python/integrations/retrievers/elasticsearch_retriever.mdx
@@ -374,7 +374,7 @@ custom_mapped_retriever.invoke("foo")
## Usage
-Following the above examples, we use `.invoke` to issue a single query.
+Following the above examples, we use `invoke` to issue a single query.
## API reference
diff --git a/src/oss/python/integrations/retrievers/google_vertex_ai_search.mdx b/src/oss/python/integrations/retrievers/google_vertex_ai_search.mdx
index 8b843c6c3..3aa2624a3 100644
--- a/src/oss/python/integrations/retrievers/google_vertex_ai_search.mdx
+++ b/src/oss/python/integrations/retrievers/google_vertex_ai_search.mdx
@@ -244,7 +244,7 @@ for doc in result:
## Usage
-Following the above examples, we use `.invoke` to issue a single query.
+Following the above examples, we use `invoke` to issue a single query.
## API reference
diff --git a/src/oss/python/integrations/retrievers/graph_rag.mdx b/src/oss/python/integrations/retrievers/graph_rag.mdx
index e2672d983..123ace77a 100644
--- a/src/oss/python/integrations/retrievers/graph_rag.mdx
+++ b/src/oss/python/integrations/retrievers/graph_rag.mdx
@@ -327,7 +327,7 @@ the query.
## Usage
-Following the examples above, `.invoke` is used to initiate retrieval on a query.
+Following the examples above, `invoke` is used to initiate retrieval on a query.
## API reference
diff --git a/src/oss/python/integrations/tools/TEMPLATE.mdx b/src/oss/python/integrations/tools/TEMPLATE.mdx
index 570b2fd88..039332af9 100644
--- a/src/oss/python/integrations/tools/TEMPLATE.mdx
+++ b/src/oss/python/integrations/tools/TEMPLATE.mdx
@@ -113,7 +113,7 @@ from langchain.agents import create_agent
tools = [tool]
agent = create_agent(
- model="anthropic:claude-3-7-sonnet-latest",
+ model="anthropic:claude-sonnet-4-5-20250929",
tools=tools,
)
diff --git a/src/oss/python/migrate/langchain-v1.mdx b/src/oss/python/migrate/langchain-v1.mdx
index ceac24e00..9e6448f06 100644
--- a/src/oss/python/migrate/langchain-v1.mdx
+++ b/src/oss/python/migrate/langchain-v1.mdx
@@ -13,11 +13,11 @@ The `langchain` package namespace has been significantly reduced in v1 to focus
| Module | What's available | Notes |
|-------------------------|------------------------------------------------|-----------------------------------|
-| `langchain.agents` | `create_agent`, `AgentState` | Core agent creation functionality |
-| `langchain.messages` | Message types, content blocks, `trim_messages` | Re-exported from `langchain-core` |
-| `langchain.tools` | `tool`, `BaseTool`, injection helpers | Re-exported from `langchain-core` |
-| `langchain.chat_models` | `init_chat_model`, `BaseChatModel` | Unified model initialization |
-| `langchain.embeddings` | `init_embeddings`, `Embeddings` | Embedding models |
+| @[`langchain.agents`] | @[`create_agent`], @[`AgentState`] | Core agent creation functionality |
+| @[`langchain.messages`] | Message types, @[content blocks][ContentBlock], @[`trim_messages`] | Re-exported from `langchain-core` |
+| @[`langchain.tools`] | @[`tool`], @[`BaseTool`], injection helpers | Re-exported from `langchain-core` |
+| @[`langchain.chat_models`] | @[`init_chat_model`], @[`BaseChatModel`] | Unified model initialization |
+| @[`langchain.embeddings`] | @[`init_embeddings`], @[`Embeddings`] | Embedding models |
### `langchain-classic`
@@ -64,9 +64,9 @@ uv add langchain-classic
## Migrate to `create_agent`
Prior to v1.0, we recommended using `langgraph.prebuilt.create_react_agent` to build agents.
-Now, we recommend you use `langchain.agents.create_agent` to build agents.
+Now, we recommend you use @[`langchain.agents.create_agent`][create_agent] to build agents.
-The table below outlines what functionality has changed from `create_react_agent` to `create_agent`:
+The table below outlines what functionality has changed from `create_react_agent` to @[`create_agent`]:
| Section | TL;DR - What's changed |
|---------|--------------|
@@ -85,7 +85,7 @@ The table below outlines what functionality has changed from `create_react_agent
### Import path
The import path for the agent prebuilt has changed from `langgraph.prebuilt` to `langchain.agents`.
-The name of the function has changed from `create_react_agent` to `create_agent`:
+The name of the function has changed from `create_react_agent` to @[`create_agent`]:
```python
from langgraph.prebuilt import create_react_agent # [!code --]
@@ -98,7 +98,7 @@ For more information, see [Agents](/oss/langchain/agents).
#### Static prompt rename
-The `prompt` parameter has been renamed to `system_prompt`:
+The `prompt` parameter has been renamed to @[`system_prompt`]:
```python v1 (new)
@@ -123,7 +123,7 @@ agent = create_react_agent(
#### `SystemMessage` to string
-If using `SystemMessage` objects in the system prompt, extract the string content:
+If using @[`SystemMessage`] objects in the system prompt, extract the string content:
```python v1 (new)
@@ -323,7 +323,7 @@ agent = create_react_agent(
### Custom state
-Custom state is now defined in middleware using the `state_schema` attribute:
+Custom state is now defined in middleware using the @[`state_schema`] attribute:
```python v1 (new)
@@ -379,12 +379,12 @@ agent = create_react_agent(
- Custom state is defined by creating a class that extends `AgentState` and assigning it to the middleware's `state_schema` attribute.
+ Custom state is defined by creating a class that extends @[`AgentState`] and assigning it to the middleware's @[`state_schema`] attribute.
#### State type restrictions
-`create_agent` now only supports `TypedDict` for state schemas. Pydantic models and dataclasses are no longer supported.
+@[`create_agent`] now only supports `TypedDict` for state schemas. Pydantic models and dataclasses are no longer supported.
```python v1 (new)
@@ -491,7 +491,7 @@ agent = create_react_agent(
#### Pre-bound models
-To better support structured output, `create_agent` no longer accepts pre-bound models with tools or configuration:
+To better support structured output, @[`create_agent`] no longer accepts pre-bound models with tools or configuration:
```python
# No longer supported
@@ -508,9 +508,9 @@ Dynamic model functions can return pre-bound models if structured output is *not
### Tools
-The `tools` argument to `create_agent` accepts a list of:
+The @[`tools`][create_agent(tools)] argument to @[`create_agent`] accepts a list of:
-* LangChain `BaseTool` instances (functions decorated with `@tool`)
+* LangChain @[`BaseTool`] instances (functions decorated with @[`@tool`])
* Callable objects (functions) with proper type hints and a docstring
* `dict` that represents a built-in provider tools
@@ -674,11 +674,11 @@ result = agent.invoke(
## Standard content
-In v1, messages gain provider-agnostic standard content blocks. Access them via `message.content_blocks` for a consistent, typed view across providers. The existing `message.content` field remains unchanged for strings or provider-native structures.
+In v1, messages gain provider-agnostic standard content blocks. Access them via @[`message.content_blocks`][content_blocks] for a consistent, typed view across providers. The existing @[`message.content`][BaseMessage(content)] field remains unchanged for strings or provider-native structures.
### What changed
-- New `content_blocks` property on messages for normalized content
+- New @[`content_blocks`][BaseMessage(content_blocks)] property on messages for normalized content
- Standardized block shapes, documented in [Messages](/oss/langchain/messages#standard-content-blocks)
- Optional serialization of standard blocks into `content` via `LC_OUTPUT_VERSION=v1` or `output_version="v1"`
@@ -758,10 +758,11 @@ See the content blocks [reference](/oss/langchain/messages#content-block-referen
Standard content blocks are **not serialized** into the `content` attribute by default. If you need to access standard content blocks in the `content` attribute (e.g., when sending messages to a client), you can opt-in to serializing them into `content`.
-```bash
+```bash Environment variable
export LC_OUTPUT_VERSION=v1
```
-```python
+
+```python Initialization parameter
from langchain.chat_models import init_chat_model
model = init_chat_model(
@@ -783,13 +784,13 @@ The `langchain` package namespace has been significantly reduced in v1 to focus
### Namespace
-| Module | What's available | Notes |
-|--------|------------------|-------|
-| `langchain.agents` | `create_agent`, `AgentState` | Core agent creation functionality |
-| `langchain.messages` | Message types, content blocks, `trim_messages` | Re-exported from `langchain-core` |
-| `langchain.tools` | `tool`, `BaseTool`, injection helpers | Re-exported from `langchain-core` |
-| `langchain.chat_models` | `init_chat_model`, `BaseChatModel` | Unified model initialization |
-| `langchain.embeddings` | `Embeddings`, `init_embeddings`, | Embedding models |
+| Module | What's available | Notes |
+|-------------------------|------------------------------------------------|-----------------------------------|
+| @[`langchain.agents`] | @[`create_agent`], @[`AgentState`] | Core agent creation functionality |
+| @[`langchain.messages`] | Message types, @[content blocks][ContentBlock], @[`trim_messages`] | Re-exported from `langchain-core` |
+| @[`langchain.tools`] | @[`tool`], @[`BaseTool`], injection helpers | Re-exported from `langchain-core` |
+| @[`langchain.chat_models`] | @[`init_chat_model`], @[`BaseChatModel`] | Unified model initialization |
+| @[`langchain.embeddings`] | @[`init_embeddings`], @[`Embeddings`] | Embedding models |
### `langchain-classic`
@@ -830,20 +831,24 @@ All LangChain packages now require **Python 3.10 or higher**. Python 3.9 reaches
### Updated return type for chat models
-The return type signature for chat model invocation has been fixed from `BaseMessage` to `AIMessage`. Custom chat models implementing `bind_tools` should update their return signature:
+The return type signature for chat model invocation has been fixed from @[`BaseMessage`] to @[`AIMessage`]. Custom chat models implementing @[`bind_tools`][BaseChatModel.bind_tools] should update their return signature:
```python v1 (new)
-Runnable[LanguageModelInput, AIMessage]
+def bind_tools(
+ ...
+ ) -> Runnable[LanguageModelInput, AIMessage]:
```
```python v0 (old)
-Runnable[LanguageModelInput, BaseMessage]
+def bind_tools(
+ ...
+ ) -> Runnable[LanguageModelInput, BaseMessage]:
```
### Default message format for OpenAI Responses API
-When interacting with the Responses API, `langchain-openai` now defaults to storing response items in message `content`. To restore previous behavior, set the `LC_OUTPUT_VERSION` environment variable to `v0`, or specify `output_version="v0"` when instantiating `ChatOpenAI`.
+When interacting with the Responses API, `langchain-openai` now defaults to storing response items in message `content`. To restore previous behavior, set the `LC_OUTPUT_VERSION` environment variable to `v0`, or specify `output_version="v0"` when instantiating @[`ChatOpenAI`].
```python
# Enforce previous behavior with output_version flag
@@ -852,7 +857,7 @@ model = ChatOpenAI(model="gpt-4o-mini", output_version="v0")
### Default `max_tokens` in `langchain-anthropic`
-The `max_tokens` parameter now defaults to higher values based on the model chosen, rather than the previous default of `1024`. If you relied on the old default, explicitly set `max_tokens=1024`.
+The `max_tokens` parameter in `langchain-anthropic` now defaults to higher values based on the model chosen, rather than the previous default of `1024`. If you relied on the old default, explicitly set `max_tokens=1024`.
### Legacy code moved to `langchain-classic`
@@ -866,17 +871,12 @@ Methods, functions, and other objects that were already deprecated and slated fo
Use of the `.text()` method on message objects should drop the parentheses:
-
-```python v1 (new)
+```python
# Property access
text = response.text
-# deprecated method call
-text = response.text()
-```
-```python v0 (old)
+# Deprecated method call
text = response.text()
```
-
Existing usage patterns (i.e., `.text()`) will continue to function but now emit a warning.
diff --git a/src/oss/python/releases/langchain-v1.mdx b/src/oss/python/releases/langchain-v1.mdx
index 28d6e752c..0504e91a6 100644
--- a/src/oss/python/releases/langchain-v1.mdx
+++ b/src/oss/python/releases/langchain-v1.mdx
@@ -42,7 +42,7 @@ For a complete list of changes, see the [migration guide](/oss/migrate/langchain
## `create_agent`
-`create_agent` is the standard way to build agents in LangChain 1.0. It provides a simpler interface than `langgraph.prebuilt.create_react_agent` while offering greater customization potential by using [middleware](#middleware).
+@[`create_agent`] is the standard way to build agents in LangChain 1.0. It provides a simpler interface than `langgraph.prebuilt.create_react_agent` while offering greater customization potential by using [middleware](#middleware).
```python
from langchain.agents import create_agent
@@ -60,7 +60,7 @@ result = agent.invoke({
})
```
-Under the hood, `create_agent` is built on the basic agent loop -- calling a model, letting it choose tools to execute, and then finishing when it calls no more tools:
+Under the hood, @[`create_agent`] is built on the basic agent loop -- calling a model, letting it choose tools to execute, and then finishing when it calls no more tools:
-Build custom middleware by implementing any of these hooks on a subclass of the `AgentMiddleware` class:
+Build custom middleware by implementing any of these hooks on a subclass of the @[`AgentMiddleware`] class:
| Hook | When it runs | Use cases |
|-------------------|--------------------------|-----------------------------------------|
@@ -194,7 +194,7 @@ For more information, see [the complete middleware guide](/oss/langchain/middlew
### Built on LangGraph
-Because `create_agent` is built on [LangGraph](/oss/langgraph), you automatically get built in support for long running and reliable agents via:
+Because @[`create_agent`] is built on [LangGraph](/oss/langgraph), you automatically get built in support for long running and reliable agents via:
@@ -215,7 +215,7 @@ You don't need to learn LangGraph to use these features—they work out of the b
### Structured output
-`create_agent` has improved structured output generation:
+@[`create_agent`] has improved structured output generation:
- **Main loop integration**: Structured output is now generated in the main loop instead of requiring an additional LLM call
- **Structured output strategy**: Models can choose between calling tools or using provider-side structured output generation
@@ -268,7 +268,7 @@ print(repr(result["structured_response"]))
Broader support for content blocks will be rolled out gradually across more providers.
-The new `content_blocks` property introduces a standard representation for message content that works across providers:
+The new @[`content_blocks`][BaseMessage(content_blocks)] property introduces a standard representation for message content that works across providers:
```python
from langchain_anthropic import ChatAnthropic
@@ -298,17 +298,17 @@ For more information, see our guide on [content blocks](/oss/langchain/messages#
## Simplified package
-LangChain v1 streamlines the `langchain` package namespace to focus on essential building blocks for agents. The refined namespace exposes the most useful and relevant functionality:
+LangChain v1 streamlines the [`langchain`](https://pypi.org/project/langchain/) package namespace to focus on essential building blocks for agents. The refined namespace exposes the most useful and relevant functionality:
### Namespace
| Module | What's available | Notes |
|--------|------------------|-------|
-| `langchain.agents` | `create_agent`, `AgentState` | Core agent creation functionality |
-| `langchain.messages` | Message types, content blocks, `trim_messages` | Re-exported from `langchain-core` |
-| `langchain.tools` | `tool`, `BaseTool`, injection helpers | Re-exported from `langchain-core` |
-| `langchain.chat_models` | `init_chat_model`, `BaseChatModel` | Unified model initialization |
-| `langchain.embeddings` | `Embeddings`, `init_embeddings` | Embedding models |
+| @[`langchain.agents`] | @[`create_agent`], @[`AgentState`] | Core agent creation functionality |
+| @[`langchain.messages`] | Message types, @[content blocks][ContentBlock], @[`trim_messages`] | Re-exported from @[`langchain-core`] |
+| @[`langchain.tools`] | @[`@tool`], @[`BaseTool`], injection helpers | Re-exported from @[`langchain-core`] |
+| @[`langchain.chat_models`] | @[`init_chat_model`], @[`BaseChatModel`] | Unified model initialization |
+| @[`langchain.embeddings`] | @[`Embeddings`], @[`init_embeddings`] | Embedding models |
Most of these are re-exported from `langchain-core` for convenience, which gives you a focused API surface for building agents.
diff --git a/src/snippets/oss/studio.mdx b/src/snippets/oss/studio.mdx
index 8af5f0b8f..d69024091 100644
--- a/src/snippets/oss/studio.mdx
+++ b/src/snippets/oss/studio.mdx
@@ -88,7 +88,7 @@ Inside your app's directory, create a configuration file `langgraph.json`:
}
```
-`create_agent()` automatically returns a compiled LangGraph graph that we can pass to the `graphs` key in our configuration file.
+@[`create_agent`] automatically returns a compiled LangGraph graph that we can pass to the `graphs` key in our configuration file.
See the [LangGraph configuration file reference](/langsmith/cli#configuration-file) for detailed explanations of each key in the JSON object of the configuration file.
diff --git a/src/snippets/oss/ui-py.mdx b/src/snippets/oss/ui-py.mdx
index d3a8f3222..40e289448 100644
--- a/src/snippets/oss/ui-py.mdx
+++ b/src/snippets/oss/ui-py.mdx
@@ -1,4 +1,4 @@
-LangChain provides a powerful prebuilt user interface that work seamlessly with agents created using [`create_agent()`](/oss/python/langchain/agents). This UI is designed to provide rich, interactive experiences for your agents with minimal setup, whether you're running locally or in a deployed context (such as [LangSmith](/langsmith/)).
+LangChain provides a powerful prebuilt user interface that works seamlessly with agents created using [`create_agent`](/oss/python/langchain/agents). This UI is designed to provide rich, interactive experiences for your agents with minimal setup, whether you're running locally or in a deployed context (such as [LangSmith](/langsmith/)).
## Agent Chat UI