diff --git a/src/oss/langchain/agents.mdx b/src/oss/langchain/agents.mdx
index 5f18b8df6..bb516cbd9 100644
--- a/src/oss/langchain/agents.mdx
+++ b/src/oss/langchain/agents.mdx
@@ -128,10 +128,10 @@ Model instances give you complete control over configuration. Use them when you
#### Dynamic model
-:::python
-
Dynamic models are selected at runtime based on the current state and context. This enables sophisticated routing logic and cost optimization.
+:::python
+
To use a dynamic model, you need to provide a function that receives the graph state and runtime and returns an instance of `BaseChatModel` with the tools bound to it using `.bind_tools(tools)`, where `tools` is a subset of the `tools` parameter.
```python
@@ -153,11 +153,6 @@ agent = create_agent(select_model, tools=tools)
```
:::
:::js
-
-**`state`**: The data that flows through your agent's execution, including messages, custom fields, and any information that needs to be tracked and potentially modified during processing (e.g. user preferences or tool usage stats).
-
-
-Dynamic models are selected at runtime based on the current state and context. This enables sophisticated routing logic and cost optimization.
To use a dynamic model, you need to provide a function that receives the graph state and runtime and returns an instance of `BaseChatModel` with the tools bound to it using `.bindTools(tools)`, where `tools` is a subset of the `tools` parameter.
@@ -465,8 +460,95 @@ const agent = createAgent({
When no `prompt` is provided, the agent will infer its task from the messages directly.
+#### Dynamic prompts with middleware
+
+:::python
+For more advanced use cases where you need to modify the system prompt based on runtime context or agent state, you can use the `modify_model_request` decorator to create a simple custom middleware.
+:::
+:::js
+For more advanced use cases where you need to modify the system prompt based on runtime context or agent state, you can use the `dynamicSystemPromptMiddleware` middleware.
+:::
+
+A dynamic system prompt is especially useful for personalizing prompts based on user roles, conversation context, or other changing factors:
+
+:::python
+```python wrap
+from langchain.agents import create_agent, AgentState
+from langchain.agents.middleware.types import ModelRequest, modify_model_request
+from langgraph.runtime import Runtime
+from typing import TypedDict
+
+class Context(TypedDict):
+ user_role: str
+
+@modify_model_request
+def dynamic_system_prompt(state: AgentState, request: ModelRequest, runtime: Runtime[Context]) -> ModelRequest:
+ user_role = runtime.context.get("user_role", "user")
+ base_prompt = "You are a helpful assistant."
+
+ if user_role == "expert":
+ prompt = f"{base_prompt} Provide detailed technical responses."
+ elif user_role == "beginner":
+ prompt = f"{base_prompt} Explain concepts simply and avoid jargon."
+ else:
+ prompt = base_prompt
+
+ request.system_prompt = prompt
+ return request
+
+agent = create_agent(
+ model="openai:gpt-4o",
+ tools=tools,
+ middleware=[dynamic_system_prompt],
+)
+
+# The system prompt will be set dynamically based on context
+result = agent.invoke(
+ {"messages": [{"role": "user", "content": "Explain machine learning"}]},
+ {"context": {"user_role": "expert"}}
+)
+```
+:::
+
+:::js
+```typescript wrap
+import { z } from "zod";
+import { createAgent } from "langchain";
+import { dynamicSystemPromptMiddleware } from "langchain/middleware";
+
+const contextSchema = z.object({
+  userRole: z.enum(["expert", "beginner"]).optional(),
+});
+
+const agent = createAgent({
+ model: "openai:gpt-4o",
+ tools: [/* ... */],
+ contextSchema,
+ middleware: [
+    dynamicSystemPromptMiddleware<z.infer<typeof contextSchema>>((state, runtime) => {
+ const userRole = runtime.context.userRole || "user";
+ const basePrompt = "You are a helpful assistant.";
+
+ if (userRole === "expert") {
+ return `${basePrompt} Provide detailed technical responses.`;
+ } else if (userRole === "beginner") {
+ return `${basePrompt} Explain concepts simply and avoid jargon.`;
+ }
+ return basePrompt;
+ }),
+ ],
+});
+
+// The system prompt will be set dynamically based on context
+const result = await agent.invoke(
+ { messages: [{ role: "user", content: "Explain machine learning" }] },
+ { context: { userRole: "expert" } }
+);
+```
+:::
+
-For more details on message types and formatting, see [Messages](/oss/langchain/messages).
+For more details on message types and formatting, see [Messages](/oss/langchain/messages). For comprehensive middleware documentation, see [Middleware](/oss/langchain/middleware).
## Advanced configuration
diff --git a/src/oss/langchain/middleware.mdx b/src/oss/langchain/middleware.mdx
index 9a528dc88..1a38ebd69 100644
--- a/src/oss/langchain/middleware.mdx
+++ b/src/oss/langchain/middleware.mdx
@@ -151,6 +151,7 @@ LangChain provides several built in middleware to use off-the-shelf
- [Summarization](#summarization)
- [Human-in-the-loop](#human-in-the-loop)
- [Anthropic prompt caching](#anthropic-prompt-caching)
+- [Dynamic system prompt](#dynamic-system-prompt)
### Summarization
@@ -467,6 +468,138 @@ const result = await agent.invoke({ messages: [HumanMessage("What's my name?")]
```
:::
+### Dynamic system prompt
+
+:::python
+A system prompt can be set dynamically, right before each model invocation, using the `@modify_model_request` decorator. This is particularly useful when the prompt depends on the current agent state or runtime context.
+
+For example, you can adjust the system prompt based on the user's expertise level:
+
+```python
+from typing import TypedDict
+
+from langchain.agents import create_agent, AgentState
+from langchain.agents.middleware.types import ModelRequest, modify_model_request
+from langgraph.runtime import Runtime
+
+class Context(TypedDict):
+ user_role: str
+
+@modify_model_request
+def dynamic_system_prompt(state: AgentState, request: ModelRequest, runtime: Runtime[Context]) -> ModelRequest:
+ user_role = runtime.context.get("user_role", "user")
+ base_prompt = "You are a helpful assistant."
+
+ if user_role == "expert":
+ prompt = f"{base_prompt} Provide detailed technical responses."
+ elif user_role == "beginner":
+ prompt = f"{base_prompt} Explain concepts simply and avoid jargon."
+ else:
+ prompt = base_prompt
+
+ request.system_prompt = prompt
+ return request
+
+agent = create_agent(
+ model="openai:gpt-4o",
+ tools=[web_search],
+ middleware=[dynamic_system_prompt],
+)
+
+# Use with context
+result = agent.invoke(
+ {"messages": [{"role": "user", "content": "Explain async programming"}]},
+ {"context": {"user_role": "expert"}}
+)
+```
+:::
+:::js
+
+A system prompt can be set dynamically, right before each model invocation, using `dynamicSystemPromptMiddleware`. This is particularly useful when the prompt depends on the current agent state or runtime context.
+
+For example, you can adjust the system prompt based on the user's expertise level:
+
+```typescript
+import { z } from "zod";
+import { createAgent } from "langchain";
+import { dynamicSystemPromptMiddleware } from "langchain/middleware";
+
+const contextSchema = z.object({
+  userRole: z.enum(["expert", "beginner"]).optional(),
+});
+
+const agent = createAgent({
+ model: "openai:gpt-4o",
+  tools: [/* ... */],
+ contextSchema,
+ middleware: [
+    dynamicSystemPromptMiddleware<z.infer<typeof contextSchema>>((state, runtime) => {
+ const userRole = runtime.context.userRole || "user";
+ const basePrompt = "You are a helpful assistant.";
+
+ if (userRole === "expert") {
+ return `${basePrompt} Provide detailed technical responses.`;
+ } else if (userRole === "beginner") {
+ return `${basePrompt} Explain concepts simply and avoid jargon.`;
+ }
+ return basePrompt;
+ }),
+ ],
+});
+
+// The system prompt will be set dynamically based on context
+const result = await agent.invoke(
+ { messages: [{ role: "user", content: "Explain async programming" }] },
+ { context: { userRole: "expert" } }
+);
+```
+:::
+
+Alternatively, you can adjust the system prompt based on the conversation length:
+
+:::python
+```python
+from langchain.agents import create_agent, AgentState
+from langchain.agents.middleware.types import ModelRequest, modify_model_request
+
+@modify_model_request
+def simple_prompt(state: AgentState, request: ModelRequest) -> ModelRequest:
+ message_count = len(state["messages"])
+
+ if message_count > 10:
+ prompt = "You are in an extended conversation. Be more concise."
+ else:
+ prompt = "You are a helpful assistant."
+
+ request.system_prompt = prompt
+ return request
+
+agent = create_agent(
+ model="openai:gpt-4o",
+ tools=[search_tool],
+ middleware=[simple_prompt],
+)
+```
+:::
+
+:::js
+```typescript
+const agent = createAgent({
+ model: "openai:gpt-4o",
+ tools: [searchTool],
+ middleware: [
+ dynamicSystemPromptMiddleware((state) => {
+ const messageCount = state.messages.length;
+
+ if (messageCount > 10) {
+ return "You are in an extended conversation. Be more concise.";
+ }
+ return "You are a helpful assistant.";
+ }),
+ ],
+});
+```
+:::
+
## Custom Middleware
Middleware for agents are subclasses of `AgentMiddleware`, which implement one or more of its hooks.
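+
+For example, the decorator-based examples above can also be expressed as a subclass. The following is a minimal sketch, assuming the class exposes a `modify_model_request` hook matching the decorated functions shown earlier; the hook name and import path are assumptions, not confirmed API:
+
+```python
+from langchain.agents import AgentState
+from langchain.agents.middleware.types import AgentMiddleware, ModelRequest
+
+class ConcisePromptMiddleware(AgentMiddleware):
+    """Sketch: tighten the system prompt once a conversation grows long."""
+
+    # Assumed hook; mirrors the @modify_model_request decorator examples above.
+    def modify_model_request(self, request: ModelRequest, state: AgentState) -> ModelRequest:
+        if len(state["messages"]) > 10:
+            request.system_prompt = "You are in an extended conversation. Be more concise."
+        return request
+```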