From 61cf931aba6ce0fcea940ebfbb58473edfc3492a Mon Sep 17 00:00:00 2001 From: Mason Daugherty Date: Thu, 30 Oct 2025 14:27:19 -0400 Subject: [PATCH] chore: use latest chat model names --- src/langsmith/define-target-function.mdx | 2 +- src/langsmith/evaluate-complex-agent.mdx | 4 +- src/langsmith/evaluate-graph.mdx | 4 +- src/langsmith/generative-ui-react.mdx | 4 +- .../human-in-the-loop-time-travel.mdx | 2 +- src/langsmith/log-llm-trace.mdx | 2 +- .../manage-prompts-programmatically.mdx | 2 +- src/langsmith/observability-studio.mdx | 4 +- src/langsmith/server-mcp.mdx | 2 +- src/langsmith/streaming.mdx | 2 +- src/langsmith/test-react-agent-pytest.mdx | 4 +- src/langsmith/trace-claude-agent-sdk.mdx | 2 +- src/langsmith/trajectory-evals.mdx | 16 +-- src/oss/concepts/context.mdx | 8 +- src/oss/deepagents/customization.mdx | 2 +- src/oss/deepagents/human-in-the-loop.mdx | 2 +- src/oss/deepagents/middleware.mdx | 14 +- src/oss/deepagents/subagents.mdx | 8 +- .../integrations/chat/anthropic.mdx | 26 ++-- src/oss/javascript/migrate/langchain-v1.mdx | 28 ++-- src/oss/javascript/releases/langchain-v1.mdx | 10 +- src/oss/langchain/agents.mdx | 20 +-- src/oss/langchain/context-engineering.mdx | 125 +++++++++++------- .../errors/MESSAGE_COERCION_FAILURE.mdx | 2 +- src/oss/langchain/guardrails.mdx | 30 ++--- src/oss/langchain/human-in-the-loop.mdx | 4 +- src/oss/langchain/long-term-memory.mdx | 8 +- src/oss/langchain/mcp.mdx | 4 +- src/oss/langchain/messages.mdx | 8 +- src/oss/langchain/middleware.mdx | 100 +++++++------- src/oss/langchain/models.mdx | 26 ++-- src/oss/langchain/observability.mdx | 4 +- src/oss/langchain/overview.mdx | 4 +- src/oss/langchain/quickstart.mdx | 18 +-- src/oss/langchain/retrieval.mdx | 6 +- src/oss/langchain/runtime.mdx | 8 +- src/oss/langchain/short-term-memory.mdx | 26 ++-- src/oss/langchain/sql-agent.mdx | 2 +- src/oss/langchain/streaming.mdx | 14 +- src/oss/langchain/structured-output.mdx | 24 ++-- src/oss/langchain/supervisor.mdx | 4 +- src/oss/langchain/test.mdx | 16 +-- src/oss/langgraph/add-memory.mdx | 34 ++--- src/oss/langgraph/agentic-rag.mdx | 4 +- src/oss/langgraph/interrupts.mdx | 4 +- src/oss/langgraph/quickstart.mdx | 16 +-- src/oss/langgraph/streaming.mdx | 14 +- src/oss/langgraph/thinking-in-langgraph.mdx | 4 +- src/oss/langgraph/use-functional-api.mdx | 8 +- src/oss/langgraph/use-graph-api.mdx | 18 +-- src/oss/langgraph/use-time-travel.mdx | 4 +- src/oss/langgraph/workflows-agents.mdx | 4 +- .../integrations/callbacks/llmonitor.mdx | 2 +- .../python/integrations/chat/anthropic.mdx | 42 +++--- src/oss/python/integrations/chat/bedrock.mdx | 2 +- .../python/integrations/providers/cratedb.mdx | 4 +- .../integrations/providers/moorcheh.mdx | 2 +- .../integrations/providers/tensorlake.mdx | 2 +- .../python/integrations/tools/TEMPLATE.mdx | 2 +- src/oss/python/integrations/tools/bash.mdx | 2 +- .../python/integrations/tools/google_jobs.mdx | 2 +- src/oss/python/integrations/tools/graphql.mdx | 2 +- .../integrations/tools/openweathermap.mdx | 2 +- .../python/integrations/tools/playwright.mdx | 2 +- src/oss/python/integrations/tools/privy.mdx | 4 +- src/oss/python/integrations/tools/riza.mdx | 2 +- .../python/integrations/tools/salesforce.mdx | 2 +- .../python/integrations/tools/searchapi.mdx | 2 +- src/oss/python/integrations/tools/steam.mdx | 2 +- .../integrations/tools/yahoo_finance_news.mdx | 2 +- .../integrations/vectorstores/moorcheh.mdx | 2 +- src/oss/python/migrate/langchain-v1.mdx | 48 +++---- src/oss/python/releases/langchain-v1.mdx | 16 +-- 
src/snippets/chat-model-tabs-js.mdx | 6 +- src/snippets/chat-model-tabs.mdx | 6 +- src/snippets/oss/studio.mdx | 2 +- src/snippets/oss/ui-js.mdx | 2 +- src/snippets/trace-with-anthropic.mdx | 2 +- 78 files changed, 456 insertions(+), 419 deletions(-) diff --git a/src/langsmith/define-target-function.mdx b/src/langsmith/define-target-function.mdx index e38279b482..77aca020f9 100644 --- a/src/langsmith/define-target-function.mdx +++ b/src/langsmith/define-target-function.mdx @@ -87,7 +87,7 @@ const target = async(inputs) => { ```python Python (LangChain) from langchain.chat_models import init_chat_model -model = init_chat_model("openai:gpt-4o-mini") +model = init_chat_model("gpt-4o-mini") def target(inputs: dict) -> dict: # This assumes your dataset has inputs with a `messages` key diff --git a/src/langsmith/evaluate-complex-agent.mdx b/src/langsmith/evaluate-complex-agent.mdx index 9451060249..55977866d5 100644 --- a/src/langsmith/evaluate-complex-agent.mdx +++ b/src/langsmith/evaluate-complex-agent.mdx @@ -322,7 +322,7 @@ def lookup_album( ... def lookup_artist( ... # Agent model -qa_llm = init_chat_model("claude-3-5-sonnet-latest") +qa_llm = init_chat_model("claude-sonnet-4-5-20250929") # The prebuilt ReACT agent only expects State to have a 'messages' key, so the # state we defined for the refund agent can also be passed to our lookup agent. qa_graph = create_agent(qa_llm, tools=[lookup_track, lookup_artist, lookup_album]) @@ -1260,7 +1260,7 @@ def lookup_artist( # Agent model -qa_llm = init_chat_model("claude-3-5-sonnet-latest") +qa_llm = init_chat_model("claude-sonnet-4-5-20250929") # The prebuilt ReACT agent only expects State to have a 'messages' key, so the # state we defined for the refund agent can also be passed to our lookup agent. qa_graph = create_agent(qa_llm, [lookup_track, lookup_artist, lookup_album]) diff --git a/src/langsmith/evaluate-graph.mdx b/src/langsmith/evaluate-graph.mdx index 19121cbb5a..95e2eb5388 100644 --- a/src/langsmith/evaluate-graph.mdx +++ b/src/langsmith/evaluate-graph.mdx @@ -42,7 +42,7 @@ def search(query: str) -> str: tools = [search] tool_node = ToolNode(tools) -model = init_chat_model("claude-3-5-sonnet-latest").bind_tools(tools) +model = init_chat_model("claude-sonnet-4-5-20250929").bind_tools(tools) # Define the function that determines whether to continue or not def should_continue(state: State) -> Literal["tools", END]: @@ -282,7 +282,7 @@ def search(query: str) -> str: tools = [search] tool_node = ToolNode(tools) -model = init_chat_model("claude-3-5-sonnet-latest").bind_tools(tools) +model = init_chat_model("claude-sonnet-4-5-20250929").bind_tools(tools) # Define the function that determines whether to continue or not def should_continue(state: State) -> Literal["tools", END]: diff --git a/src/langsmith/generative-ui-react.mdx b/src/langsmith/generative-ui-react.mdx index ba822bea87..1e0239def4 100644 --- a/src/langsmith/generative-ui-react.mdx +++ b/src/langsmith/generative-ui-react.mdx @@ -367,7 +367,7 @@ Then you can push updates to the UI component by calling `ui.push()` / `push_ui_ async def writer_node(state: AgentState): - model = ChatAnthropic(model="claude-3-5-sonnet-latest") + model = ChatAnthropic(model="claude-sonnet-4-5-20250929") message: AIMessage = await model.bind_tools( tools=[CreateTextDocument], tool_choice={"type": "tool", "name": "CreateTextDocument"}, @@ -432,7 +432,7 @@ Then you can push updates to the UI component by calling `ui.push()` / `push_ui_ ): Promise { const ui = typedUi(config); - const model = new 
ChatAnthropic({ model: "claude-3-5-sonnet-latest" }); + const model = new ChatAnthropic({ model: "claude-sonnet-4-5-20250929" }); const message = await model .bindTools( [ diff --git a/src/langsmith/human-in-the-loop-time-travel.mdx b/src/langsmith/human-in-the-loop-time-travel.mdx index 0380579e4b..c74bd90805 100644 --- a/src/langsmith/human-in-the-loop-time-travel.mdx +++ b/src/langsmith/human-in-the-loop-time-travel.mdx @@ -27,7 +27,7 @@ To time travel using the LangGraph Server API (via the LangGraph SDK): joke: NotRequired[str] model = init_chat_model( - "anthropic:claude-sonnet-4-5", + "claude-sonnet-4-5-20250929", temperature=0, ) diff --git a/src/langsmith/log-llm-trace.mdx b/src/langsmith/log-llm-trace.mdx index 4c82dc72c0..bde1282e39 100644 --- a/src/langsmith/log-llm-trace.mdx +++ b/src/langsmith/log-llm-trace.mdx @@ -397,7 +397,7 @@ def chat_model(inputs: dict) -> dict: When using a custom model, it is recommended to also provide the following `metadata` fields to identify the model when viewing traces and when filtering. * `ls_provider`: The provider of the model, eg "openai", "anthropic", etc. -* `ls_model_name`: The name of the model, eg "gpt-4o-mini", "claude-3-opus-20240307", etc. +* `ls_model_name`: The name of the model, eg "gpt-4o-mini", "claude-3-opus-20240229", etc. diff --git a/src/langsmith/manage-prompts-programmatically.mdx b/src/langsmith/manage-prompts-programmatically.mdx index ab103ef581..0ef9262598 100644 --- a/src/langsmith/manage-prompts-programmatically.mdx +++ b/src/langsmith/manage-prompts-programmatically.mdx @@ -349,7 +349,7 @@ const { messages, system } = convertPromptToAnthropic(formattedPrompt); const anthropicClient = new Anthropic(); const anthropicResponse = await anthropicClient.messages.create({ - model: "claude-3-haiku-20240307", + model: "claude-haiku-4-5-20251001", system, messages, max_tokens: 1024, diff --git a/src/langsmith/observability-studio.mdx b/src/langsmith/observability-studio.mdx index 8051d73208..26405b953c 100644 --- a/src/langsmith/observability-studio.mdx +++ b/src/langsmith/observability-studio.mdx @@ -78,8 +78,8 @@ class Configuration(BaseModel): model: Annotated[ Literal[ - "anthropic/claude-sonnet-4-5", - "anthropic/claude-3-5-haiku-latest", + "anthropic/claude-sonnet-4-5-20250929", + "anthropic/claude-haiku-4-5-20251001", "openai/o1", "openai/gpt-4o-mini", "openai/o1-mini", diff --git a/src/langsmith/server-mcp.mdx b/src/langsmith/server-mcp.mdx index 432dfd642e..cefcc544a6 100644 --- a/src/langsmith/server-mcp.mdx +++ b/src/langsmith/server-mcp.mdx @@ -154,7 +154,7 @@ Use an MCP-compliant client to connect to the LangGraph server. 
The following ex tools = await load_mcp_tools(session) # Create and run a react agent with the tools - agent = create_agent("openai:gpt-4.1", tools) + agent = create_agent("gpt-4.1", tools) # Invoke the agent with a message agent_response = await agent.ainvoke({"messages": "What can the finance agent do for me?"}) diff --git a/src/langsmith/streaming.mdx b/src/langsmith/streaming.mdx index d92c9ead75..71e639c1b3 100644 --- a/src/langsmith/streaming.mdx +++ b/src/langsmith/streaming.mdx @@ -649,7 +649,7 @@ The streamed output from [`messages-tuple` mode](#supported-stream-modes) is a t topic: str joke: str = "" - model = init_chat_model(model="openai:gpt-4o-mini") + model = init_chat_model(model="gpt-4o-mini") def call_model(state: MyState): """Call the LLM to generate a joke about a topic""" diff --git a/src/langsmith/test-react-agent-pytest.mdx b/src/langsmith/test-react-agent-pytest.mdx index e0404c9c67..1a46630ec1 100644 --- a/src/langsmith/test-react-agent-pytest.mdx +++ b/src/langsmith/test-react-agent-pytest.mdx @@ -194,7 +194,7 @@ class AgentOutputFormat(TypedDict): reasoning: Annotated[str, ..., "The reasoning behind the answer"] agent = create_agent( - model="openai:gpt-4o-mini", + model="gpt-4o-mini", tools=[code_tool, search_tool, polygon_aggregates], response_format=AgentOutputFormat, system_prompt="You are a financial expert. Respond to the users query accurately", @@ -766,7 +766,7 @@ Remember to also add the config files for [Vitest](#config-files-for-vitestjest) reasoning: Annotated[str, ..., "The reasoning behind the answer"] agent = create_agent( - model="openai:gpt-4o-mini", + model="gpt-4o-mini", tools=[code_tool, search_tool, polygon_aggregates], response_format=AgentOutputFormat, system_prompt="You are a financial expert. Respond to the users query accurately", diff --git a/src/langsmith/trace-claude-agent-sdk.mdx b/src/langsmith/trace-claude-agent-sdk.mdx index d2aa230070..9b6d874363 100644 --- a/src/langsmith/trace-claude-agent-sdk.mdx +++ b/src/langsmith/trace-claude-agent-sdk.mdx @@ -70,7 +70,7 @@ async def main(): ) options = ClaudeAgentOptions( - model="claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", system_prompt="You are a friendly travel assistant who helps with weather information.", mcp_servers={"weather": weather_server}, allowed_tools=["mcp__weather__get_weather"], diff --git a/src/langsmith/trajectory-evals.mdx b/src/langsmith/trajectory-evals.mdx index 4ae3ffeeaf..81cb5d885e 100644 --- a/src/langsmith/trajectory-evals.mdx +++ b/src/langsmith/trajectory-evals.mdx @@ -70,7 +70,7 @@ def get_weather(city: str): """Get weather information for a city.""" return f"It's 75 degrees and sunny in {city}." -agent = create_agent("openai:gpt-4o", tools=[get_weather]) +agent = create_agent("gpt-4o", tools=[get_weather]) evaluator = create_trajectory_match_evaluator( # [!code highlight] trajectory_match_mode="strict", # [!code highlight] @@ -121,7 +121,7 @@ const getWeather = tool( ); const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [getWeather] }); @@ -187,7 +187,7 @@ def get_events(city: str): """Get events happening in a city.""" return f"Concert at the park in {city} tonight." 
-agent = create_agent("openai:gpt-4o", tools=[get_weather, get_events]) +agent = create_agent("gpt-4o", tools=[get_weather, get_events]) evaluator = create_trajectory_match_evaluator( # [!code highlight] trajectory_match_mode="unordered", # [!code highlight] @@ -249,7 +249,7 @@ const getEvents = tool( ); const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [getWeather, getEvents] }); @@ -325,7 +325,7 @@ def get_detailed_forecast(city: str): """Get detailed weather forecast for a city.""" return f"Detailed forecast for {city}: sunny all week." -agent = create_agent("openai:gpt-4o", tools=[get_weather, get_detailed_forecast]) +agent = create_agent("gpt-4o", tools=[get_weather, get_detailed_forecast]) evaluator = create_trajectory_match_evaluator( # [!code highlight] trajectory_match_mode="superset", # [!code highlight] @@ -388,7 +388,7 @@ const getDetailedForecast = tool( ); const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [getWeather, getDetailedForecast] }); @@ -460,7 +460,7 @@ def get_weather(city: str): """Get weather information for a city.""" return f"It's 75 degrees and sunny in {city}." -agent = create_agent("openai:gpt-4o", tools=[get_weather]) +agent = create_agent("gpt-4o", tools=[get_weather]) evaluator = create_trajectory_llm_as_judge( # [!code highlight] model="openai:o3-mini", # [!code highlight] @@ -502,7 +502,7 @@ const getWeather = tool( ); const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [getWeather] }); diff --git a/src/oss/concepts/context.mdx b/src/oss/concepts/context.mdx index e20edd538d..4c077c1321 100644 --- a/src/oss/concepts/context.mdx +++ b/src/oss/concepts/context.mdx @@ -64,7 +64,7 @@ graph.invoke( return f"You are a helpful assistant. Address the user as {user_name}." agent = create_agent( - model="anthropic:claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", tools=[get_weather], middleware=[personalized_prompt], context_schema=ContextSchema @@ -145,6 +145,7 @@ await graph.invoke( State can also be accessed by the agent's **tools**, which can read or update the state as needed. See [tool calling guide](/oss/langchain/tools#short-term-memory) for details. :::python + ```python from langchain.agents import create_agent from langchain.agents.middleware import dynamic_prompt, ModelRequest @@ -160,7 +161,7 @@ await graph.invoke( return f"You are a helpful assistant. 
User's name is {user_name}" agent = create_agent( - model="anthropic:claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", tools=[...], state_schema=CustomState, # [!code highlight] middleware=[personalized_prompt], # [!code highlight] @@ -174,6 +175,7 @@ await graph.invoke( ::: :::js + ```typescript import { createAgent, createMiddleware } from "langchain"; import type { AgentState } from "langchain"; @@ -194,7 +196,7 @@ await graph.invoke( }); const agent = createAgent({ // [!code highlight] - model: "anthropic:claude-sonnet-4-5", + model: "claude-sonnet-4-5-20250929", tools: [/* your tools here */], middleware: [personalizedPrompt] as const, // [!code highlight] }); diff --git a/src/oss/deepagents/customization.mdx b/src/oss/deepagents/customization.mdx index da67ad3483..92d2542661 100644 --- a/src/oss/deepagents/customization.mdx +++ b/src/oss/deepagents/customization.mdx @@ -14,7 +14,7 @@ from langchain.chat_models import init_chat_model from deepagents import create_deep_agent model = init_chat_model( - model="openai:gpt-5", + model="gpt-5", ) agent = create_deep_agent( model=model, diff --git a/src/oss/deepagents/human-in-the-loop.mdx b/src/oss/deepagents/human-in-the-loop.mdx index 24f7925e61..5e96a27ee4 100644 --- a/src/oss/deepagents/human-in-the-loop.mdx +++ b/src/oss/deepagents/human-in-the-loop.mdx @@ -38,7 +38,7 @@ def send_email(to: str, subject: str, body: str) -> str: checkpointer = MemorySaver() agent = create_deep_agent( - model="anthropic:claude-sonnet-4-20250514", + model="claude-sonnet-4-5-20250929", tools=[delete_file, read_file, send_email], interrupt_on={ "delete_file": True, # Default: approve, edit, reject diff --git a/src/oss/deepagents/middleware.mdx b/src/oss/deepagents/middleware.mdx index d73ac28e27..4a8d58730b 100644 --- a/src/oss/deepagents/middleware.mdx +++ b/src/oss/deepagents/middleware.mdx @@ -30,7 +30,7 @@ from langchain.agents.middleware import TodoListMiddleware # TodoListMiddleware is included by default in create_deep_agent # You can customize it if building a custom agent agent = create_agent( - model="anthropic:claude-sonnet-4-20250514", + model="claude-sonnet-4-5-20250929", # Custom planning instructions can be added via middleware middleware=[ TodoListMiddleware( @@ -65,7 +65,7 @@ from deepagents.middleware.filesystem import FilesystemMiddleware # FilesystemMiddleware is included by default in create_deep_agent # You can customize it if building a custom agent agent = create_agent( - model="anthropic:claude-sonnet-4-20250514", + model="claude-sonnet-4-5-20250929", middleware=[ FilesystemMiddleware( long_term_memory=False, # Enables access to long-term memory, defaults to False. You must attach a store to use long-term memory. @@ -92,7 +92,7 @@ from langgraph.store.memory import InMemoryStore store = InMemoryStore() agent = create_agent( - model="anthropic:claude-sonnet-4-20250514", + model="claude-sonnet-4-5-20250929", store=store, middleware=[ FilesystemMiddleware( @@ -133,10 +133,10 @@ def get_weather(city: str) -> str: return f"The weather in {city} is sunny." 
agent = create_agent( - model="claude-sonnet-4-20250514", + model="claude-sonnet-4-5-20250929", middleware=[ SubAgentMiddleware( - default_model="claude-sonnet-4-20250514", + default_model="claude-sonnet-4-5-20250929", default_tools=[], subagents=[ { @@ -180,10 +180,10 @@ weather_subagent = CompiledSubAgent( ) agent = create_agent( - model="anthropic:claude-sonnet-4-20250514", + model="claude-sonnet-4-5-20250929", middleware=[ SubAgentMiddleware( - default_model="claude-sonnet-4-20250514", + default_model="claude-sonnet-4-5-20250929", default_tools=[], subagents=[weather_subagent], ) diff --git a/src/oss/deepagents/subagents.mdx b/src/oss/deepagents/subagents.mdx index 2c0b244c93..e2d1169507 100644 --- a/src/oss/deepagents/subagents.mdx +++ b/src/oss/deepagents/subagents.mdx @@ -82,7 +82,7 @@ research_subagent = { subagents = [research_subagent] agent = create_deep_agent( - model="anthropic:claude-sonnet-4-20250514", + model="claude-sonnet-4-5-20250929", subagents=subagents ) ``` @@ -113,7 +113,7 @@ custom_subagent = CompiledSubAgent( subagents = [custom_subagent] agent = create_deep_agent( - model="anthropic:claude-sonnet-4-20250514", + model="claude-sonnet-4-5-20250929", tools=[internet_search], system_prompt=research_instructions, subagents=subagents @@ -223,7 +223,7 @@ subagents = [ "description": "Reviews legal documents and contracts", "system_prompt": "You are an expert legal reviewer...", "tools": [read_document, analyze_contract], - "model": "anthropic:claude-sonnet-4-20250514", # Large context for long documents + "model": "claude-sonnet-4-5-20250929", # Large context for long documents }, { "name": "financial-analyst", @@ -302,7 +302,7 @@ subagents = [ ] agent = create_deep_agent( - model="anthropic:claude-sonnet-4-20250514", + model="claude-sonnet-4-5-20250929", system_prompt="You coordinate data analysis and reporting. 
Use subagents for specialized tasks.", subagents=subagents ) diff --git a/src/oss/javascript/integrations/chat/anthropic.mdx b/src/oss/javascript/integrations/chat/anthropic.mdx index 251c771e85..5f4e1c4028 100644 --- a/src/oss/javascript/integrations/chat/anthropic.mdx +++ b/src/oss/javascript/integrations/chat/anthropic.mdx @@ -65,7 +65,7 @@ Now we can instantiate our model object and generate chat completions: import { ChatAnthropic } from "@langchain/anthropic" const llm = new ChatAnthropic({ - model: "claude-3-haiku-20240307", + model: "claude-haiku-4-5-20251001", temperature: 0, maxTokens: undefined, maxRetries: 2, @@ -94,7 +94,7 @@ AIMessage { "id": "msg_013WBXXiggy6gMbAUY6NpsuU", "type": "message", "role": "assistant", - "model": "claude-3-haiku-20240307", + "model": "claude-haiku-4-5-20251001", "stop_reason": "end_turn", "stop_sequence": null, "usage": { @@ -104,7 +104,7 @@ AIMessage { }, "response_metadata": { "id": "msg_013WBXXiggy6gMbAUY6NpsuU", - "model": "claude-3-haiku-20240307", + "model": "claude-haiku-4-5-20251001", "stop_reason": "end_turn", "stop_sequence": null, "usage": { @@ -159,7 +159,7 @@ const calculatorTool = { }; const toolCallingLlm = new ChatAnthropic({ - model: "claude-3-haiku-20240307", + model: "claude-haiku-4-5-20251001", }).bindTools([calculatorTool]); const toolPrompt = ChatPromptTemplate.fromMessages([ @@ -201,7 +201,7 @@ AIMessage { "id": "msg_01DZGs9DyuashaYxJ4WWpWUP", "type": "message", "role": "assistant", - "model": "claude-3-haiku-20240307", + "model": "claude-haiku-4-5-20251001", "stop_reason": "tool_use", "stop_sequence": null, "usage": { @@ -211,7 +211,7 @@ AIMessage { }, "response_metadata": { "id": "msg_01DZGs9DyuashaYxJ4WWpWUP", - "model": "claude-3-haiku-20240307", + "model": "claude-haiku-4-5-20251001", "stop_reason": "tool_use", "stop_sequence": null, "usage": { @@ -250,7 +250,7 @@ You can pass custom headers in your requests like this: import { ChatAnthropic } from "@langchain/anthropic"; const llmWithCustomHeaders = new ChatAnthropic({ - model: "claude-3-sonnet-20240229", + model: "claude-sonnet-4-5-20250929", maxTokens: 1024, clientOptions: { defaultHeaders: { @@ -592,7 +592,7 @@ void 0; import { ChatAnthropic } from "@langchain/anthropic"; const modelWithCaching = new ChatAnthropic({ - model: "claude-3-haiku-20240307", + model: "claude-haiku-4-5-20251001", clientOptions: { defaultHeaders: { "anthropic-beta": "prompt-caching-2024-07-31", @@ -723,7 +723,7 @@ In this example we pass a [plain text document](https://docs.claude.com/en/docs/ import { ChatAnthropic } from "@langchain/anthropic"; const citationsModel = new ChatAnthropic({ - model: "claude-3-5-haiku-latest", + model: "claude-haiku-4-5-20251001", }); const messagesWithCitations = [ @@ -807,7 +807,7 @@ This approach is helpful when you want Claude to cite information from a specifi import { ChatAnthropic } from "@langchain/anthropic"; const citationsModel = new ChatAnthropic({ - model: "claude-3-5-haiku-latest", + model: "claude-haiku-4-5-20251001", }); const messagesWithCitations = [ @@ -900,7 +900,7 @@ const ragTool = tool( // Create model with search results beta header const model = new ChatAnthropic({ - model: "claude-3-5-haiku-latest", + model: "claude-haiku-4-5-20251001", }).bindTools([ragTool]); const result = await model.invoke([ @@ -962,7 +962,7 @@ const messageWithSplitDocuments = { // Query LLM const citationsModelWithSplits = new ChatAnthropic({ - model: "claude-3-5-sonnet-latest", + model: "claude-sonnet-4-5-20250929", }); const resWithSplits = await 
citationsModelWithSplits.invoke([messageWithSplitDocuments]); @@ -1006,7 +1006,7 @@ See [Anthropic documentation](https://docs.claude.com/en/docs/build-with-claude/ import { ChatAnthropic } from "@langchain/anthropic"; const llm = new ChatAnthropic({ - model: "claude-sonnet-4-5", + model: "claude-sonnet-4-5-20250929", clientOptions: { defaultHeaders: { "anthropic-beta": "context-management-2025-06-27", diff --git a/src/oss/javascript/migrate/langchain-v1.mdx b/src/oss/javascript/migrate/langchain-v1.mdx index 848b1ab0cf..1cb45bd9ca 100644 --- a/src/oss/javascript/migrate/langchain-v1.mdx +++ b/src/oss/javascript/migrate/langchain-v1.mdx @@ -190,11 +190,11 @@ v1 includes built-in summarization middleware: import { createAgent, summarizationMiddleware } from "langchain"; const agent = createAgent({ - model: "anthropic:claude-sonnet-4-5", + model: "claude-sonnet-4-5-20250929", tools, middleware: [ summarizationMiddleware({ - model: "anthropic:claude-sonnet-4-5", + model: "claude-sonnet-4-5-20250929", maxTokensBeforeSummary: 1000, }), ], @@ -208,7 +208,7 @@ function customSummarization(state) { } const agent = createReactAgent({ - model: "anthropic:claude-sonnet-4-5", + model: "claude-sonnet-4-5-20250929", tools, preModelHook: customSummarization, }); @@ -230,7 +230,7 @@ v1 includes a built-in human-in-the-loop middleware: import { createAgent, humanInTheLoopMiddleware } from "langchain"; const agent = createAgent({ - model: "anthropic:claude-sonnet-4-5", + model: "claude-sonnet-4-5-20250929", tools: [readEmail, sendEmail], middleware: [ humanInTheLoopMiddleware({ @@ -249,7 +249,7 @@ function customHumanInTheLoopHook(state) { } const agent = createReactAgent({ - model: "anthropic:claude-sonnet-4-5", + model: "claude-sonnet-4-5-20250929", tools: [readEmail, sendEmail], postModelHook: customHumanInTheLoopHook, }); @@ -292,7 +292,7 @@ const greet = tool( ); const agent = createAgent({ - model: "anthropic:claude-sonnet-4-5", + model: "claude-sonnet-4-5-20250929", tools: [greet], middleware: [userState], }); @@ -321,7 +321,7 @@ const greet = tool( // Custom state was provided via agent-level state schema or accessed ad hoc in hooks const agent = createReactAgent({ - model: "anthropic:claude-sonnet-4-5", + model: "claude-sonnet-4-5-20250929", tools: [greet], stateSchema: UserState, }); @@ -350,7 +350,7 @@ const dynamicModel = createMiddleware({ }); const agent = createAgent({ - model: "openai:gpt-5-nano", + model: "gpt-5-nano", tools, middleware: [dynamicModel], }); @@ -379,7 +379,7 @@ To better support structured output, `createAgent` should receive a plain model // const agent = createAgent({ model: modelWithTools, tools: [] }); // Use instead -const agent = createAgent({ model: "openai:gpt-4o-mini", tools: [someTool] }); +const agent = createAgent({ model: "gpt-4o-mini", tools: [someTool] }); ``` ### Tools @@ -408,7 +408,7 @@ const errorHandling = createMiddleware({ }); const agent = createAgent({ - model: "anthropic:claude-sonnet-4-5", + model: "claude-sonnet-4-5-20250929", tools: [checkWeather, searchWeb], middleware: [errorHandling], }); @@ -417,7 +417,7 @@ const agent = createAgent({ import { createReactAgent } from "@langchain/langgraph/prebuilts"; const agent = createReactAgent({ - model: "anthropic:claude-sonnet-4-5", + model: "claude-sonnet-4-5-20250929", tools: [checkWeather, searchWeb], // Error handling commonly implemented inside tool code or post hooks }); @@ -448,7 +448,7 @@ const OutputSchema = z.object({ }); const agent = createAgent({ - model: "openai:gpt-4o-mini", + model: 
"gpt-4o-mini", tools, // explicitly using tool strategy responseFormat: toolStrategy(OutputSchema), // [!code highlight] @@ -464,7 +464,7 @@ const OutputSchema = z.object({ }); const agent = createReactAgent({ - model: "openai:gpt-4o-mini", + model: "gpt-4o-mini", tools, // Structured output was driven primarily via tool-calling with fewer options responseFormat: OutputSchema, @@ -490,7 +490,7 @@ import { createAgent, HumanMessage } from "langchain"; import * as z from "zod"; const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools, contextSchema: z.object({ userId: z.string(), sessionId: z.string() }), }); diff --git a/src/oss/javascript/releases/langchain-v1.mdx b/src/oss/javascript/releases/langchain-v1.mdx index 04e5804e9f..6687d8234d 100644 --- a/src/oss/javascript/releases/langchain-v1.mdx +++ b/src/oss/javascript/releases/langchain-v1.mdx @@ -44,7 +44,7 @@ For a complete list of changes, see the [migration guide](/oss/migrate/langchain import { createAgent } from "langchain"; const agent = createAgent({ - model: "anthropic:claude-sonnet-4-5", + model: "claude-sonnet-4-5-20250929", tools: [getWeather], systemPrompt: "You are a helpful assistant.", }); @@ -93,12 +93,12 @@ import { } from "langchain"; const agent = createAgent({ - model: "anthropic:claude-sonnet-4-5", + model: "claude-sonnet-4-5-20250929", tools: [readEmail, sendEmail], middleware: [ piiRedactionMiddleware({ patterns: ["email", "phone", "ssn"] }), summarizationMiddleware({ - model: "anthropic:claude-sonnet-4-5", + model: "claude-sonnet-4-5-20250929", maxTokensBeforeSummary: 500, }), humanInTheLoopMiddleware({ @@ -163,7 +163,7 @@ const expertiseBasedToolMiddleware = createMiddleware({ }); const agent = createAgent({ - model: "anthropic:claude-sonnet-4-5", + model: "claude-sonnet-4-5-20250929", tools: [simpleSearch, advancedSearch, basicCalculator, dataAnalysis], middleware: [expertiseBasedToolMiddleware], contextSchema, @@ -211,7 +211,7 @@ const weatherSchema = z.object({ }); const agent = createAgent({ - model: "openai:gpt-4o-mini", + model: "gpt-4o-mini", tools: [getWeather], responseFormat: weatherSchema, }); diff --git a/src/oss/langchain/agents.mdx b/src/oss/langchain/agents.mdx index 566f40024d..db431a5281 100644 --- a/src/oss/langchain/agents.mdx +++ b/src/oss/langchain/agents.mdx @@ -75,7 +75,7 @@ To initialize a static model from a str: return base_prompt agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[web_search], middleware=[user_role_prompt], context_schema=Context @@ -510,7 +510,7 @@ const contextSchema = z.object({ }); const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [/* ... 
*/], contextSchema, middleware: [ @@ -587,7 +587,7 @@ class ContactInfo(BaseModel): phone: str agent = create_agent( - model="openai:gpt-4o-mini", + model="gpt-4o-mini", tools=[search_tool], response_format=ToolStrategy(ContactInfo) ) @@ -608,7 +608,7 @@ result["structured_response"] from langchain.agents.structured_output import ProviderStrategy agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", response_format=ProviderStrategy(ContactInfo) ) ``` @@ -632,7 +632,7 @@ const ContactInfo = z.object({ }); const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", responseFormat: ContactInfo, }); @@ -749,7 +749,7 @@ const customAgentState = z.object({ }); const CustomAgentState = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [], stateSchema: customAgentState, }); diff --git a/src/oss/langchain/context-engineering.mdx b/src/oss/langchain/context-engineering.mdx index f4b73cecd3..0c4ad3d7ab 100644 --- a/src/oss/langchain/context-engineering.mdx +++ b/src/oss/langchain/context-engineering.mdx @@ -114,6 +114,7 @@ The system prompt sets the LLM's behavior and capabilities. Different users, con Access message count or conversation context from state: :::python + ```python from langchain.agents import create_agent from langchain.agents.middleware import dynamic_prompt, ModelRequest @@ -131,7 +132,7 @@ The system prompt sets the LLM's behavior and capabilities. Different users, con return base agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[...], middleware=[state_aware_prompt] ) @@ -143,7 +144,7 @@ The system prompt sets the LLM's behavior and capabilities. Different users, con import { createAgent } from "langchain"; const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [...], middleware: [ dynamicSystemPromptMiddleware((state) => { @@ -168,6 +169,7 @@ The system prompt sets the LLM's behavior and capabilities. Different users, con Access user preferences from long-term memory: :::python + ```python from dataclasses import dataclass from langchain.agents import create_agent @@ -195,7 +197,7 @@ The system prompt sets the LLM's behavior and capabilities. Different users, con return base agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[...], middleware=[store_aware_prompt], context_schema=Context, @@ -216,7 +218,7 @@ The system prompt sets the LLM's behavior and capabilities. Different users, con type Context = z.infer; const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [...], contextSchema, middleware: [ @@ -246,6 +248,7 @@ The system prompt sets the LLM's behavior and capabilities. Different users, con Access user ID or configuration from Runtime Context: :::python + ```python from dataclasses import dataclass from langchain.agents import create_agent @@ -275,7 +278,7 @@ The system prompt sets the LLM's behavior and capabilities. Different users, con return base agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[...], middleware=[context_aware_prompt], context_schema=Context @@ -296,7 +299,7 @@ The system prompt sets the LLM's behavior and capabilities. 
Different users, con type Context = z.infer; const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [...], contextSchema, middleware: [ @@ -337,6 +340,7 @@ It's critical to manage the content of messages to ensure that the LLM has the r Inject uploaded file context from State when relevant to current query: :::python + ```python from langchain.agents import create_agent from langchain.agents.middleware import wrap_model_call, ModelRequest, ModelResponse @@ -374,7 +378,7 @@ Reference these files when answering questions.""" return handler(request) agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[...], middleware=[inject_file_context] ) @@ -415,7 +419,7 @@ Reference these files when answering questions.`; }); const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [...], middleware: [injectFileContext], }); @@ -427,6 +431,7 @@ Reference these files when answering questions.`; Inject user's email writing style from Store to guide drafting: :::python + ```python from dataclasses import dataclass from langchain.agents import create_agent @@ -470,7 +475,7 @@ Reference these files when answering questions.`; return handler(request) agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[...], middleware=[inject_writing_style], context_schema=Context, @@ -527,6 +532,7 @@ ${style.exampleEmail || ''}`; Inject compliance rules from Runtime Context based on user's jurisdiction: :::python + ```python from dataclasses import dataclass from langchain.agents import create_agent @@ -575,7 +581,7 @@ ${style.exampleEmail || ''}`; return handler(request) agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[...], middleware=[inject_compliance_rules], context_schema=Context @@ -713,6 +719,7 @@ Not every tool is appropriate for every situation. Too many tools may overwhelm Enable advanced tools only after certain conversation milestones: :::python + ```python from langchain.agents import create_agent from langchain.agents.middleware import wrap_model_call, ModelRequest, ModelResponse @@ -741,7 +748,7 @@ Not every tool is appropriate for every situation. Too many tools may overwhelm return handler(request) agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[public_search, private_search, advanced_search], middleware=[state_based_tools] ) @@ -780,6 +787,7 @@ Not every tool is appropriate for every situation. Too many tools may overwhelm Filter tools based on user preferences or feature flags in Store: :::python + ```python from dataclasses import dataclass from langchain.agents import create_agent @@ -812,7 +820,7 @@ Not every tool is appropriate for every situation. Too many tools may overwhelm return handler(request) agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[search_tool, analysis_tool, export_tool], middleware=[store_based_tools], context_schema=Context, @@ -858,6 +866,7 @@ Not every tool is appropriate for every situation. Too many tools may overwhelm Filter tools based on user permissions from Runtime Context: :::python + ```python from dataclasses import dataclass from langchain.agents import create_agent @@ -892,7 +901,7 @@ Not every tool is appropriate for every situation. Too many tools may overwhelm return handler(request) agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[read_data, write_data, delete_data], middleware=[context_based_tools], context_schema=Context @@ -946,6 +955,7 @@ might change during an agent run. 
Use different models based on conversation length from State: :::python + ```python from langchain.agents import create_agent from langchain.agents.middleware import wrap_model_call, ModelRequest, ModelResponse @@ -953,9 +963,9 @@ might change during an agent run. from typing import Callable # Initialize models once outside the middleware - large_model = init_chat_model("anthropic:claude-sonnet-4-5") - standard_model = init_chat_model("openai:gpt-4o") - efficient_model = init_chat_model("openai:gpt-4o-mini") + large_model = init_chat_model("claude-sonnet-4-5-20250929") + standard_model = init_chat_model("gpt-4o") + efficient_model = init_chat_model("gpt-4o-mini") @wrap_model_call def state_based_model( @@ -981,7 +991,7 @@ might change during an agent run. return handler(request) agent = create_agent( - model="openai:gpt-4o-mini", + model="gpt-4o-mini", tools=[...], middleware=[state_based_model] ) @@ -989,13 +999,14 @@ might change during an agent run. ::: :::js + ```typescript import { createMiddleware, initChatModel } from "langchain"; // Initialize models once outside the middleware - const largeModel = initChatModel("anthropic:claude-sonnet-4-5"); - const standardModel = initChatModel("openai:gpt-4o"); - const efficientModel = initChatModel("openai:gpt-4o-mini"); + const largeModel = initChatModel("claude-sonnet-4-5-20250929"); + const standardModel = initChatModel("gpt-4o"); + const efficientModel = initChatModel("gpt-4o-mini"); const stateBasedModel = createMiddleware({ name: "StateBasedModel", @@ -1023,6 +1034,7 @@ might change during an agent run. Use user's preferred model from Store: :::python + ```python from dataclasses import dataclass from langchain.agents import create_agent @@ -1037,9 +1049,9 @@ might change during an agent run. # Initialize available models once MODEL_MAP = { - "gpt-4o": init_chat_model("openai:gpt-4o"), - "gpt-4o-mini": init_chat_model("openai:gpt-4o-mini"), - "claude-sonnet": init_chat_model("anthropic:claude-sonnet-4-5"), + "gpt-4o": init_chat_model("gpt-4o"), + "gpt-4o-mini": init_chat_model("gpt-4o-mini"), + "claude-sonnet": init_chat_model("claude-sonnet-4-5-20250929"), } @wrap_model_call @@ -1062,7 +1074,7 @@ might change during an agent run. return handler(request) agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[...], middleware=[store_based_model], context_schema=Context, @@ -1072,6 +1084,7 @@ might change during an agent run. ::: :::js + ```typescript import * as z from "zod"; import { createMiddleware, initChatModel } from "langchain"; @@ -1082,9 +1095,9 @@ might change during an agent run. // Initialize available models once const MODEL_MAP = { - "gpt-4o": initChatModel("openai:gpt-4o"), - "gpt-4o-mini": initChatModel("openai:gpt-4o-mini"), - "claude-sonnet": initChatModel("anthropic:claude-sonnet-4-5"), + "gpt-4o": initChatModel("gpt-4o"), + "gpt-4o-mini": initChatModel("gpt-4o-mini"), + "claude-sonnet": initChatModel("claude-sonnet-4-5-20250929"), }; const storeBasedModel = createMiddleware({ @@ -1117,6 +1130,7 @@ might change during an agent run. Select model based on cost limits or environment from Runtime Context: :::python + ```python from dataclasses import dataclass from langchain.agents import create_agent @@ -1130,9 +1144,9 @@ might change during an agent run. 
environment: str # Initialize models once outside the middleware - premium_model = init_chat_model("anthropic:claude-sonnet-4-5") - standard_model = init_chat_model("openai:gpt-4o") - budget_model = init_chat_model("openai:gpt-4o-mini") + premium_model = init_chat_model("claude-sonnet-4-5-20250929") + standard_model = init_chat_model("gpt-4o") + budget_model = init_chat_model("gpt-4o-mini") @wrap_model_call def context_based_model( @@ -1159,7 +1173,7 @@ might change during an agent run. return handler(request) agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[...], middleware=[context_based_model], context_schema=Context @@ -1168,6 +1182,7 @@ might change during an agent run. ::: :::js + ```typescript import * as z from "zod"; import { createMiddleware, initChatModel } from "langchain"; @@ -1178,9 +1193,9 @@ might change during an agent run. }); // Initialize models once outside the middleware - const premiumModel = initChatModel("anthropic:claude-sonnet-4-5"); - const standardModel = initChatModel("openai:gpt-4o"); - const budgetModel = initChatModel("openai:gpt-4o-mini"); + const premiumModel = initChatModel("claude-sonnet-4-5-20250929"); + const standardModel = initChatModel("gpt-4o"); + const budgetModel = initChatModel("gpt-4o-mini"); const contextBasedModel = createMiddleware({ name: "ContextBasedModel", @@ -1272,6 +1287,7 @@ Dynamic response format selection adapts schemas based on user preferences, conv Configure structured output based on conversation state: :::python + ```python from langchain.agents import create_agent from langchain.agents.middleware import wrap_model_call, ModelRequest, ModelResponse @@ -1307,7 +1323,7 @@ Dynamic response format selection adapts schemas based on user preferences, conv return handler(request) agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[...], middleware=[state_based_output] ) @@ -1315,6 +1331,7 @@ Dynamic response format selection adapts schemas based on user preferences, conv ::: :::js + ```typescript import { createMiddleware } from "langchain"; import { z } from "zod"; @@ -1354,6 +1371,7 @@ Dynamic response format selection adapts schemas based on user preferences, conv Configure output format based on user preferences in Store: :::python + ```python from dataclasses import dataclass from langchain.agents import create_agent @@ -1397,7 +1415,7 @@ Dynamic response format selection adapts schemas based on user preferences, conv return handler(request) agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[...], middleware=[store_based_output], context_schema=Context, @@ -1407,6 +1425,7 @@ Dynamic response format selection adapts schemas based on user preferences, conv ::: :::js + ```typescript import * as z from "zod"; import { createMiddleware } from "langchain"; @@ -1453,6 +1472,7 @@ Dynamic response format selection adapts schemas based on user preferences, conv Configure output format based on Runtime Context like user role or environment: :::python + ```python from dataclasses import dataclass from langchain.agents import create_agent @@ -1495,7 +1515,7 @@ Dynamic response format selection adapts schemas based on user preferences, conv return handler(request) agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[...], middleware=[context_based_output], context_schema=Context @@ -1504,6 +1524,7 @@ Dynamic response format selection adapts schemas based on user preferences, conv ::: :::js + ```typescript import * as z from "zod"; import { createMiddleware } 
from "langchain"; @@ -1561,6 +1582,7 @@ Most real-world tools need more than just the LLM's parameters. They need user I Read from State to check current session information: :::python + ```python from langchain.tools import tool, ToolRuntime from langchain.agents import create_agent @@ -1580,13 +1602,14 @@ Most real-world tools need more than just the LLM's parameters. They need user I return "User is not authenticated" agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[check_authentication] ) ``` ::: :::js + ```typescript import * as z from "zod"; import { tool } from "@langchain/core/tools"; @@ -1618,6 +1641,7 @@ Most real-world tools need more than just the LLM's parameters. They need user I Read from Store to access persisted user preferences: :::python + ```python from dataclasses import dataclass from langchain.tools import tool, ToolRuntime @@ -1647,7 +1671,7 @@ Most real-world tools need more than just the LLM's parameters. They need user I return "No preferences found" agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[get_preference], context_schema=Context, store=InMemoryStore() @@ -1656,6 +1680,7 @@ Most real-world tools need more than just the LLM's parameters. They need user I ::: :::js + ```typescript import * as z from "zod"; import { tool } from "@langchain/core/tools"; @@ -1696,6 +1721,7 @@ Most real-world tools need more than just the LLM's parameters. They need user I Read from Runtime Context for configuration like API keys and user IDs: :::python + ```python from dataclasses import dataclass from langchain.tools import tool, ToolRuntime @@ -1724,7 +1750,7 @@ Most real-world tools need more than just the LLM's parameters. They need user I return f"Found {len(results)} results for user {user_id}" agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[fetch_user_data], context_schema=Context ) @@ -1742,6 +1768,7 @@ Most real-world tools need more than just the LLM's parameters. They need user I ::: :::js + ```typescript import * as z from "zod"; import { tool } from "@langchain/core/tools"; @@ -1773,7 +1800,7 @@ Most real-world tools need more than just the LLM's parameters. 
They need user I ); const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [fetchUserData], contextSchema, }); @@ -1792,6 +1819,7 @@ and update the memory of the agent to make important context available to future Write to State to track session-specific information using Command: :::python + ```python from langchain.tools import tool, ToolRuntime from langchain.agents import create_agent @@ -1813,13 +1841,14 @@ and update the memory of the agent to make important context available to future return Command(update={"authenticated": False}) agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[authenticate_user] ) ``` ::: :::js + ```typescript import * as z from "zod"; import { tool } from "@langchain/core/tools"; @@ -1854,6 +1883,7 @@ and update the memory of the agent to make important context available to future Write to Store to persist data across sessions: :::python + ```python from dataclasses import dataclass from langchain.tools import tool, ToolRuntime @@ -1887,7 +1917,7 @@ and update the memory of the agent to make important context available to future return f"Saved preference: {preference_key} = {preference_value}" agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[save_preference], context_schema=Context, store=InMemoryStore() @@ -1896,6 +1926,7 @@ and update the memory of the agent to make important context available to future ::: :::js + ```typescript import * as z from "zod"; import { tool } from "@langchain/core/tools"; @@ -1967,11 +1998,11 @@ from langchain.agents import create_agent from langchain.agents.middleware import SummarizationMiddleware agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[...], middleware=[ SummarizationMiddleware( - model="openai:gpt-4o-mini", + model="gpt-4o-mini", max_tokens_before_summary=4000, # Trigger summarization at 4000 tokens messages_to_keep=20, # Keep last 20 messages after summary ), @@ -1985,11 +2016,11 @@ agent = create_agent( import { createAgent, summarizationMiddleware } from "langchain"; const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [...], middleware: [ summarizationMiddleware({ - model: "openai:gpt-4o-mini", + model: "gpt-4o-mini", maxTokensBeforeSummary: 4000, // Trigger summarization at 4000 tokens messagesToKeep: 20, // Keep last 20 messages after summary }), diff --git a/src/oss/langchain/errors/MESSAGE_COERCION_FAILURE.mdx b/src/oss/langchain/errors/MESSAGE_COERCION_FAILURE.mdx index 0bcb95be96..57dfdcac33 100644 --- a/src/oss/langchain/errors/MESSAGE_COERCION_FAILURE.mdx +++ b/src/oss/langchain/errors/MESSAGE_COERCION_FAILURE.mdx @@ -37,7 +37,7 @@ from langchain_anthropic import ChatAnthropic uncoercible_message = {"role": "HumanMessage", "random_field": "random value"} -model = ChatAnthropic(model="claude-3-5-sonnet-latest") +model = ChatAnthropic(model="claude-sonnet-4-5-20250929") model.invoke([uncoercible_message]) ``` diff --git a/src/oss/langchain/guardrails.mdx b/src/oss/langchain/guardrails.mdx index 502cfbf356..484afd448c 100644 --- a/src/oss/langchain/guardrails.mdx +++ b/src/oss/langchain/guardrails.mdx @@ -61,7 +61,7 @@ from langchain.agents.middleware import PIIMiddleware agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[customer_service_tool, email_tool], middleware=[ # Redact emails in user input before sending to model @@ -98,7 +98,7 @@ result = agent.invoke({ import { createAgent, piiRedactionMiddleware } from "langchain"; const agent = createAgent({ - model: 
"openai:gpt-4o", + model: "gpt-4o", tools: [customerServiceTool, emailTool], middleware: [ // Redact emails in user input before sending to model @@ -185,7 +185,7 @@ from langgraph.types import Command agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[search_tool, send_email_tool, delete_database_tool], middleware=[ HumanInTheLoopMiddleware( @@ -224,7 +224,7 @@ import { createAgent, humanInTheLoopMiddleware } from "langchain"; import { MemorySaver, Command } from "@langchain/langgraph"; const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [searchTool, sendEmailTool, deleteDatabaseTool], middleware: [ humanInTheLoopMiddleware({ @@ -315,7 +315,7 @@ class ContentFilterMiddleware(AgentMiddleware): from langchain.agents import create_agent agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[search_tool, calculator_tool], middleware=[ ContentFilterMiddleware( @@ -369,7 +369,7 @@ def content_filter(state: AgentState, runtime: Runtime) -> dict[str, Any] | None from langchain.agents import create_agent agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[search_tool, calculator_tool], middleware=[content_filter], ) @@ -429,7 +429,7 @@ const contentFilterMiddleware = (bannedKeywords: string[]) => { import { createAgent } from "langchain"; const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [searchTool, calculatorTool], middleware: [ contentFilterMiddleware(["hack", "exploit", "malware"]), @@ -463,7 +463,7 @@ class SafetyGuardrailMiddleware(AgentMiddleware): def __init__(self): super().__init__() - self.safety_model = init_chat_model("openai:gpt-4o-mini") + self.safety_model = init_chat_model("gpt-4o-mini") @hook_config(can_jump_to=["end"]) def after_agent(self, state: AgentState, runtime: Runtime) -> dict[str, Any] | None: @@ -498,7 +498,7 @@ class SafetyGuardrailMiddleware(AgentMiddleware): from langchain.agents import create_agent agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[search_tool, calculator_tool], middleware=[SafetyGuardrailMiddleware()], ) @@ -515,7 +515,7 @@ from langchain_core.messages import AIMessage from langchain.chat_models import init_chat_model from typing import Any -safety_model = init_chat_model("openai:gpt-4o-mini") +safety_model = init_chat_model("gpt-4o-mini") @after_agent(can_jump_to=["end"]) def safety_guardrail(state: AgentState, runtime: Runtime) -> dict[str, Any] | None: @@ -551,7 +551,7 @@ def safety_guardrail(state: AgentState, runtime: Runtime) -> dict[str, Any] | No from langchain.agents import create_agent agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[search_tool, calculator_tool], middleware=[safety_guardrail], ) @@ -570,7 +570,7 @@ result = agent.invoke({ import { createMiddleware, AIMessage, initChatModel } from "langchain"; const safetyGuardrailMiddleware = () => { - const safetyModel = initChatModel("openai:gpt-4o-mini"); + const safetyModel = initChatModel("gpt-4o-mini"); return createMiddleware({ name: "SafetyGuardrailMiddleware", @@ -615,7 +615,7 @@ const safetyGuardrailMiddleware = () => { import { createAgent } from "langchain"; const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [searchTool, calculatorTool], middleware: [safetyGuardrailMiddleware()], }); @@ -636,7 +636,7 @@ from langchain.agents import create_agent from langchain.agents.middleware import PIIMiddleware, HumanInTheLoopMiddleware agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", 
tools=[search_tool, send_email_tool], middleware=[ # Layer 1: Deterministic input filter (before agent) @@ -661,7 +661,7 @@ agent = create_agent( import { createAgent, piiRedactionMiddleware, humanInTheLoopMiddleware } from "langchain"; const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [searchTool, sendEmailTool], middleware: [ // Layer 1: Deterministic input filter (before agent) diff --git a/src/oss/langchain/human-in-the-loop.mdx b/src/oss/langchain/human-in-the-loop.mdx index 97d4d19b8f..f690d7bac5 100644 --- a/src/oss/langchain/human-in-the-loop.mdx +++ b/src/oss/langchain/human-in-the-loop.mdx @@ -44,7 +44,7 @@ from langgraph.checkpoint.memory import InMemorySaver # [!code highlight] agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[write_file_tool, execute_sql_tool, read_data_tool], middleware=[ HumanInTheLoopMiddleware( # [!code highlight] @@ -73,7 +73,7 @@ import { createAgent, humanInTheLoopMiddleware } from "langchain"; // [!code hig import { MemorySaver } from "@langchain/langgraph"; // [!code highlight] const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [writeFileTool, executeSQLTool, readDataTool], middleware: [ humanInTheLoopMiddleware({ diff --git a/src/oss/langchain/long-term-memory.mdx b/src/oss/langchain/long-term-memory.mdx index f15bb5ee5d..7891624896 100644 --- a/src/oss/langchain/long-term-memory.mdx +++ b/src/oss/langchain/long-term-memory.mdx @@ -135,7 +135,7 @@ def get_user_info(runtime: ToolRuntime[Context]) -> str: return str(user_info.value) if user_info else "Unknown user" agent = create_agent( - model="anthropic:claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", tools=[get_user_info], # Pass store to agent - enables agent to access store when running tools store=store, # [!code highlight] @@ -193,7 +193,7 @@ const getUserInfo = tool( ); const agent = createAgent({ - model: "openai:gpt-4o-mini", + model: "gpt-4o-mini", tools: [getUserInfo], contextSchema, // Pass store to agent - enables agent to access store when running tools @@ -253,7 +253,7 @@ def save_user_info(user_info: UserInfo, runtime: ToolRuntime[Context]) -> str: return "Successfully saved user info." 
agent = create_agent( - model="anthropic:claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", tools=[save_user_info], store=store, # [!code highlight] context_schema=Context @@ -309,7 +309,7 @@ const saveUserInfo = tool( ); const agent = createAgent({ - model: "openai:gpt-4o-mini", + model: "gpt-4o-mini", tools: [saveUserInfo], contextSchema, store, // [!code highlight] diff --git a/src/oss/langchain/mcp.mdx b/src/oss/langchain/mcp.mdx index eff6005f8c..9343d45282 100644 --- a/src/oss/langchain/mcp.mdx +++ b/src/oss/langchain/mcp.mdx @@ -85,7 +85,7 @@ client = MultiServerMCPClient( # [!code highlight] tools = await client.get_tools() # [!code highlight] agent = create_agent( - "anthropic:claude-sonnet-4-5", + "claude-sonnet-4-5-20250929", tools # [!code highlight] ) math_response = await agent.ainvoke( @@ -122,7 +122,7 @@ const client = new MultiServerMCPClient({ // [!code highlight] const tools = await client.getTools(); // [!code highlight] const agent = createAgent({ - model: "anthropic:claude-sonnet-4-5", + model: "claude-sonnet-4-5-20250929", tools, // [!code highlight] }); diff --git a/src/oss/langchain/messages.mdx b/src/oss/langchain/messages.mdx index 7f60879fd5..b0899529f9 100644 --- a/src/oss/langchain/messages.mdx +++ b/src/oss/langchain/messages.mdx @@ -25,7 +25,7 @@ The simplest way to use messages is to create message objects and pass them to a from langchain.chat_models import init_chat_model from langchain.messages import HumanMessage, AIMessage, SystemMessage -model = init_chat_model("openai:gpt-5-nano") +model = init_chat_model("gpt-5-nano") system_msg = SystemMessage("You are a helpful assistant.") human_msg = HumanMessage("Hello, how are you?") @@ -384,7 +384,7 @@ When models make [tool calls](/oss/langchain/models#tool-calling), they're inclu ```python from langchain.chat_models import init_chat_model -model = init_chat_model("openai:gpt-5-nano") +model = init_chat_model("gpt-5-nano") def get_weather(location: str) -> str: """Get the weather at a location.""" @@ -423,7 +423,7 @@ An @[`AIMessage`] can hold token counts and other usage metadata in its @[`usage ```python from langchain.chat_models import init_chat_model -model = init_chat_model("openai:gpt-5-nano") +model = init_chat_model("gpt-5-nano") response = model.invoke("Hello!") response.usage_metadata @@ -822,7 +822,7 @@ inference provider of your choice. 
```python from langchain.chat_models import init_chat_model - model = init_chat_model("openai:gpt-5-nano", output_version="v1") + model = init_chat_model("gpt-5-nano", output_version="v1") ``` ::: :::js diff --git a/src/oss/langchain/middleware.mdx b/src/oss/langchain/middleware.mdx index a9490beef4..43b7175442 100644 --- a/src/oss/langchain/middleware.mdx +++ b/src/oss/langchain/middleware.mdx @@ -53,7 +53,7 @@ from langchain.agents.middleware import SummarizationMiddleware, HumanInTheLoopM agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[...], middleware=[SummarizationMiddleware(), HumanInTheLoopMiddleware()], ) @@ -69,7 +69,7 @@ import { } from "langchain"; const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [...], middleware: [summarizationMiddleware, humanInTheLoopMiddleware], }); @@ -98,11 +98,11 @@ from langchain.agents.middleware import SummarizationMiddleware agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[weather_tool, calculator_tool], middleware=[ SummarizationMiddleware( - model="openai:gpt-4o-mini", + model="gpt-4o-mini", max_tokens_before_summary=4000, # Trigger summarization at 4000 tokens messages_to_keep=20, # Keep last 20 messages after summary summary_prompt="Custom prompt for summarization...", # Optional @@ -117,11 +117,11 @@ agent = create_agent( import { createAgent, summarizationMiddleware } from "langchain"; const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [weatherTool, calculatorTool], middleware: [ summarizationMiddleware({ - model: "openai:gpt-4o-mini", + model: "gpt-4o-mini", maxTokensBeforeSummary: 4000, // Trigger summarization at 4000 tokens messagesToKeep: 20, // Keep last 20 messages after summary summaryPrompt: "Custom prompt for summarization...", // Optional @@ -206,7 +206,7 @@ from langgraph.checkpoint.memory import InMemorySaver agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[read_email_tool, send_email_tool], checkpointer=InMemorySaver(), middleware=[ @@ -230,7 +230,7 @@ agent = create_agent( import { createAgent, humanInTheLoopMiddleware } from "langchain"; const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [readEmailTool, sendEmailTool], middleware: [ humanInTheLoopMiddleware({ @@ -329,7 +329,7 @@ Please be a helpful assistant. """ agent = create_agent( - model=ChatAnthropic(model="claude-sonnet-4-latest"), + model=ChatAnthropic(model="claude-sonnet-4-5-20250929"), system_prompt=LONG_PROMPT, middleware=[AnthropicPromptCachingMiddleware(ttl="5m")], ) @@ -353,7 +353,7 @@ Please be a helpful assistant. 
`; const agent = createAgent({ - model: "anthropic:claude-sonnet-4-latest", + model: "claude-sonnet-4-5-20250929", prompt: LONG_PROMPT, middleware: [anthropicPromptCachingMiddleware({ ttl: "5m" })], }); ``` @@ -416,7 +416,7 @@ from langchain.agents.middleware import ModelCallLimitMiddleware agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[...], middleware=[ ModelCallLimitMiddleware( @@ -434,7 +434,7 @@ agent = create_agent( import { createAgent, modelCallLimitMiddleware } from "langchain"; const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [...], middleware: [ modelCallLimitMiddleware({ @@ -507,7 +507,7 @@ search_limiter = ToolCallLimitMiddleware( ) agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[...], middleware=[global_limiter, search_limiter], ) @@ -529,7 +529,7 @@ const searchLimiter = toolCallLimitMiddleware({ }); const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [...], middleware: [globalLimiter, searchLimiter], }); @@ -594,12 +594,12 @@ from langchain.agents.middleware import ModelFallbackMiddleware agent = create_agent( - model="openai:gpt-4o", # Primary model + model="gpt-4o", # Primary model tools=[...], middleware=[ ModelFallbackMiddleware( - "openai:gpt-4o-mini", # Try first on error - "anthropic:claude-3-5-sonnet-20241022", # Then this + "gpt-4o-mini", # Try first on error + "claude-3-5-sonnet-20241022", # Then this ), ], ) @@ -611,12 +611,12 @@ agent = create_agent( import { createAgent, modelFallbackMiddleware } from "langchain"; const agent = createAgent({ - model: "openai:gpt-4o", // Primary model + model: "gpt-4o", // Primary model tools: [...], middleware: [ modelFallbackMiddleware( - "openai:gpt-4o-mini", // Try first on error - "anthropic:claude-3-5-sonnet-20241022" // Then this + "gpt-4o-mini", // Try first on error + "claude-3-5-sonnet-20241022" // Then this ), ], }); @@ -627,11 +627,11 @@ const agent = createAgent({ :::python - First fallback model to try when the primary model fails. Can be a model string (e.g., `"openai:gpt-4o-mini"`) or a `BaseChatModel` instance. + First fallback model to try when the primary model fails. Can be a model string (e.g., `"gpt-4o-mini"`) or a `BaseChatModel` instance. - Additional fallback models to try in order if previous models fail + Additional fallback models to try in order if previous models fail ::: @@ -671,7 +671,7 @@ from langchain.agents.middleware import PIIMiddleware agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[...], middleware=[ # Redact emails in user input @@ -694,7 +694,7 @@ agent = create_agent( import { createAgent, piiRedactionMiddleware } from "langchain"; const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [...], middleware: [ // Redact emails in user input @@ -800,7 +800,7 @@ from langchain.messages import HumanMessage agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[...], middleware=[TodoListMiddleware()], ) @@ -815,7 +815,7 @@ print(result["todos"]) # Array of todo items with status tracking import { createAgent, HumanMessage, todoListMiddleware } from "langchain"; const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [ /* ...
*/ ], @@ -865,11 +865,11 @@ from langchain.agents.middleware import LLMToolSelectorMiddleware agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[tool1, tool2, tool3, tool4, tool5, ...], # Many tools middleware=[ LLMToolSelectorMiddleware( - model="openai:gpt-4o-mini", # Use cheaper model for selection + model="gpt-4o-mini", # Use cheaper model for selection max_tools=3, # Limit to 3 most relevant tools always_include=["search"], # Always include certain tools ), @@ -883,11 +883,11 @@ agent = create_agent( import { createAgent, llmToolSelectorMiddleware } from "langchain"; const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [tool1, tool2, tool3, tool4, tool5, ...], // Many tools middleware: [ llmToolSelectorMiddleware({ - model: "openai:gpt-4o-mini", // Use cheaper model for selection + model: "gpt-4o-mini", // Use cheaper model for selection maxTools: 3, // Limit to 3 most relevant tools alwaysInclude: ["search"], // Always include certain tools }), @@ -951,7 +951,7 @@ from langchain.agents.middleware import ToolRetryMiddleware agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[search_tool, database_tool], middleware=[ ToolRetryMiddleware( @@ -1025,7 +1025,7 @@ from langchain.agents.middleware import LLMToolEmulator agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[get_weather, search_database, send_email], middleware=[ # Emulate all tools by default @@ -1035,7 +1035,7 @@ agent = create_agent( # LLMToolEmulator(tools=["get_weather", "search_database"]), # Or use a custom model for emulation - # LLMToolEmulator(model="anthropic:claude-3-5-sonnet-latest"), + # LLMToolEmulator(model="claude-sonnet-4-5-20250929"), ], ) ``` @@ -1043,11 +1043,11 @@ agent = create_agent( - List of tool names (str) or BaseTool instances to emulate. If `None` (default), ALL tools will be emulated. If empty list, no tools will be emulated. + List of tool names (str) or BaseTool instances to emulate. If `None` (default), ALL tools will be emulated. If empty list, no tools will be emulated. - Model to use for generating emulated tool responses. Can be a model identifier string or BaseChatModel instance. + Model to use for generating emulated tool responses. Can be a model identifier string or BaseChatModel instance. 
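To ground the `tools` and `model` parameters documented in the hunk above, here is a minimal sketch (not part of the patch itself) that emulates only a side-effecting tool while letting the other tool execute for real. The tool names and the `gpt-4o-mini` emulator model are illustrative assumptions; the constructor arguments mirror the ones shown in the `LLMToolEmulator` hunk:

```python
from langchain.agents import create_agent
from langchain.agents.middleware import LLMToolEmulator


def get_weather(city: str) -> str:
    """Get weather information for a city."""
    return f"It's 75 degrees and sunny in {city}."


def send_email(to: str, body: str) -> str:
    """Send an email (hypothetical side-effecting tool)."""
    return f"Email sent to {to}."


agent = create_agent(
    model="gpt-4o",
    tools=[get_weather, send_email],
    middleware=[
        # Emulate only the side-effecting tool; get_weather still runs for real.
        LLMToolEmulator(tools=["send_email"], model="gpt-4o-mini"),
    ],
)
```

This pattern is useful in tests, where real tool execution (here, sending email) is undesirable but the agent loop should otherwise behave normally.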
@@ -1072,7 +1072,7 @@ from langchain.agents.middleware import ContextEditingMiddleware, ClearToolUsesE agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[...], middleware=[ ContextEditingMiddleware( @@ -1090,7 +1090,7 @@ agent = create_agent( import { createAgent, contextEditingMiddleware, ClearToolUsesEdit } from "langchain"; const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [...], middleware: [ contextEditingMiddleware({ @@ -1217,7 +1217,7 @@ def personalized_prompt(request: ModelRequest) -> str: # Use decorators in agent agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", middleware=[log_before_model, validate_output, retry_model, personalized_prompt], tools=[...], ) @@ -1452,9 +1452,9 @@ class DynamicModelMiddleware(AgentMiddleware): ) -> ModelResponse: # Use different model based on conversation length if len(request.messages) > 10: - request.model = init_chat_model("openai:gpt-4o") + request.model = init_chat_model("gpt-4o") else: - request.model = init_chat_model("openai:gpt-4o-mini") + request.model = init_chat_model("gpt-4o-mini") return handler(request) ``` @@ -1470,9 +1470,9 @@ const dynamicModelMiddleware = createMiddleware({ // Use different model based on conversation length const modifiedRequest = { ...request }; if (request.messages.length > 10) { - modifiedRequest.model = initChatModel("openai:gpt-4o"); + modifiedRequest.model = initChatModel("gpt-4o"); } else { - modifiedRequest.model = initChatModel("openai:gpt-4o-mini"); + modifiedRequest.model = initChatModel("gpt-4o-mini"); } return handler(modifiedRequest); }, @@ -1594,7 +1594,7 @@ const callCounterMiddleware = createMiddleware({ :::python ```python agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", middleware=[CallCounterMiddleware()], tools=[...], ) @@ -1611,7 +1611,7 @@ result = agent.invoke({ :::js ```typescript const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [...], middleware: [callCounterMiddleware] as const, }); @@ -1678,7 +1678,7 @@ When using multiple middleware, understanding execution order is important: :::python ```python agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", middleware=[middleware1, middleware2, middleware3], tools=[...], ) @@ -1688,7 +1688,7 @@ agent = create_agent( :::js ```typescript const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", middleware: [middleware1, middleware2, middleware3], tools: [...], }); @@ -1855,7 +1855,7 @@ class ToolSelectorMiddleware(AgentMiddleware): return handler(request) agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=all_tools, # All available tools need to be registered upfront # Middleware can be used to select a smaller subset that's relevant for the given run. middleware=[ToolSelectorMiddleware()], @@ -1878,7 +1878,7 @@ const toolSelectorMiddleware = createMiddleware({ }); const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: allTools, // All available tools need to be registered upfront // Middleware can be used to select a smaller subset that's relevant for the given run. 
middleware: [toolSelectorMiddleware], @@ -1932,7 +1932,7 @@ class ToolSelectorMiddleware(AgentMiddleware): return handler(request) agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=all_tools, middleware=[ToolSelectorMiddleware()], context_schema=Context, @@ -1992,7 +1992,7 @@ const toolSelector = createMiddleware({ }); const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: allTools, middleware: [toolSelector], }); diff --git a/src/oss/langchain/models.mdx b/src/oss/langchain/models.mdx index 3c90a470df..11a9dc177a 100644 --- a/src/oss/langchain/models.mdx +++ b/src/oss/langchain/models.mdx @@ -123,7 +123,7 @@ Using @[`init_chat_model`], pass these parameters as inline str: return f"It's always sunny in {city}!" agent = create_agent( - model="anthropic:claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", tools=[get_weather], system_prompt="You are a helpful assistant", ) @@ -105,7 +105,7 @@ const getWeather = tool( ); const agent = createAgent({ - model: "anthropic:claude-sonnet-4-5", + model: "claude-sonnet-4-5-20250929", tools: [getWeather], }); diff --git a/src/oss/langchain/quickstart.mdx b/src/oss/langchain/quickstart.mdx index f97e76407b..553438b411 100644 --- a/src/oss/langchain/quickstart.mdx +++ b/src/oss/langchain/quickstart.mdx @@ -19,7 +19,7 @@ def get_weather(city: str) -> str: return f"It's always sunny in {city}!" agent = create_agent( - model="anthropic:claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", tools=[get_weather], system_prompt="You are a helpful assistant", ) @@ -48,7 +48,7 @@ const getWeather = tool( ); const agent = createAgent({ - model: "anthropic:claude-sonnet-4-5", + model: "claude-sonnet-4-5-20250929", tools: [getWeather], }); @@ -207,11 +207,12 @@ Let's walk through each step: Set up your [language model](/oss/langchain/models) with the right [parameters](/oss/langchain/models#parameters) for your use case: :::python + ```python from langchain.chat_models import init_chat_model model = init_chat_model( - "anthropic:claude-sonnet-4-5", + "claude-sonnet-4-5-20250929", temperature=0.5, timeout=10, max_tokens=1000 @@ -220,11 +221,12 @@ Let's walk through each step: ::: :::js + ```ts import { initChatModel } from "langchain"; const model = await initChatModel( - "anthropic:claude-sonnet-4-5", + "claude-sonnet-4-5-20250929", { temperature: 0.5, timeout: 10, maxTokens: 1000 } ); ``` @@ -336,7 +338,7 @@ Let's walk through each step: import { createAgent } from "langchain"; const agent = createAgent({ - model: "anthropic:claude-sonnet-4-5", + model: "claude-sonnet-4-5-20250929", systemPrompt: systemPrompt, tools: [getUserLocation, getWeather], responseFormat, @@ -415,7 +417,7 @@ def get_user_location(runtime: ToolRuntime[Context]) -> str: # Configure model model = init_chat_model( - "anthropic:claude-sonnet-4-5", + "claude-sonnet-4-5-20250929", temperature=0 ) @@ -516,7 +518,7 @@ const getUserLocation = tool( // Configure model const model = await initChatModel( - "anthropic:claude-sonnet-4-5", + "claude-sonnet-4-5-20250929", { temperature: 0 } ); @@ -531,7 +533,7 @@ const checkpointer = new MemorySaver(); // Create agent const agent = createAgent({ - model: "anthropic:claude-sonnet-4-5", + model: "claude-sonnet-4-5-20250929", systemPrompt: systemPrompt, tools: [getUserLocation, getWeather], responseFormat, diff --git a/src/oss/langchain/retrieval.mdx b/src/oss/langchain/retrieval.mdx index 701ef79bf0..05eafb11fd 100644 --- a/src/oss/langchain/retrieval.mdx +++ b/src/oss/langchain/retrieval.mdx @@ -209,7 
+209,7 @@ Use fetch_url when you need to fetch information from a web-page; quote relevant """ agent = create_agent( - model="claude-sonnet-4-0", + model="claude-sonnet-4-5-20250929", tools=[fetch_url], # A tool for retrieval [!code highlight] system_prompt=system_prompt, ) @@ -218,7 +218,7 @@ agent = create_agent( :::js ```typescript -import { tool, createAgent, initChatModel } from "langchain"; +import { tool, createAgent } from "langchain"; const fetchUrl = tool( (url: string) => { @@ -318,7 +318,7 @@ print(response['messages'][-1].content) ::: :::js ```typescript -import { tool, createAgent, initChatModel, HumanMessage } from "langchain"; +import { tool, createAgent, HumanMessage } from "langchain"; import * as z from "zod"; const ALLOWED_DOMAINS = ["https://langchain-ai.github.io/"]; diff --git a/src/oss/langchain/runtime.mdx b/src/oss/langchain/runtime.mdx index 91d3500afe..125b6a1857 100644 --- a/src/oss/langchain/runtime.mdx +++ b/src/oss/langchain/runtime.mdx @@ -43,7 +43,7 @@ class Context: user_name: str agent = create_agent( - model="openai:gpt-5-nano", + model="gpt-5-nano", tools=[...], context_schema=Context # [!code highlight] ) @@ -64,7 +64,7 @@ const contextSchema = z.object({ // [!code highlight] }); // [!code highlight] const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [ /* ... */ ], @@ -187,7 +187,7 @@ def log_after_model(state: AgentState, runtime: Runtime[Context]) -> dict | None return None agent = create_agent( - model="openai:gpt-5-nano", + model="gpt-5-nano", tools=[...], middleware=[dynamic_system_prompt, log_before_model, log_after_model], # [!code highlight] context_schema=Context @@ -241,7 +241,7 @@ const loggingMiddleware = createMiddleware({ }); const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [ /* ... 
*/ ], diff --git a/src/oss/langchain/short-term-memory.mdx b/src/oss/langchain/short-term-memory.mdx index 65662f8a88..601eb57a5b 100644 --- a/src/oss/langchain/short-term-memory.mdx +++ b/src/oss/langchain/short-term-memory.mdx @@ -41,7 +41,7 @@ from langgraph.checkpoint.memory import InMemorySaver # [!code highlight] agent = create_agent( - "openai:gpt-5", + "gpt-5", [get_user_info], checkpointer=InMemorySaver(), # [!code highlight] ) @@ -60,7 +60,7 @@ import { MemorySaver } from "@langchain/langgraph"; const checkpointer = new MemorySaver(); const agent = createAgent({ - model: "anthropic:claude-sonnet-4-5", + model: "claude-sonnet-4-5-20250929", tools: [], checkpointer, }); @@ -92,7 +92,7 @@ DB_URI = "postgresql://postgres:postgres@localhost:5442/postgres?sslmode=disable with PostgresSaver.from_conn_string(DB_URI) as checkpointer: checkpointer.setup() # auto create tables in PostgresSql agent = create_agent( - "openai:gpt-5", + "gpt-5", [get_user_info], checkpointer=checkpointer, # [!code highlight] ) @@ -125,7 +125,7 @@ class CustomAgentState(AgentState): # [!code highlight] preferences: dict # [!code highlight] agent = create_agent( - "openai:gpt-5", + "gpt-5", [get_user_info], state_schema=CustomAgentState, # [!code highlight] checkpointer=InMemorySaver(), @@ -384,7 +384,7 @@ def delete_old_messages(state: AgentState, runtime: Runtime) -> dict | None: agent = create_agent( - "openai:gpt-5-nano", + "gpt-5-nano", tools=[], system_prompt="Please be concise and to the point.", middleware=[delete_old_messages], @@ -503,11 +503,11 @@ from langchain_core.runnables import RunnableConfig checkpointer = InMemorySaver() agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=[], middleware=[ SummarizationMiddleware( - model="openai:gpt-4o-mini", + model="gpt-4o-mini", max_tokens_before_summary=4000, # Trigger summarization at 4000 tokens messages_to_keep=20, # Keep last 20 messages after summary ) @@ -541,11 +541,11 @@ import { MemorySaver } from "@langchain/langgraph"; const checkpointer = new MemorySaver(); const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [], middleware: [ summarizationMiddleware({ - model: "openai:gpt-4o-mini", + model: "gpt-4o-mini", maxTokensBeforeSummary: 4000, messagesToKeep: 20, }), @@ -596,7 +596,7 @@ def get_user_info( return "User is John Smith" if user_id == "user_123" else "Unknown user" agent = create_agent( - model="openai:gpt-5-nano", + model="gpt-5-nano", tools=[get_user_info], state_schema=CustomState, ) @@ -701,7 +701,7 @@ def greet( return f"Hello {user_name}!" 
# [!code highlight] agent = create_agent( - model="openai:gpt-5-nano", + model="gpt-5-nano", tools=[update_user_info, greet], state_schema=CustomState, context_schema=CustomContext, # [!code highlight] @@ -803,7 +803,7 @@ def dynamic_system_prompt(request: ModelRequest) -> str: agent = create_agent( - model="openai:gpt-5-nano", + model="gpt-5-nano", tools=[get_weather], middleware=[dynamic_system_prompt], context_schema=CustomContext, @@ -1078,7 +1078,7 @@ def validate_response(state: AgentState, runtime: Runtime) -> dict | None: return None agent = create_agent( - model="openai:gpt-5-nano", + model="gpt-5-nano", tools=[], middleware=[validate_response], checkpointer=InMemorySaver(), diff --git a/src/oss/langchain/sql-agent.mdx b/src/oss/langchain/sql-agent.mdx index 5bc30e0662..f494ecb4d8 100644 --- a/src/oss/langchain/sql-agent.mdx +++ b/src/oss/langchain/sql-agent.mdx @@ -388,7 +388,7 @@ import requests # Initialize an LLM -model = init_chat_model("openai:gpt-4.1") +model = init_chat_model("gpt-4.1") # Get the database, store it locally url = "https://storage.googleapis.com/benchmarks-artifacts/chinook/Chinook.db" diff --git a/src/oss/langchain/streaming.mdx b/src/oss/langchain/streaming.mdx index 56d81d5b05..3433dcf293 100644 --- a/src/oss/langchain/streaming.mdx +++ b/src/oss/langchain/streaming.mdx @@ -47,7 +47,7 @@ def get_weather(city: str) -> str: return f"It's always sunny in {city}!" agent = create_agent( - model="openai:gpt-5-nano", + model="gpt-5-nano", tools=[get_weather], ) for chunk in agent.stream( # [!code highlight] @@ -166,7 +166,7 @@ def get_weather(city: str) -> str: return f"It's always sunny in {city}!" agent = create_agent( - model="openai:gpt-5-nano", + model="gpt-5-nano", tools=[get_weather], ) for token, metadata in agent.stream( # [!code highlight] @@ -297,7 +297,7 @@ const getWeather = tool( ); const agent = createAgent({ - model: "openai:gpt-4o-mini", + model: "gpt-4o-mini", tools: [getWeather], }); @@ -330,7 +330,7 @@ def get_weather(city: str) -> str: return f"It's always sunny in {city}!" agent = create_agent( - model="anthropic:claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", tools=[get_weather], ) @@ -377,7 +377,7 @@ const getWeather = tool( ); const agent = createAgent({ - model: "openai:gpt-4o-mini", + model: "gpt-4o-mini", tools: [getWeather], }); @@ -417,7 +417,7 @@ def get_weather(city: str) -> str: return f"It's always sunny in {city}!" 
agent = create_agent( - model="openai:gpt-5-nano", + model="gpt-5-nano", tools=[get_weather], ) @@ -478,7 +478,7 @@ const getWeather = tool( ); const agent = createAgent({ - model: "openai:gpt-4o-mini", + model: "gpt-4o-mini", tools: [getWeather], }); diff --git a/src/oss/langchain/structured-output.mdx b/src/oss/langchain/structured-output.mdx index 5940becb05..0d90cfab94 100644 --- a/src/oss/langchain/structured-output.mdx +++ b/src/oss/langchain/structured-output.mdx @@ -105,7 +105,7 @@ LangChain automatically uses `ProviderStrategy` when you pass a schema type dire phone: str = Field(description="The phone number of the person") agent = create_agent( - model="openai:gpt-5", + model="gpt-5", tools=tools, response_format=ContactInfo # Auto-selects ProviderStrategy ) @@ -131,7 +131,7 @@ LangChain automatically uses `ProviderStrategy` when you pass a schema type dire phone: str # The phone number of the person agent = create_agent( - model="openai:gpt-5", + model="gpt-5", tools=tools, response_format=ContactInfo # Auto-selects ProviderStrategy ) @@ -156,7 +156,7 @@ LangChain automatically uses `ProviderStrategy` when you pass a schema type dire phone: str # The phone number of the person agent = create_agent( - model="openai:gpt-5", + model="gpt-5", tools=tools, response_format=ContactInfo # Auto-selects ProviderStrategy ) @@ -185,7 +185,7 @@ LangChain automatically uses `ProviderStrategy` when you pass a schema type dire } agent = create_agent( - model="openai:gpt-5", + model="gpt-5", tools=tools, response_format=contact_info_schema # Auto-selects ProviderStrategy ) @@ -342,7 +342,7 @@ class ToolStrategy(Generic[SchemaT]): key_points: list[str] = Field(description="The key points of the review. Lowercase, 1-3 words each.") agent = create_agent( - model="openai:gpt-5", + model="gpt-5", tools=tools, response_format=ToolStrategy(ProductReview) ) @@ -369,7 +369,7 @@ class ToolStrategy(Generic[SchemaT]): key_points: list[str] # The key points of the review agent = create_agent( - model="openai:gpt-5", + model="gpt-5", tools=tools, response_format=ToolStrategy(ProductReview) ) @@ -395,7 +395,7 @@ class ToolStrategy(Generic[SchemaT]): key_points: list[str] # The key points of the review agent = create_agent( - model="openai:gpt-5", + model="gpt-5", tools=tools, response_format=ToolStrategy(ProductReview) ) @@ -437,7 +437,7 @@ class ToolStrategy(Generic[SchemaT]): } agent = create_agent( - model="openai:gpt-5", + model="gpt-5", tools=tools, response_format=ToolStrategy(product_review_schema) ) @@ -469,7 +469,7 @@ class ToolStrategy(Generic[SchemaT]): description: str = Field(description="Brief description of the complaint") agent = create_agent( - model="openai:gpt-5", + model="gpt-5", tools=tools, response_format=ToolStrategy(Union[ProductReview, CustomerComplaint]) ) @@ -500,7 +500,7 @@ class MeetingAction(BaseModel): priority: Literal["low", "medium", "high"] = Field(description="Priority level") agent = create_agent( - model="openai:gpt-5", + model="gpt-5", tools=[], response_format=ToolStrategy( schema=MeetingAction, @@ -746,7 +746,7 @@ class EventDetails(BaseModel): date: str = Field(description="Event date") agent = create_agent( - model="openai:gpt-5", + model="gpt-5", tools=[], response_format=ToolStrategy(Union[ContactInfo, EventDetails]) # Default: handle_errors=True ) @@ -861,7 +861,7 @@ class ProductRating(BaseModel): comment: str = Field(description="Review comment") agent = create_agent( - model="openai:gpt-5", + model="gpt-5", tools=[], response_format=ToolStrategy(ProductRating), 
# Default: handle_errors=True system_prompt="You are a helpful assistant that parses product reviews. Do not make any field or value up." diff --git a/src/oss/langchain/supervisor.mdx b/src/oss/langchain/supervisor.mdx index c499409beb..3e9b562ae9 100644 --- a/src/oss/langchain/supervisor.mdx +++ b/src/oss/langchain/supervisor.mdx @@ -753,7 +753,7 @@ def get_available_time_slots( # Step 2: Create specialized sub-agents # ============================================================================ -model = init_chat_model("anthropic:claude-3-5-haiku-latest") # for example +model = init_chat_model("claude-haiku-4-5-20251001") # for example calendar_agent = create_agent( model, @@ -931,7 +931,7 @@ const getAvailableTimeSlots = tool( // ============================================================================ const llm = new ChatAnthropic({ - model: "claude-3-5-haiku-latest", + model: "claude-haiku-4-5-20251001", }); const calendarAgent = createAgent({ diff --git a/src/oss/langchain/test.mdx b/src/oss/langchain/test.mdx index f14186cae6..0103663a67 100644 --- a/src/oss/langchain/test.mdx +++ b/src/oss/langchain/test.mdx @@ -134,7 +134,7 @@ def get_weather(city: str): """Get weather information for a city.""" return f"It's 75 degrees and sunny in {city}." -agent = create_agent("openai:gpt-4o", tools=[get_weather]) +agent = create_agent("gpt-4o", tools=[get_weather]) evaluator = create_trajectory_match_evaluator( # [!code highlight] trajectory_match_mode="strict", # [!code highlight] @@ -187,7 +187,7 @@ const getWeather = tool( ); const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [getWeather] }); @@ -253,7 +253,7 @@ def get_events(city: str): """Get events happening in a city.""" return f"Concert at the park in {city} tonight." -agent = create_agent("openai:gpt-4o", tools=[get_weather, get_events]) +agent = create_agent("gpt-4o", tools=[get_weather, get_events]) evaluator = create_trajectory_match_evaluator( # [!code highlight] trajectory_match_mode="unordered", # [!code highlight] @@ -317,7 +317,7 @@ const getEvents = tool( ); const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [getWeather, getEvents] }); @@ -388,7 +388,7 @@ def get_detailed_forecast(city: str): """Get detailed weather forecast for a city.""" return f"Detailed forecast for {city}: sunny all week." -agent = create_agent("openai:gpt-4o", tools=[get_weather, get_detailed_forecast]) +agent = create_agent("gpt-4o", tools=[get_weather, get_detailed_forecast]) evaluator = create_trajectory_match_evaluator( # [!code highlight] trajectory_match_mode="superset", # [!code highlight] @@ -453,7 +453,7 @@ const getDetailedForecast = tool( ); const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [getWeather, getDetailedForecast] }); @@ -530,7 +530,7 @@ def get_weather(city: str): """Get weather information for a city.""" return f"It's 75 degrees and sunny in {city}." 
-agent = create_agent("openai:gpt-4o", tools=[get_weather]) +agent = create_agent("gpt-4o", tools=[get_weather]) evaluator = create_trajectory_llm_as_judge( # [!code highlight] model="openai:o3-mini", # [!code highlight] @@ -573,7 +573,7 @@ const getWeather = tool( ); const agent = createAgent({ - model: "openai:gpt-4o", + model: "gpt-4o", tools: [getWeather] }); diff --git a/src/oss/langgraph/add-memory.mdx b/src/oss/langgraph/add-memory.mdx index a2a6a794c2..6eb2f0817b 100644 --- a/src/oss/langgraph/add-memory.mdx +++ b/src/oss/langgraph/add-memory.mdx @@ -90,7 +90,7 @@ const graph = builder.compile({ checkpointer }); from langgraph.graph import StateGraph, MessagesState, START from langgraph.checkpoint.postgres import PostgresSaver # [!code highlight] - model = init_chat_model(model="anthropic:claude-3-5-haiku-latest") + model = init_chat_model(model="claude-haiku-4-5-20251001") DB_URI = "postgresql://postgres:postgres@localhost:5442/postgres?sslmode=disable" with PostgresSaver.from_conn_string(DB_URI) as checkpointer: # [!code highlight] @@ -133,7 +133,7 @@ const graph = builder.compile({ checkpointer }); from langgraph.graph import StateGraph, MessagesState, START from langgraph.checkpoint.postgres.aio import AsyncPostgresSaver # [!code highlight] - model = init_chat_model(model="anthropic:claude-3-5-haiku-latest") + model = init_chat_model(model="claude-haiku-4-5-20251001") DB_URI = "postgresql://postgres:postgres@localhost:5442/postgres?sslmode=disable" async with AsyncPostgresSaver.from_conn_string(DB_URI) as checkpointer: # [!code highlight] @@ -196,7 +196,7 @@ const graph = builder.compile({ checkpointer }); .register(registry, MessagesZodMeta), }); - const model = new ChatAnthropic({ model: "claude-3-5-haiku-20241022" }); + const model = new ChatAnthropic({ model: "claude-haiku-4-5-20251001" }); const DB_URI = "postgresql://postgres:postgres@localhost:5442/postgres?sslmode=disable"; const checkpointer = PostgresSaver.fromConnString(DB_URI); @@ -252,7 +252,7 @@ const graph = builder.compile({ checkpointer }); from langgraph.graph import StateGraph, MessagesState, START from langgraph.checkpoint.mongodb import MongoDBSaver # [!code highlight] - model = init_chat_model(model="anthropic:claude-3-5-haiku-latest") + model = init_chat_model(model="claude-haiku-4-5-20251001") DB_URI = "localhost:27017" with MongoDBSaver.from_conn_string(DB_URI) as checkpointer: # [!code highlight] @@ -294,7 +294,7 @@ const graph = builder.compile({ checkpointer }); from langgraph.graph import StateGraph, MessagesState, START from langgraph.checkpoint.mongodb.aio import AsyncMongoDBSaver # [!code highlight] - model = init_chat_model(model="anthropic:claude-3-5-haiku-latest") + model = init_chat_model(model="claude-haiku-4-5-20251001") DB_URI = "localhost:27017" async with AsyncMongoDBSaver.from_conn_string(DB_URI) as checkpointer: # [!code highlight] @@ -349,7 +349,7 @@ const graph = builder.compile({ checkpointer }); from langgraph.graph import StateGraph, MessagesState, START from langgraph.checkpoint.redis import RedisSaver # [!code highlight] - model = init_chat_model(model="anthropic:claude-3-5-haiku-latest") + model = init_chat_model(model="claude-haiku-4-5-20251001") DB_URI = "redis://localhost:6379" with RedisSaver.from_conn_string(DB_URI) as checkpointer: # [!code highlight] @@ -392,7 +392,7 @@ const graph = builder.compile({ checkpointer }); from langgraph.graph import StateGraph, MessagesState, START from langgraph.checkpoint.redis.aio import AsyncRedisSaver # [!code highlight] - model = 
init_chat_model(model="anthropic:claude-3-5-haiku-latest") + model = init_chat_model(model="claude-haiku-4-5-20251001") DB_URI = "redis://localhost:6379" async with AsyncRedisSaver.from_conn_string(DB_URI) as checkpointer: # [!code highlight] @@ -580,7 +580,7 @@ const graph = builder.compile({ store }); from langgraph.store.postgres import PostgresStore # [!code highlight] from langgraph.store.base import BaseStore - model = init_chat_model(model="anthropic:claude-3-5-haiku-latest") + model = init_chat_model(model="claude-haiku-4-5-20251001") DB_URI = "postgresql://postgres:postgres@localhost:5442/postgres?sslmode=disable" @@ -660,7 +660,7 @@ const graph = builder.compile({ store }); from langgraph.store.postgres.aio import AsyncPostgresStore # [!code highlight] from langgraph.store.base import BaseStore - model = init_chat_model(model="anthropic:claude-3-5-haiku-latest") + model = init_chat_model(model="claude-haiku-4-5-20251001") DB_URI = "postgresql://postgres:postgres@localhost:5442/postgres?sslmode=disable" @@ -759,7 +759,7 @@ const graph = builder.compile({ store }); .register(registry, MessagesZodMeta), }); - const model = new ChatAnthropic({ model: "claude-3-5-haiku-20241022" }); + const model = new ChatAnthropic({ model: "claude-haiku-4-5-20251001" }); const DB_URI = "postgresql://postgres:postgres@localhost:5442/postgres?sslmode=disable"; @@ -852,7 +852,7 @@ const graph = builder.compile({ store }); from langgraph.store.redis import RedisStore # [!code highlight] from langgraph.store.base import BaseStore - model = init_chat_model(model="anthropic:claude-3-5-haiku-latest") + model = init_chat_model(model="claude-haiku-4-5-20251001") DB_URI = "redis://localhost:6379" @@ -932,7 +932,7 @@ const graph = builder.compile({ store }); from langgraph.store.redis.aio import AsyncRedisStore # [!code highlight] from langgraph.store.base import BaseStore - model = init_chat_model(model="anthropic:claude-3-5-haiku-latest") + model = init_chat_model(model="claude-haiku-4-5-20251001") DB_URI = "redis://localhost:6379" @@ -1060,6 +1060,7 @@ const items = await store.search(["user_123", "memories"], { :::python + ```python from langchain.embeddings import init_embeddings @@ -1068,7 +1069,7 @@ const items = await store.search(["user_123", "memories"], { from langgraph.store.memory import InMemoryStore from langgraph.graph import START, MessagesState, StateGraph - model = init_chat_model("openai:gpt-4o-mini") + model = init_chat_model("gpt-4o-mini") # Create store with semantic search enabled embeddings = init_embeddings("openai:text-embedding-3-small") @@ -1112,6 +1113,7 @@ const items = await store.search(["user_123", "memories"], { ::: :::js + ```typescript import { OpenAIEmbeddings, ChatOpenAI } from "@langchain/openai"; import { StateGraph, START, MessagesZodMeta, InMemoryStore } from "@langchain/langgraph"; @@ -1254,7 +1256,7 @@ const builder = new StateGraph(MessagesZodState) from langchain.chat_models import init_chat_model from langgraph.graph import StateGraph, START, MessagesState - model = init_chat_model("anthropic:claude-sonnet-4-5") + model = init_chat_model("claude-sonnet-4-5-20250929") summarization_model = model.bind(max_tokens=128) def call_model(state: MessagesState): @@ -1625,7 +1627,7 @@ const summarizeConversation = async (state: z.infer) => { from langgraph.checkpoint.memory import InMemorySaver from langmem.short_term import SummarizationNode, RunningSummary # [!code highlight] - model = init_chat_model("anthropic:claude-sonnet-4-5") + model = 
init_chat_model("claude-sonnet-4-5-20250929") summarization_model = model.bind(max_tokens=128) class State(MessagesState): @@ -1718,7 +1720,7 @@ const summarizeConversation = async (state: z.infer) => { }); // We will use this model for both the conversation and the summarization - const model = new ChatAnthropic({ model: "claude-3-haiku-20240307" }); + const model = new ChatAnthropic({ model: "claude-haiku-4-5-20251001" }); // Define the logic to call the model const callModel = async (state: z.infer) => { diff --git a/src/oss/langgraph/agentic-rag.mdx b/src/oss/langgraph/agentic-rag.mdx index f760ba75b9..10e25abec4 100644 --- a/src/oss/langgraph/agentic-rag.mdx +++ b/src/oss/langgraph/agentic-rag.mdx @@ -209,7 +209,7 @@ Note that the components will operate on the [`MessagesState`](/oss/langgraph/gr from langgraph.graph import MessagesState from langchain.chat_models import init_chat_model - response_model = init_chat_model("openai:gpt-4o", temperature=0) + response_model = init_chat_model("gpt-4o", temperature=0) def generate_query_or_respond(state: MessagesState): @@ -340,7 +340,7 @@ Note that the components will operate on the [`MessagesState`](/oss/langgraph/gr ) - grader_model = init_chat_model("openai:gpt-4o", temperature=0) + grader_model = init_chat_model("gpt-4o", temperature=0) def grade_documents( diff --git a/src/oss/langgraph/interrupts.mdx b/src/oss/langgraph/interrupts.mdx index 557dacb0cc..6b5d45703f 100644 --- a/src/oss/langgraph/interrupts.mdx +++ b/src/oss/langgraph/interrupts.mdx @@ -579,7 +579,7 @@ This approach is useful when you want the approval logic to live with the tool i return "Email cancelled by user" - model = ChatAnthropic(model="claude-sonnet-4-5").bind_tools([send_email]) + model = ChatAnthropic(model="claude-sonnet-4-5-20250929").bind_tools([send_email]) def agent_node(state: AgentState): @@ -662,7 +662,7 @@ This approach is useful when you want the approval logic to live with the tool i }, ); - const model = new ChatAnthropic({ model: "claude-sonnet-4-5" }).bindTools([sendEmailTool]); + const model = new ChatAnthropic({ model: "claude-sonnet-4-5-20250929" }).bindTools([sendEmailTool]); const Message = z.object({ role: z.enum(["user", "assistant", "tool"]), diff --git a/src/oss/langgraph/quickstart.mdx b/src/oss/langgraph/quickstart.mdx index fe87ae204e..3e8fc3cee8 100644 --- a/src/oss/langgraph/quickstart.mdx +++ b/src/oss/langgraph/quickstart.mdx @@ -31,7 +31,7 @@ from langchain.chat_models import init_chat_model model = init_chat_model( - "anthropic:claude-sonnet-4-5", + "claude-sonnet-4-5-20250929", temperature=0 ) @@ -84,7 +84,7 @@ import { tool } from "@langchain/core/tools"; import * as z from "zod"; const model = new ChatAnthropic({ - model: "claude-sonnet-4-5", + model: "claude-sonnet-4-5-20250929", temperature: 0, }); @@ -378,7 +378,7 @@ from langchain.chat_models import init_chat_model model = init_chat_model( - "anthropic:claude-sonnet-4-5", + "claude-sonnet-4-5-20250929", temperature=0 ) @@ -535,7 +535,7 @@ import { tool } from "@langchain/core/tools"; import * as z from "zod"; const model = new ChatAnthropic({ - model: "claude-sonnet-4-5", + model: "claude-sonnet-4-5-20250929", temperature: 0, }); @@ -678,7 +678,7 @@ from langchain.chat_models import init_chat_model model = init_chat_model( - "anthropic:claude-sonnet-4-5", + "claude-sonnet-4-5-20250929", temperature=0 ) @@ -741,7 +741,7 @@ import { tool } from "@langchain/core/tools"; import * as z from "zod"; const model = new ChatAnthropic({ - model: "claude-sonnet-4-5", + model: 
"claude-sonnet-4-5-20250929", temperature: 0, }); @@ -936,7 +936,7 @@ from langchain.chat_models import init_chat_model model = init_chat_model( - "anthropic:claude-sonnet-4-5", + "claude-sonnet-4-5-20250929", temperature=0 ) @@ -1052,7 +1052,7 @@ import { tool } from "@langchain/core/tools"; import * as z from "zod"; const model = new ChatAnthropic({ - model: "claude-sonnet-4-5", + model: "claude-sonnet-4-5-20250929", temperature: 0, }); diff --git a/src/oss/langgraph/streaming.mdx b/src/oss/langgraph/streaming.mdx index 9ee3e4699f..c86c5a11ca 100644 --- a/src/oss/langgraph/streaming.mdx +++ b/src/oss/langgraph/streaming.mdx @@ -483,7 +483,7 @@ class MyState: joke: str = "" -model = init_chat_model(model="openai:gpt-4o-mini") +model = init_chat_model(model="gpt-4o-mini") def call_model(state: MyState): """Call the LLM to generate a joke about a topic""" @@ -571,9 +571,9 @@ You can associate `tags` with LLM invocations to filter the streamed tokens by L from langchain.chat_models import init_chat_model # model_1 is tagged with "joke" -model_1 = init_chat_model(model="openai:gpt-4o-mini", tags=['joke']) +model_1 = init_chat_model(model="gpt-4o-mini", tags=['joke']) # model_2 is tagged with "poem" -model_2 = init_chat_model(model="openai:gpt-4o-mini", tags=['poem']) +model_2 = init_chat_model(model="gpt-4o-mini", tags=['poem']) graph = ... # define a graph that uses these LLMs @@ -631,9 +631,9 @@ for await (const [msg, metadata] of await graph.stream( from langgraph.graph import START, StateGraph # The joke_model is tagged with "joke" - joke_model = init_chat_model(model="openai:gpt-4o-mini", tags=["joke"]) + joke_model = init_chat_model(model="gpt-4o-mini", tags=["joke"]) # The poem_model is tagged with "poem" - poem_model = init_chat_model(model="openai:gpt-4o-mini", tags=["poem"]) + poem_model = init_chat_model(model="gpt-4o-mini", tags=["poem"]) class State(TypedDict): @@ -1363,7 +1363,7 @@ Set `disable_streaming=True` when initializing the model. from langchain.chat_models import init_chat_model model = init_chat_model( - "anthropic:claude-sonnet-4-5", + "claude-sonnet-4-5-20250929", # Set disable_streaming=True to disable streaming for the chat model disable_streaming=True # [!code highlight] @@ -1411,7 +1411,7 @@ This limits LangGraph ability to automatically propagate context, and affects La from langgraph.graph import START, StateGraph from langchain.chat_models import init_chat_model - model = init_chat_model(model="openai:gpt-4o-mini") + model = init_chat_model(model="gpt-4o-mini") class State(TypedDict): topic: str diff --git a/src/oss/langgraph/thinking-in-langgraph.mdx b/src/oss/langgraph/thinking-in-langgraph.mdx index 69ca18a041..4f0ae7f3de 100644 --- a/src/oss/langgraph/thinking-in-langgraph.mdx +++ b/src/oss/langgraph/thinking-in-langgraph.mdx @@ -534,7 +534,7 @@ We'll implement each node as a simple function. Remember: nodes take state, do w import { HumanMessage } from "@langchain/core/messages"; import { ChatAnthropic } from "@langchain/anthropic"; - const llm = new ChatAnthropic({ model: "claude-sonnet-4-5" }); + const llm = new ChatAnthropic({ model: "claude-sonnet-4-5-20250929" }); async function readEmail(state: EmailAgentStateType) { // Extract and parse email content @@ -739,7 +739,7 @@ We'll implement each node as a simple function. 
Remember: nodes take state, do w """Pause for human review using interrupt and route based on decision""" classification = state.get('classification', {}) - + # interrupt() must come first - any code before it will re-run on resume human_decision = interrupt({ "email_id": state.get('email_id',''), diff --git a/src/oss/langgraph/use-functional-api.mdx b/src/oss/langgraph/use-functional-api.mdx index c4f6b898a6..b37329d59e 100644 --- a/src/oss/langgraph/use-functional-api.mdx +++ b/src/oss/langgraph/use-functional-api.mdx @@ -124,7 +124,7 @@ await myWorkflow.invoke({ value: 1, anotherValue: 2 }); from langgraph.func import entrypoint, task from langgraph.checkpoint.memory import InMemorySaver - model = init_chat_model('openai:gpt-3.5-turbo') + model = init_chat_model('gpt-3.5-turbo') # Task: generate essay using an LLM @task @@ -230,7 +230,7 @@ const graph = entrypoint( from langgraph.checkpoint.memory import InMemorySaver # Initialize the LLM model - model = init_chat_model("openai:gpt-3.5-turbo") + model = init_chat_model("gpt-3.5-turbo") # Task that generates a paragraph about a given topic @task @@ -1573,7 +1573,7 @@ from langgraph.func import entrypoint, task from langgraph.checkpoint.memory import InMemorySaver from langchain_anthropic import ChatAnthropic -model = ChatAnthropic(model="claude-sonnet-4-5") +model = ChatAnthropic(model="claude-sonnet-4-5-20250929") @task def call_model(messages: list[BaseMessage]): @@ -1612,7 +1612,7 @@ import { } from "@langchain/langgraph"; import { ChatAnthropic } from "@langchain/anthropic"; -const model = new ChatAnthropic({ model: "claude-sonnet-4-5" }); +const model = new ChatAnthropic({ model: "claude-sonnet-4-5-20250929" }); const callModel = task( "callModel", diff --git a/src/oss/langgraph/use-graph-api.mdx b/src/oss/langgraph/use-graph-api.mdx index f5d51a2553..ac95c09509 100644 --- a/src/oss/langgraph/use-graph-api.mdx +++ b/src/oss/langgraph/use-graph-api.mdx @@ -1058,8 +1058,8 @@ console.log(await graph.invoke({}, { context: { myRuntimeValue: "b" } })); // [ model_provider: str = "anthropic" MODELS = { - "anthropic": init_chat_model("anthropic:claude-3-5-haiku-latest"), - "openai": init_chat_model("openai:gpt-4.1-mini"), + "anthropic": init_chat_model("claude-haiku-4-5-20251001"), + "openai": init_chat_model("gpt-4.1-mini"), } def call_model(state: MessagesState, runtime: Runtime[ContextSchema]): @@ -1086,7 +1086,7 @@ console.log(await graph.invoke({}, { context: { myRuntimeValue: "b" } })); // [ ``` ``` - claude-3-5-haiku-20241022 + claude-haiku-4-5-20251001 gpt-4.1-mini-2025-04-14 ``` ::: @@ -1114,7 +1114,7 @@ console.log(await graph.invoke({}, { context: { myRuntimeValue: "b" } })); // [ }); const MODELS = { - anthropic: new ChatAnthropic({ model: "claude-3-5-haiku-latest" }), + anthropic: new ChatAnthropic({ model: "claude-haiku-4-5-20251001" }), openai: new ChatOpenAI({ model: "gpt-4o-mini" }), }; @@ -1144,7 +1144,7 @@ console.log(await graph.invoke({}, { context: { myRuntimeValue: "b" } })); // [ ``` ``` - claude-3-5-haiku-20241022 + claude-haiku-4-5-20251001 gpt-4o-mini-2024-07-18 ``` ::: @@ -1168,8 +1168,8 @@ console.log(await graph.invoke({}, { context: { myRuntimeValue: "b" } })); // [ system_message: str | None = None MODELS = { - "anthropic": init_chat_model("anthropic:claude-3-5-haiku-latest"), - "openai": init_chat_model("openai:gpt-4.1-mini"), + "anthropic": init_chat_model("claude-haiku-4-5-20251001"), + "openai": init_chat_model("gpt-4.1-mini"), } def call_model(state: MessagesState, runtime: Runtime[ContextSchema]): @@ 
-1227,7 +1227,7 @@ console.log(await graph.invoke({}, { context: { myRuntimeValue: "b" } })); // [ }); const MODELS = { - anthropic: new ChatAnthropic({ model: "claude-3-5-haiku-latest" }), + anthropic: new ChatAnthropic({ model: "claude-haiku-4-5-20251001" }), openai: new ChatOpenAI({ model: "gpt-4o-mini" }), }; @@ -1341,7 +1341,7 @@ By default, the retry policy retries on any exception except for the following: from langchain.messages import AIMessage db = SQLDatabase.from_uri("sqlite:///:memory:") - model = init_chat_model("anthropic:claude-3-5-haiku-latest") + model = init_chat_model("claude-haiku-4-5-20251001") def query_database(state: MessagesState): query_result = db.run("SELECT * FROM Artist LIMIT 10;") diff --git a/src/oss/langgraph/use-time-travel.mdx b/src/oss/langgraph/use-time-travel.mdx index 660224b8e2..e49944b43a 100644 --- a/src/oss/langgraph/use-time-travel.mdx +++ b/src/oss/langgraph/use-time-travel.mdx @@ -99,7 +99,7 @@ class State(TypedDict): model = init_chat_model( - "anthropic:claude-sonnet-4-5", + "claude-sonnet-4-5-20250929", temperature=0, ) @@ -149,7 +149,7 @@ const State = z.object({ }); const model = new ChatAnthropic({ - model: "claude-sonnet-4-5", + model: "claude-sonnet-4-5-20250929", temperature: 0, }); diff --git a/src/oss/langgraph/workflows-agents.mdx b/src/oss/langgraph/workflows-agents.mdx index 5633cdd622..6440fef308 100644 --- a/src/oss/langgraph/workflows-agents.mdx +++ b/src/oss/langgraph/workflows-agents.mdx @@ -39,7 +39,7 @@ def _set_env(var: str): _set_env("ANTHROPIC_API_KEY") -llm = ChatAnthropic(model="claude-sonnet-4-5") +llm = ChatAnthropic(model="claude-sonnet-4-5-20250929") ``` ::: @@ -69,7 +69,7 @@ bun add @langchain/langgraph @langchain/core import { ChatAnthropic } from "@langchain/anthropic"; const llm = new ChatAnthropic({ - model: "claude-sonnet-4-5", + model: "claude-sonnet-4-5-20250929", apiKey: "" }); ``` diff --git a/src/oss/python/integrations/callbacks/llmonitor.mdx b/src/oss/python/integrations/callbacks/llmonitor.mdx index 724f800107..653b662294 100644 --- a/src/oss/python/integrations/callbacks/llmonitor.mdx +++ b/src/oss/python/integrations/callbacks/llmonitor.mdx @@ -100,7 +100,7 @@ os.environ["SERPAPI_API_KEY"] = "" handler = LLMonitorCallbackHandler() llm = ChatOpenAI(temperature=0, callbacks=[handler]) tools = load_tools(["serpapi", "llm-math"], llm=llm) -agent = create_agent("openai:gpt-4.1-mini", tools) +agent = create_agent("gpt-4.1-mini", tools) input_message = { "role": "user", diff --git a/src/oss/python/integrations/chat/anthropic.mdx b/src/oss/python/integrations/chat/anthropic.mdx index 524f84bdeb..cfea511d86 100644 --- a/src/oss/python/integrations/chat/anthropic.mdx +++ b/src/oss/python/integrations/chat/anthropic.mdx @@ -74,7 +74,7 @@ Now we can instantiate our model object and generate chat completions: from langchain_anthropic import ChatAnthropic model = ChatAnthropic( - model="claude-3-5-haiku-latest", + model="claude-haiku-4-5-20251001", temperature=0, max_tokens=1024, timeout=None, @@ -117,7 +117,7 @@ When using tools, [extended thinking](#extended-thinking), and other features, c from langchain_anthropic import ChatAnthropic from typing_extensions import Annotated -model = ChatAnthropic(model="claude-3-5-haiku-latest") +model = ChatAnthropic(model="claude-haiku-4-5-20251001") def get_weather( @@ -210,7 +210,7 @@ image_file_id = file.id from langchain_anthropic import ChatAnthropic model = ChatAnthropic( - model="claude-sonnet-4-20250514", + model="claude-sonnet-4-5-20250929", 
betas=["files-api-2025-04-14"], ) @@ -250,7 +250,7 @@ pdf_file_id = file.id from langchain_anthropic import ChatAnthropic model = ChatAnthropic( - model="claude-sonnet-4-20250514", + model="claude-sonnet-4-5-20250929", betas=["files-api-2025-04-14"], ) @@ -282,7 +282,7 @@ import json from langchain_anthropic import ChatAnthropic model = ChatAnthropic( - model="claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", max_tokens=5000, thinking={"type": "enabled", "budget_tokens": 2000}, ) @@ -317,7 +317,7 @@ To enable caching on an element of a prompt, mark its associated content block u import requests from langchain_anthropic import ChatAnthropic -model = ChatAnthropic(model="claude-3-7-sonnet-20250219") +model = ChatAnthropic(model="claude-sonnet-4-5-20250929") # Pull LangChain readme get_response = requests.get( @@ -371,7 +371,7 @@ Second: ```python model = ChatAnthropic( - model="claude-3-7-sonnet-20250219", + model="claude-sonnet-4-5-20250929", betas=["extended-cache-ttl-2025-04-11"], # [!code highlight] ) ``` @@ -421,7 +421,7 @@ def get_weather(location: str) -> str: weather_tool = convert_to_anthropic_tool(get_weather) # [!code highlight] weather_tool["cache_control"] = {"type": "ephemeral"} # [!code highlight] -model = ChatAnthropic(model="claude-3-7-sonnet-20250219") +model = ChatAnthropic(model="claude-sonnet-4-5-20250929") model_with_tools = model.bind_tools([weather_tool]) query = "What's the weather in San Francisco?" @@ -458,7 +458,7 @@ from langgraph.checkpoint.memory import MemorySaver from langgraph.graph import START, StateGraph, add_messages from typing_extensions import Annotated, TypedDict -model = ChatAnthropic(model="claude-3-7-sonnet-20250219") +model = ChatAnthropic(model="claude-sonnet-4-5-20250929") # Pull LangChain readme get_response = requests.get( @@ -584,7 +584,7 @@ from langchain_anthropic import ChatAnthropic from langchain.tools import tool model = ChatAnthropic( - model="claude-3-7-sonnet-20250219", + model="claude-sonnet-4-5-20250929", betas=["token-efficient-tools-2025-02-19"], # [!code highlight] ) @@ -618,7 +618,7 @@ In this example we pass a [plain text document](https://docs.claude.com/en/docs/ ```python from langchain_anthropic import ChatAnthropic -model = ChatAnthropic(model="claude-3-5-haiku-latest") +model = ChatAnthropic(model="claude-haiku-4-5-20251001") messages = [ { @@ -781,7 +781,7 @@ async def retrieval_tool( # Create agent -model = init_chat_model("anthropic:claude-3-5-haiku-latest") +model = init_chat_model("claude-haiku-4-5-20251001") checkpointer = InMemorySaver() agent = create_agent(model, [retrieval_tool], checkpointer=checkpointer) @@ -848,7 +848,7 @@ message = { } # Query model -model = ChatAnthropic(model="claude-3-5-haiku-latest") +model = ChatAnthropic(model="claude-haiku-4-5-20251001") response = model.invoke([message]) ``` @@ -866,7 +866,7 @@ See [Anthropic documentation](https://docs.claude.com/en/docs/build-with-claude/ from langchain_anthropic import ChatAnthropic model = ChatAnthropic( - model="claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", betas=["context-management-2025-06-27"], context_management={"edits": [{"type": "clear_tool_uses_20250919"}]}, ) @@ -889,7 +889,7 @@ Claude can use a [web search tool](https://docs.claude.com/en/docs/agents-and-to ```python from langchain_anthropic import ChatAnthropic -model = ChatAnthropic(model="claude-sonnet-4-5") +model = ChatAnthropic(model="claude-sonnet-4-5-20250929") tool = {"type": "web_search_20250305", "name": "web_search", "max_uses": 3} model_with_tools = 
model.bind_tools([tool]) @@ -905,7 +905,7 @@ from langchain_anthropic import ChatAnthropic ```python model = ChatAnthropic( - model="claude-3-5-haiku-latest", + model="claude-haiku-4-5-20251001", betas=["web-fetch-2025-09-10"], # Enable web fetch beta ) @@ -933,7 +933,7 @@ Claude can use a [code execution tool](https://docs.claude.com/en/docs/agents-an from langchain_anthropic import ChatAnthropic model = ChatAnthropic( - model="claude-sonnet-4-20250514", + model="claude-sonnet-4-5-20250929", betas=["code-execution-2025-05-22"], ) @@ -965,7 +965,7 @@ file_id = file.id from langchain_anthropic import ChatAnthropic model = ChatAnthropic( - model="claude-sonnet-4-20250514", + model="claude-sonnet-4-5-20250929", betas=["code-execution-2025-05-22"], ) @@ -1020,7 +1020,7 @@ Claude supports a memory tool for client-side storage and retrieval of context a from langchain_anthropic import ChatAnthropic model = ChatAnthropic( - model="claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", betas=["context-management-2025-06-27"], ) model_with_tools = model.bind_tools([{"type": "memory_20250818", "name": "memory"}]) @@ -1053,7 +1053,7 @@ mcp_servers = [ ] model = ChatAnthropic( - model="claude-sonnet-4-20250514", + model="claude-sonnet-4-5-20250929", betas=["mcp-client-2025-04-04"], mcp_servers=mcp_servers, ) @@ -1071,7 +1071,7 @@ The text editor tool can be used to view and modify text files. See docs [here]( ```python from langchain_anthropic import ChatAnthropic -model = ChatAnthropic(model="claude-3-7-sonnet-20250219") +model = ChatAnthropic(model="claude-sonnet-4-5-20250929") tool = {"type": "text_editor_20250124", "name": "str_replace_editor"} model_with_tools = model.bind_tools([tool]) diff --git a/src/oss/python/integrations/chat/bedrock.mdx b/src/oss/python/integrations/chat/bedrock.mdx index de310513d4..fd498f4e01 100644 --- a/src/oss/python/integrations/chat/bedrock.mdx +++ b/src/oss/python/integrations/chat/bedrock.mdx @@ -210,7 +210,7 @@ To enable caching on an element of a prompt, mark its associated content block u import requests from langchain_aws import ChatBedrockConverse -llm = ChatBedrockConverse(model="us.anthropic.claude-3-7-sonnet-20250219-v1:0") +llm = ChatBedrockConverse(model="us.anthropic.claude-sonnet-4-5-20250929-v1:0") # Pull LangChain readme get_response = requests.get( diff --git a/src/oss/python/integrations/providers/cratedb.mdx b/src/oss/python/integrations/providers/cratedb.mdx index 06d98ae180..bd8a8209fa 100644 --- a/src/oss/python/integrations/providers/cratedb.mdx +++ b/src/oss/python/integrations/providers/cratedb.mdx @@ -147,7 +147,7 @@ set_llm_cache(CrateDBCache(engine)) # Invoke LLM conversation. llm = ChatOpenAI( - model_name="chatgpt-4o-latest", + model_name="gpt-4o", temperature=0.7, ) print() @@ -184,7 +184,7 @@ set_llm_cache( ) # Invoke LLM conversation. -llm = ChatOpenAI(model_name="chatgpt-4o-latest") +llm = ChatOpenAI(model_name="gpt-4o") print() print("Asking with semantic cache:") answer = llm.invoke("What is the answer to everything?") diff --git a/src/oss/python/integrations/providers/moorcheh.mdx b/src/oss/python/integrations/providers/moorcheh.mdx index 75dd60422d..270d9d090d 100644 --- a/src/oss/python/integrations/providers/moorcheh.mdx +++ b/src/oss/python/integrations/providers/moorcheh.mdx @@ -55,7 +55,7 @@ Moorcheh supports generative AI responses using various LLM models including Cla query = "What are the main topics covered in the documents?" 
answer = store.generative_answer( query, - ai_model="anthropic.claude-3-7-sonnet-20250219-v1:0" + ai_model="anthropic.claude-sonnet-4-5-20250929-v1:0" ) print(answer) ``` diff --git a/src/oss/python/integrations/providers/tensorlake.mdx b/src/oss/python/integrations/providers/tensorlake.mdx index 2bfd738123..2b05a47eeb 100644 --- a/src/oss/python/integrations/providers/tensorlake.mdx +++ b/src/oss/python/integrations/providers/tensorlake.mdx @@ -66,7 +66,7 @@ import os async def main(question): # Create the agent with the Tensorlake tool agent = create_agent( - model="openai:gpt-4o-mini", + model="gpt-4o-mini", tools=[document_markdown_tool], prompt=( """ diff --git a/src/oss/python/integrations/tools/TEMPLATE.mdx b/src/oss/python/integrations/tools/TEMPLATE.mdx index 83cf8dbdd1..8883da5397 100644 --- a/src/oss/python/integrations/tools/TEMPLATE.mdx +++ b/src/oss/python/integrations/tools/TEMPLATE.mdx @@ -113,7 +113,7 @@ from langchain.agents import create_agent tools = [tool] agent = create_agent( - model="anthropic:claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", tools=tools, ) diff --git a/src/oss/python/integrations/tools/bash.mdx b/src/oss/python/integrations/tools/bash.mdx index 463d2a126b..53816fe72b 100644 --- a/src/oss/python/integrations/tools/bash.mdx +++ b/src/oss/python/integrations/tools/bash.mdx @@ -43,7 +43,7 @@ from langchain.agents import create_agent tools = [shell_tool] -agent = create_agent("openai:gpt-4.1-mini", tools) +agent = create_agent("gpt-4.1-mini", tools) input_message = { "role": "user", diff --git a/src/oss/python/integrations/tools/google_jobs.mdx b/src/oss/python/integrations/tools/google_jobs.mdx index 9b70a08e9c..7ca4033d5c 100644 --- a/src/oss/python/integrations/tools/google_jobs.mdx +++ b/src/oss/python/integrations/tools/google_jobs.mdx @@ -77,7 +77,7 @@ tools = load_tools(["google-jobs"]) from langchain.agents import create_agent -agent = create_agent("openai:gpt-4.1-mini", tools) +agent = create_agent("gpt-4.1-mini", tools) ``` ```python diff --git a/src/oss/python/integrations/tools/graphql.mdx b/src/oss/python/integrations/tools/graphql.mdx index dc71200416..01fe7a5091 100644 --- a/src/oss/python/integrations/tools/graphql.mdx +++ b/src/oss/python/integrations/tools/graphql.mdx @@ -49,7 +49,7 @@ tools = load_tools( from langchain.agents import create_agent -agent = create_agent("openai:gpt-4.1-mini", tools) +agent = create_agent("gpt-4.1-mini", tools) ``` Now, we can use the Agent to run queries against the Star Wars GraphQL API. Let's ask the Agent to list all the Star Wars films and their release dates. 
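As a hedged sketch of that invocation (the message text is illustrative, and the streaming pattern follows the other tool pages in this patch; actual API output is not reproduced):

```python
input_message = {
    "role": "user",
    "content": "List all the Star Wars films and their release dates.",
}

# Stream the agent's intermediate steps and print the latest message at each step
for step in agent.stream({"messages": [input_message]}, stream_mode="values"):
    step["messages"][-1].pretty_print()
```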
diff --git a/src/oss/python/integrations/tools/openweathermap.mdx b/src/oss/python/integrations/tools/openweathermap.mdx index 83573ff9ad..8bd661989f 100644 --- a/src/oss/python/integrations/tools/openweathermap.mdx +++ b/src/oss/python/integrations/tools/openweathermap.mdx @@ -65,7 +65,7 @@ os.environ["OPENAI_API_KEY"] = "" os.environ["OPENWEATHERMAP_API_KEY"] = "" tools = [weather.run] -agent = create_agent("openai:gpt-4.1-mini", tools) +agent = create_agent("gpt-4.1-mini", tools) ``` ```python diff --git a/src/oss/python/integrations/tools/playwright.mdx b/src/oss/python/integrations/tools/playwright.mdx index 1a88bf14bb..85d3d5aa51 100644 --- a/src/oss/python/integrations/tools/playwright.mdx +++ b/src/oss/python/integrations/tools/playwright.mdx @@ -121,7 +121,7 @@ from langchain.agents import create_agent model = ChatAnthropic( - model_name="claude-3-haiku-20240307", temperature=0 + model_name="claude-haiku-4-5-20251001", temperature=0 ) # or any other LLM, e.g., ChatOpenAI(), OpenAI() agent_chain = create_agent(model=model, tools=tools) diff --git a/src/oss/python/integrations/tools/privy.mdx b/src/oss/python/integrations/tools/privy.mdx index 8fd19549ef..217859b01d 100644 --- a/src/oss/python/integrations/tools/privy.mdx +++ b/src/oss/python/integrations/tools/privy.mdx @@ -44,7 +44,7 @@ print(f"Wallet created! Address: {privy_tool.wallet_address}") # Create agent agent = create_agent( - model="anthropic:claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", tools=[privy_tool], ) @@ -143,7 +143,7 @@ tools = [PrivyWalletTool()] # Create agent agent = create_agent( - model="anthropic:claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", tools=tools, ) diff --git a/src/oss/python/integrations/tools/riza.mdx b/src/oss/python/integrations/tools/riza.mdx index e4d6429269..ac984585b4 100644 --- a/src/oss/python/integrations/tools/riza.mdx +++ b/src/oss/python/integrations/tools/riza.mdx @@ -42,7 +42,7 @@ tools = [ExecPython()] Initialize an agent using Anthropic's Claude Haiku model. 
```python -llm = ChatAnthropic(model="claude-3-haiku-20240307", temperature=0) +llm = ChatAnthropic(model="claude-haiku-4-5-20251001", temperature=0) prompt_template = ChatPromptTemplate.from_messages( [ diff --git a/src/oss/python/integrations/tools/salesforce.mdx b/src/oss/python/integrations/tools/salesforce.mdx index 2a7880d579..9cbeb7baae 100644 --- a/src/oss/python/integrations/tools/salesforce.mdx +++ b/src/oss/python/integrations/tools/salesforce.mdx @@ -164,7 +164,7 @@ tool = SalesforceTool( ) # Initialize Anthropic LLM -llm = ChatAnthropic(model="claude-sonnet-4-20250514") +llm = ChatAnthropic(model="claude-sonnet-4-5-20250929") # First, let's query some contacts to get real data contacts_query = { diff --git a/src/oss/python/integrations/tools/searchapi.mdx b/src/oss/python/integrations/tools/searchapi.mdx index 567734c5f5..a090cee834 100644 --- a/src/oss/python/integrations/tools/searchapi.mdx +++ b/src/oss/python/integrations/tools/searchapi.mdx @@ -52,7 +52,7 @@ tools = [ from langchain.agents import create_agent -agent = create_agent("openai:gpt-4.1-mini", tools) +agent = create_agent("gpt-4.1-mini", tools) ``` ```python diff --git a/src/oss/python/integrations/tools/steam.mdx b/src/oss/python/integrations/tools/steam.mdx index bf5d407669..47d9d1e7b7 100644 --- a/src/oss/python/integrations/tools/steam.mdx +++ b/src/oss/python/integrations/tools/steam.mdx @@ -57,7 +57,7 @@ tools = [steam.run] ```python from langchain.agents import create_agent -agent = create_agent("openai:gpt-4.1-mini", tools) +agent = create_agent("gpt-4.1-mini", tools) ``` ```python diff --git a/src/oss/python/integrations/tools/yahoo_finance_news.mdx b/src/oss/python/integrations/tools/yahoo_finance_news.mdx index 770d526621..ee415104a5 100644 --- a/src/oss/python/integrations/tools/yahoo_finance_news.mdx +++ b/src/oss/python/integrations/tools/yahoo_finance_news.mdx @@ -30,7 +30,7 @@ from langchain.agents import create_agent tools = [YahooFinanceNewsTool()] -agent = create_agent("openai:gpt-4.1-mini", tools) +agent = create_agent("gpt-4.1-mini", tools) ``` ```output diff --git a/src/oss/python/integrations/vectorstores/moorcheh.mdx b/src/oss/python/integrations/vectorstores/moorcheh.mdx index 1d82931edf..ecb91e8854 100644 --- a/src/oss/python/integrations/vectorstores/moorcheh.mdx +++ b/src/oss/python/integrations/vectorstores/moorcheh.mdx @@ -153,7 +153,7 @@ Once your namespace has been created and you have uploaded documents into it, yo ```python query = "Give me a brief summary of the provided documents" -answer = store.generative_answer(query, ai_model = "anthropic.claude-3-7-sonnet-20250219-v1:0") +answer = store.generative_answer(query, ai_model = "anthropic.claude-sonnet-4-5-20250929-v1:0") print(answer) ``` diff --git a/src/oss/python/migrate/langchain-v1.mdx b/src/oss/python/migrate/langchain-v1.mdx index 56090012fc..60a4ac431d 100644 --- a/src/oss/python/migrate/langchain-v1.mdx +++ b/src/oss/python/migrate/langchain-v1.mdx @@ -118,7 +118,7 @@ The `prompt` parameter has been renamed to @[`system_prompt`]: from langchain.agents import create_agent agent = create_agent( - model="anthropic:claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", tools=[check_weather], system_prompt="You are a helpful assistant" # [!code highlight] ) @@ -127,7 +127,7 @@ agent = create_agent( from langgraph.prebuilt import create_react_agent agent = create_react_agent( - model="anthropic:claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", tools=[check_weather], prompt="You are a helpful assistant" # [!code 
highlight] ) @@ -143,7 +143,7 @@ If using @[`SystemMessage`] objects in the system prompt, extract the string con from langchain.agents import create_agent agent = create_agent( - model="anthropic:claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", tools=[check_weather], system_prompt="You are a helpful assistant" # [!code highlight] ) @@ -153,7 +153,7 @@ from langchain.messages import SystemMessage from langgraph.prebuilt import create_react_agent agent = create_react_agent( - model="anthropic:claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", tools=[check_weather], prompt=SystemMessage(content="You are a helpful assistant") # [!code highlight] ) @@ -196,7 +196,7 @@ def dynamic_prompt(request: ModelRequest) -> str: # [!code highlight] return prompt # [!code highlight] agent = create_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=tools, middleware=[dynamic_prompt], # [!code highlight] context_schema=Context @@ -231,7 +231,7 @@ def dynamic_prompt(state: AgentState) -> str: return base_prompt agent = create_react_agent( - model="openai:gpt-4o", + model="gpt-4o", tools=tools, prompt=dynamic_prompt, context_schema=Context @@ -265,11 +265,11 @@ from langchain.agents import create_agent from langchain.agents.middleware import SummarizationMiddleware agent = create_agent( - model="anthropic:claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", tools=tools, middleware=[ SummarizationMiddleware( # [!code highlight] - model="anthropic:claude-sonnet-4-5", # [!code highlight] + model="claude-sonnet-4-5-20250929", # [!code highlight] max_tokens_before_summary=1000 # [!code highlight] ) # [!code highlight] ] # [!code highlight] @@ -283,7 +283,7 @@ def custom_summarization_function(state: AgentState): ... agent = create_react_agent( - model="anthropic:claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", tools=tools, pre_model_hook=custom_summarization_function ) @@ -308,7 +308,7 @@ from langchain.agents import create_agent from langchain.agents.middleware import HumanInTheLoopMiddleware agent = create_agent( - model="anthropic:claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", tools=[read_email, send_email], middleware=[HumanInTheLoopMiddleware( interrupt_on={ @@ -328,7 +328,7 @@ def custom_human_in_the_loop_hook(state: AgentState): ... agent = create_react_agent( - model="anthropic:claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", tools=[read_email, send_email], post_model_hook=custom_human_in_the_loop_hook ) @@ -371,7 +371,7 @@ def greet( return f"Hello {user_name}!" agent = create_agent( # [!code highlight] - model="anthropic:claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", tools=[greet], state_schema=CustomState # [!code highlight] ) @@ -392,7 +392,7 @@ def greet( return f"Hello {user_name}!" 
agent = create_react_agent( - model="anthropic:claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", tools=[greet], state_schema=CustomState ) @@ -425,7 +425,7 @@ class CallCounterMiddleware(AgentMiddleware[CustomState]): return {"model_call_count": state.get("model_call_count", 0) + 1} agent = create_agent( - model="anthropic:claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", tools=[...], middleware=[CallCounterMiddleware()] # [!code highlight] ) @@ -446,7 +446,7 @@ class CustomAgentState(AgentState): # [!code highlight] user_id: str agent = create_agent( - model="anthropic:claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", tools=tools, state_schema=CustomAgentState # [!code highlight] ) @@ -466,7 +466,7 @@ class AgentState(BaseModel): # [!code highlight] user_id: str agent = create_react_agent( - model="anthropic:claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", tools=tools, state_schema=AgentState ) @@ -546,7 +546,7 @@ model_with_tools = ChatOpenAI().bind_tools([some_tool]) agent = create_agent(model_with_tools, tools=[]) # Use instead -agent = create_agent("openai:gpt-4o-mini", tools=[some_tool]) +agent = create_agent("gpt-4o-mini", tools=[some_tool]) ``` @@ -568,7 +568,7 @@ The argument will no longer accept @[`ToolNode`] instances. from langchain.agents import create_agent agent = create_agent( - model="anthropic:claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", tools=[check_weather, search_web] ) ``` @@ -577,7 +577,7 @@ from langgraph.prebuilt import create_react_agent, ToolNode agent = create_react_agent( - model="anthropic:claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", tools=ToolNode([check_weather, search_web]) # [!code highlight] ) ``` @@ -623,7 +623,7 @@ class OutputSchema(BaseModel): # Using ToolStrategy agent = create_agent( - model="openai:gpt-4o-mini", + model="gpt-4o-mini", tools=tools, # explicitly using tool strategy response_format=ToolStrategy(OutputSchema) # [!code highlight] @@ -639,7 +639,7 @@ class OutputSchema(BaseModel): sentiment: str agent = create_react_agent( - model="openai:gpt-4o-mini", + model="gpt-4o-mini", tools=tools, # using tool strategy by default with no option for provider strategy response_format=OutputSchema # [!code highlight] @@ -648,7 +648,7 @@ agent = create_react_agent( # OR agent = create_react_agent( - model="openai:gpt-4o-mini", + model="gpt-4o-mini", tools=tools, # using a custom prompt to instruct the model to generate the output schema response_format=("please generate ...", OutputSchema) # [!code highlight] @@ -739,7 +739,7 @@ In v1, messages gain provider-agnostic standard content blocks. 
Access them via ```python v1 (new) from langchain.chat_models import init_chat_model -model = init_chat_model("openai:gpt-5-nano") +model = init_chat_model("gpt-5-nano") response = model.invoke("Explain AI") for block in response.content_blocks: @@ -817,7 +817,7 @@ export LC_OUTPUT_VERSION=v1 from langchain.chat_models import init_chat_model model = init_chat_model( - "openai:gpt-5-nano", + "gpt-5-nano", output_version="v1", ) ``` diff --git a/src/oss/python/releases/langchain-v1.mdx b/src/oss/python/releases/langchain-v1.mdx index da2bb130d1..a0fb2120d7 100644 --- a/src/oss/python/releases/langchain-v1.mdx +++ b/src/oss/python/releases/langchain-v1.mdx @@ -38,7 +38,7 @@ For a complete list of changes, see the [migration guide](/oss/migrate/langchain from langchain.agents import create_agent agent = create_agent( - model="anthropic:claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", tools=[search_web, analyze_data, send_email], system_prompt="You are a helpful research assistant." ) @@ -86,7 +86,7 @@ from langchain.agents.middleware import ( agent = create_agent( - model="anthropic:claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", tools=[read_email, send_email], middleware=[ PIIMiddleware("email", strategy="redact", apply_to_input=True), @@ -100,7 +100,7 @@ agent = create_agent( strategy="block" ), SummarizationMiddleware( - model="anthropic:claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", max_tokens_before_summary=500 ), HumanInTheLoopMiddleware( @@ -166,11 +166,11 @@ class ExpertiseBasedToolMiddleware(AgentMiddleware): if user_level == "expert": # More powerful model - model = ChatOpenAI(model="openai:gpt-5") + model = ChatOpenAI(model="gpt-5") tools = [advanced_search, data_analysis] else: # Less powerful model - model = ChatOpenAI(model="openai:gpt-5-nano") + model = ChatOpenAI(model="gpt-5-nano") tools = [simple_search, basic_calculator] request.model = model @@ -178,7 +178,7 @@ class ExpertiseBasedToolMiddleware(AgentMiddleware): return handler(request) agent = create_agent( - model="anthropic:claude-sonnet-4-5", + model="claude-sonnet-4-5-20250929", tools=[ simple_search, advanced_search, @@ -236,7 +236,7 @@ def weather_tool(city: str) -> str: return f"it's sunny and 70 degrees in {city}" agent = create_agent( - "openai:gpt-4o-mini", + "gpt-4o-mini", tools=[weather_tool], response_format=ToolStrategy(Weather) ) @@ -274,7 +274,7 @@ The new @[`content_blocks`][BaseMessage(content_blocks)] property introduces a s ```python from langchain_anthropic import ChatAnthropic -model = ChatAnthropic(model="claude-sonnet-4-5") +model = ChatAnthropic(model="claude-sonnet-4-5-20250929") response = model.invoke("What's the capital of France?") # Unified access to content blocks diff --git a/src/snippets/chat-model-tabs-js.mdx b/src/snippets/chat-model-tabs-js.mdx index f85c860252..f98387517f 100644 --- a/src/snippets/chat-model-tabs-js.mdx +++ b/src/snippets/chat-model-tabs-js.mdx @@ -23,7 +23,7 @@ process.env.OPENAI_API_KEY = "your-api-key"; - const model = await initChatModel("openai:gpt-4.1"); + const model = await initChatModel("gpt-4.1"); ``` ```typescript Model Class import { ChatOpenAI } from "@langchain/openai"; @@ -59,13 +59,13 @@ process.env.ANTHROPIC_API_KEY = "your-api-key"; - const model = await initChatModel("anthropic:claude-sonnet-4-5"); + const model = await initChatModel("claude-sonnet-4-5-20250929"); ``` ```typescript Model Class import { ChatAnthropic } from "@langchain/anthropic"; const model = new ChatAnthropic({ - model: "claude-sonnet-4-5", + model: 
"claude-sonnet-4-5-20250929", apiKey: "your-api-key" }); ``` diff --git a/src/snippets/chat-model-tabs.mdx b/src/snippets/chat-model-tabs.mdx index 2f0ee6e981..242f395913 100644 --- a/src/snippets/chat-model-tabs.mdx +++ b/src/snippets/chat-model-tabs.mdx @@ -13,7 +13,7 @@ os.environ["OPENAI_API_KEY"] = "sk-..." - model = init_chat_model("openai:gpt-4.1") + model = init_chat_model("gpt-4.1") ``` ```python Model Class import os @@ -38,7 +38,7 @@ os.environ["ANTHROPIC_API_KEY"] = "sk-..." - model = init_chat_model("anthropic:claude-sonnet-4-5") + model = init_chat_model("claude-sonnet-4-5-20250929") ``` ```python Model Class import os @@ -46,7 +46,7 @@ os.environ["ANTHROPIC_API_KEY"] = "sk-..." - model = ChatAnthropic(model="claude-sonnet-4-5") + model = ChatAnthropic(model="claude-sonnet-4-5-20250929") ``` diff --git a/src/snippets/oss/studio.mdx b/src/snippets/oss/studio.mdx index a64d6594ec..896d1f1587 100644 --- a/src/snippets/oss/studio.mdx +++ b/src/snippets/oss/studio.mdx @@ -54,7 +54,7 @@ def send_email(to: str, subject: str, body: str): return f"Email sent to {to}" agent = create_agent( - "openai:gpt-4o", + "gpt-4o", tools=[send_email], system_prompt="You are an email assistant. Always use the send_email tool.", ) diff --git a/src/snippets/oss/ui-js.mdx b/src/snippets/oss/ui-js.mdx index 31008b53e4..b2c8b41cff 100644 --- a/src/snippets/oss/ui-js.mdx +++ b/src/snippets/oss/ui-js.mdx @@ -1,4 +1,4 @@ -LangChain provides a powerful prebuilt user interface that work seamlessly with agents created using [`create_agent()`](/oss/javascript/langchain/agents). This UI is designed to provide rich, interactive experiences for your agents with minimal setup, whether you're running locally or in a deployed context (such as [LangSmith](/langsmith/)). +LangChain provides a powerful prebuilt user interface that work seamlessly with agents created using [`create_agent`](/oss/javascript/langchain/agents). This UI is designed to provide rich, interactive experiences for your agents with minimal setup, whether you're running locally or in a deployed context (such as [LangSmith](/langsmith/)). ## Agent Chat UI diff --git a/src/snippets/trace-with-anthropic.mdx b/src/snippets/trace-with-anthropic.mdx index 285d1e5991..f9898c9d83 100644 --- a/src/snippets/trace-with-anthropic.mdx +++ b/src/snippets/trace-with-anthropic.mdx @@ -31,7 +31,7 @@ def chat_pipeline(question: str): { "role": "user", "content": f"Question: {question}\nContext: {context}"} ] messages = client.messages.create( - model="claude-sonnet-4-20250514", + model="claude-sonnet-4-5-20250929", messages=messages, max_tokens=1024, system="You are a helpful assistant. Please respond to the user's request only based on the given context."