37 changes: 28 additions & 9 deletions pipeline/preprocessors/link_map.py
@@ -147,17 +147,36 @@ class LinkMap(TypedDict):
# Integrations
# langchain-openai
"langchain-openai": "integrations/langchain_openai",
"BaseChatOpenAI": "integrations/langchain_openai/BaseChatOpenAI/",
"ChatOpenAI": "integrations/langchain_openai/ChatOpenAI/",
"AzureChatOpenAI": "integrations/langchain_openai/AzureChatOpenAI/",
"OpenAI": "integrations/langchain_openai/OpenAI/",
"AzureOpenAI": "integrations/langchain_openai/AzureOpenAI/",
"OpenAIEmbeddings": "integrations/langchain_openai/OpenAIEmbeddings/",
"AzureOpenAIEmbeddings": "integrations/langchain_openai/AzureOpenAIEmbeddings/",
"BaseChatOpenAI": "integrations/langchain_openai/BaseChatOpenAI",
"ChatOpenAI": "integrations/langchain_openai/ChatOpenAI",
"AzureChatOpenAI": "integrations/langchain_openai/AzureChatOpenAI",
"OpenAI": "integrations/langchain_openai/OpenAI",
"AzureOpenAI": "integrations/langchain_openai/AzureOpenAI",
"OpenAIEmbeddings": "integrations/langchain_openai/OpenAIEmbeddings",
"AzureOpenAIEmbeddings": "integrations/langchain_openai/AzureOpenAIEmbeddings",
# langchain-anthropic
"langchain-anthropic": "integrations/langchain_anthropic",
"ChatAnthropic": "integrations/langchain_anthropic/ChatAnthropic/",
"AnthropicLLM": "integrations/langchain_anthropic/AnthropicLLM/",
"ChatAnthropic": "integrations/langchain_anthropic/ChatAnthropic",
"AnthropicLLM": "integrations/langchain_anthropic/AnthropicLLM",
+ # langchain-google
+ "langchain-google": "integrations/langchain_google",
+ "langchain-google-genai": "integrations/langchain_google_genai",
+ "ChatGoogleGenerativeAI": "integrations/langchain_google_genai/#langchain_google_genai.ChatGoogleGenerativeAI",
+ "langchain-google-vertexai": "integrations/langchain_google_vertexai",
+ "ChatVertexAI": "integrations/langchain_google_vertexai/#langchain_google_vertexai.ChatVertexAI",
+ "langchain-google-community": "integrations/langchain_google_community/",
+ # langchain-ollama
+ "langchain-ollama": "integrations/langchain_ollama",
+ "ChatOllama": "integrations/langchain_ollama/#langchain_ollama.ChatOllama",
+ # langchain-xai
+ "langchain-xai": "integrations/langchain_xai",
+ "ChatXAI": "integrations/langchain_xai/#langchain_xai.ChatXAI",
+ # langchain-groq
+ "langchain-groq": "integrations/langchain_groq",
+ "ChatGroq": "integrations/langchain_groq/#langchain_groq.ChatGroq",
+ # langchain-deepseek
+ "langchain-deepseek": "integrations/langchain_deepseek",
+ "ChatDeepSeek": "integrations/langchain_deepseek/#langchain_deepseek.ChatDeepSeek",
# Models
"init_chat_model": "langchain/models/#langchain.chat_models.init_chat_model",
"init_chat_model(model)": "langchain/models/#langchain.chat_models.init_chat_model(model)",
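
For context on how these entries are consumed: the docs preprocessor expands `@[...]` cross-references (the notation used throughout the mdx files below) into links using this map. A minimal sketch of that resolution step, assuming references are written as @[`Name`] — the function, regex, and base URL here are illustrative placeholders, not the pipeline's actual implementation:

```python
import re

# Trimmed-down copy of two entries from the map above.
LINK_MAP = {
    "ChatOpenAI": "integrations/langchain_openai/ChatOpenAI",
    "ChatDeepSeek": "integrations/langchain_deepseek/#langchain_deepseek.ChatDeepSeek",
}

BASE_URL = "https://docs.example.com/"  # placeholder; the real base URL lives in the pipeline config


def resolve_links(markdown: str) -> str:
    """Expand @[`Name`] references into markdown links via LINK_MAP."""

    def _sub(match: re.Match) -> str:
        name = match.group(1)
        path = LINK_MAP.get(name)
        if path is None:
            return match.group(0)  # unknown reference: leave it untouched
        return f"[`{name}`]({BASE_URL}{path})"

    return re.sub(r"@\[`?([^`\]]+)`?\]", _sub, markdown)


print(resolve_links("Use @[`ChatDeepSeek`] for DeepSeek's hosted models."))
# Use [`ChatDeepSeek`](https://docs.example.com/integrations/langchain_deepseek/#langchain_deepseek.ChatDeepSeek) for DeepSeek's hosted models.
```
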
Binary file added src/images/cat.png
11 changes: 5 additions & 6 deletions src/oss/javascript/integrations/chat/google_generative_ai.mdx
@@ -74,7 +74,7 @@ Now we can instantiate our model object and generate chat completions:
import { ChatGoogleGenerativeAI } from "@langchain/google-genai"

const llm = new ChatGoogleGenerativeAI({
model: "gemini-1.5-pro",
model: "gemini-2.5-pro",
temperature: 0,
maxRetries: 2,
// other params...
@@ -168,7 +168,7 @@ import { ChatGoogleGenerativeAI } from "@langchain/google-genai";
import { HarmBlockThreshold, HarmCategory } from "@google/generative-ai";

const llmWithSafetySettings = new ChatGoogleGenerativeAI({
model: "gemini-1.5-pro",
model: "gemini-2.5-pro",
temperature: 0,
safetySettings: [
{
@@ -255,7 +255,7 @@ const searchRetrievalTool: GoogleSearchRetrievalTool = {
}
};
const searchRetrievalModel = new ChatGoogleGenerativeAI({
model: "gemini-1.5-pro",
model: "gemini-2.5-pro",
temperature: 0,
maxRetries: 0,
}).bindTools([searchRetrievalTool]);
@@ -453,7 +453,7 @@ const codeExecutionTool: CodeExecutionTool = {
codeExecution: {}, // Simply pass an empty object to enable it.
};
const codeExecutionModel = new ChatGoogleGenerativeAI({
model: "gemini-1.5-pro",
model: "gemini-2.5-pro",
temperature: 0,
maxRetries: 0,
}).bindTools([codeExecutionTool]);
@@ -564,7 +564,7 @@ const fileResult = await fileManager.uploadFile(pathToVideoFile, {

// creates cached content AFTER uploading is finished
const cachedContent = await cacheManager.create({
model: "models/gemini-1.5-flash-001",
model: "models/gemini-2.5-flash",
displayName: displayName,
systemInstruction: "You are an expert video analyzer, and your job is to answer " +
"the user's query based on the video file you have access to.",
@@ -594,7 +594,6 @@ await model.invoke("Summarize the video");

**Note**

- - Context caching supports both Gemini 1.5 Pro and Gemini 1.5 Flash. Context caching is only available for stable models with fixed versions (for example, gemini-1.5-pro-001). You must include the version postfix (for example, the -001 in gemini-1.5-pro-001).
- The minimum input token count for context caching is 32,768, and the maximum is the same as the maximum for the given model.

## Gemini Prompting FAQs
8 changes: 4 additions & 4 deletions src/oss/javascript/integrations/chat/google_vertex_ai.mdx
@@ -172,7 +172,7 @@ const searchRetrievalTool = {
};

const searchRetrievalModel = new ChatVertexAI({
model: "gemini-1.5-pro",
model: "gemini-2.5-pro",
temperature: 0,
maxRetries: 0,
}).bindTools([searchRetrievalTool]);
@@ -218,7 +218,7 @@ const searchRetrievalToolWithDataset = {
};

const searchRetrievalModelWithDataset = new ChatVertexAI({
model: "gemini-1.5-pro",
model: "gemini-2.5-pro",
temperature: 0,
maxRetries: 0,
}).bindTools([searchRetrievalToolWithDataset]);
@@ -248,7 +248,7 @@ Once you've created a cache, you can pass its id in as a runtime param as follow
import { ChatVertexAI } from "@langchain/google-vertexai";

const modelWithCachedContent = new ChatVertexAI({
model: "gemini-1.5-pro-002",
model: "gemini-2.5-pro-002",
location: "us-east5",
});

@@ -262,7 +262,7 @@ You can also bind this field directly onto the model instance:

```typescript
const modelWithBoundCachedContent = new ChatVertexAI({
model: "gemini-1.5-pro-002",
model: "gemini-2.5-pro-002",
location: "us-east5",
}).bind({
cachedContent:
2 changes: 1 addition & 1 deletion src/oss/javascript/integrations/chat/index.mdx
@@ -143,7 +143,7 @@ title: Chat models
import { ChatVertexAI } from "@langchain/google-vertexai";

const model = new ChatVertexAI({
model: "gemini-1.5-flash",
model: "gemini-2.5-flash",
temperature: 0
});
```
@@ -11,7 +11,7 @@ You may be looking for [this page instead](/oss/integrations/chat/google_vertex_
</Warning>


- [Google Vertex](https://cloud.google.com/vertex-ai) is a service that exposes all foundation models available in Google Cloud, like `gemini-1.5-pro`, `gemini-1.5-flash`, etc.
+ [Google Vertex](https://cloud.google.com/vertex-ai) is a service that exposes all foundation models available in Google Cloud, like `gemini-2.5-pro`, `gemini-2.5-flash`, etc.

This will help you get started with VertexAI completion models (LLMs) using LangChain. For detailed documentation on `VertexAI` features and configuration options, please refer to the [API reference](https://api.js.langchain.com/classes/langchain_google_vertexai.VertexAI.html).

4 changes: 2 additions & 2 deletions src/oss/javascript/integrations/providers/google.mdx
@@ -9,7 +9,7 @@ and [AI Studio](https://aistudio.google.com/)

### Gemini Models

- Access Gemini models such as `gemini-1.5-pro` and `gemini-2.0-flex` through the [`ChatGoogleGenerativeAI`](/oss/integrations/chat/google_generative_ai),
+ Access Gemini models such as `gemini-2.5-pro` and `gemini-2.0-flex` through the [`ChatGoogleGenerativeAI`](/oss/integrations/chat/google_generative_ai),
or if using VertexAI, via the [`ChatVertexAI`](/oss/integrations/chat/google_vertex_ai) class.

<Tip>
@@ -97,7 +97,7 @@ import { ChatVertexAI } from "@langchain/google-vertexai";
// import { ChatVertexAI } from "@langchain/google-vertexai-web";

const model = new ChatVertexAI({
model: "gemini-1.0-pro",
model: "gemini-2.5-pro",
maxOutputTokens: 2048,
});

11 changes: 5 additions & 6 deletions src/oss/python/integrations/chat/anthropic.mdx
@@ -1,9 +1,8 @@
---
title: ChatAnthropic
+ description: Get started using Anthropic [chat models](/oss/langchain/models) in LangChain.
---

- This guide provides a quick overview for getting started with Claude [chat models](/oss/langchain/models).

You can find information about Anthropic's latest models, their costs, context windows, and supported input types in the [Claude](https://docs.claude.com/en/docs/about-claude/models/overview) docs.

<Tip>
@@ -49,7 +48,7 @@ To access Anthropic (Claude) models you'll need to install the `langchain-anthro

### Credentials

- Head to [console.anthropic.com/](https://console.anthropic.com) to sign up for Anthropic and generate an API key. Once you've done this set the `ANTHROPIC_API_KEY` environment variable:
+ Head to the [Claude console](https://console.anthropic.com) to sign up and generate a Claude API key. Once you've done this set the `ANTHROPIC_API_KEY` environment variable:

```python
import getpass
@@ -145,7 +144,7 @@ response.content
'type': 'tool_use'}]
```

- Using `content_blocks` will render the content in a standard format that is consistent across other model providers:
+ Using `content_blocks` will render the content in a standard format that is consistent across other model providers. Read more about [content blocks](/oss/langchain/messages#standard-content-blocks).

```python
response.content_blocks
@@ -306,7 +305,7 @@ print(json.dumps(response.content_blocks, indent=2))

## Prompt caching

- Anthropic supports [caching](https://docs.claude.com/en/docs/build-with-claude/prompt-caching) of [elements of your prompts](https://docs.claude.com/en/docs/build-with-claude/prompt-caching#what-can-be-cached), including messages, tool definitions, tool results, images and documents. This allows you to re-use large documents, instructions, [few-shot documents](/langsmith/create-few-shot-evaluators), and other data to reduce latency and costs.
+ Anthropic supports [caching](https://docs.claude.com/en/docs/build-with-claude/prompt-caching) of elements of your prompts, including messages, tool definitions, tool results, images and documents. This allows you to re-use large documents, instructions, [few-shot documents](/langsmith/create-few-shot-evaluators), and other data to reduce latency and costs.

To enable caching on an element of a prompt, mark its associated content block using the `cache_control` key. See examples below:
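
The concrete examples are collapsed out of this diff; as orientation, a minimal sketch of the pattern — the model name and document text here are placeholders, not part of the original examples:

```python
from langchain_anthropic import ChatAnthropic

llm = ChatAnthropic(model="claude-sonnet-4-5")  # placeholder model name

messages = [
    {
        "role": "system",
        "content": [
            {
                "type": "text",
                "text": "<several thousand tokens of reference material>",
                # Mark this block as cacheable; later calls that share the
                # same prefix can reuse the cached prompt instead of re-sending it.
                "cache_control": {"type": "ephemeral"},
            }
        ],
    },
    {"role": "user", "content": "Answer using the reference material."},
]

response = llm.invoke(messages)
```
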

@@ -400,7 +399,7 @@ Second:

### Tools

- ```python
+ ```python expandable
from langchain_anthropic import convert_to_anthropic_tool
from langchain.tools import tool

9 changes: 7 additions & 2 deletions src/oss/python/integrations/chat/azure_chat_openai.mdx
@@ -1,9 +1,8 @@
---
title: AzureChatOpenAI
+ description: Get started using OpenAI [chat models](/oss/langchain/models) via Azure in LangChain.
---

- This guide provides a quick overview for getting started with OpenAI [chat models](/oss/langchain/models) on Azure.

You can find information about Azure OpenAI's latest models and their costs, context windows, and supported input types in the [Azure docs](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models).

<Info>
Expand Down Expand Up @@ -32,6 +31,12 @@ You can find information about Azure OpenAI's latest models and their costs, con
features, or head to the @[`AzureChatOpenAI`] API reference.
</Note>

+ <Tip>
+ **API Reference**
+
+ For detailed documentation of all features and configuration options, head to the @[`AzureChatOpenAI`] API reference.
+ </Tip>
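
As quick orientation, a minimal instantiation sketch — the deployment name and API version are placeholders for your Azure resource's values:

```python
from langchain_openai import AzureChatOpenAI

# Assumes AZURE_OPENAI_API_KEY and AZURE_OPENAI_ENDPOINT are set in the environment.
llm = AzureChatOpenAI(
    azure_deployment="<your-deployment-name>",  # placeholder
    api_version="2024-10-21",                   # placeholder; use a version your resource supports
)
response = llm.invoke("Hello!")
print(response.content)
```
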

## Overview

### Integration details
12 changes: 9 additions & 3 deletions src/oss/python/integrations/chat/deepseek.mdx
@@ -1,12 +1,18 @@
---
title: ChatDeepSeek
+ description: Get started using DeepSeek [chat models](/oss/langchain/models) in LangChain.
---

- This will help you get started with DeepSeek's hosted [chat models](/oss/langchain/models). For detailed documentation of all ChatDeepSeek features and configurations head to the [API reference](https://python.langchain.com/api_reference/deepseek/chat_models/langchain_deepseek.chat_models.ChatDeepSeek.html).
+ This will help you get started with DeepSeek's hosted [chat models](/oss/langchain/models).

<Tip>
- **DeepSeek's models are open source and can be run locally (e.g. in [Ollama](./ollama.ipynb)) or on other inference providers (e.g. [Fireworks](./fireworks.ipynb), [Together](./together.ipynb)) as well.**
+ **API Reference**
+
+ For detailed documentation of all features and configuration options, head to the @[`ChatDeepSeek`] API reference.
</Tip>

+ <Tip>
+ **DeepSeek's models are open source and can be run locally (e.g. in [Ollama](./ollama.ipynb)) or on other inference providers (e.g. [Fireworks](./fireworks.ipynb), [Together](./together.ipynb)) as well.**
+ </Tip>
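
A minimal usage sketch — assuming `DEEPSEEK_API_KEY` is set and that `deepseek-chat` (DeepSeek's general-purpose chat model) is the model you want:

```python
from langchain_deepseek import ChatDeepSeek

# Assumes DEEPSEEK_API_KEY is set in the environment.
llm = ChatDeepSeek(model="deepseek-chat", temperature=0)
response = llm.invoke("Hello!")
print(response.content)
```
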

## Overview
@@ -15,7 +21,7 @@ This will help you get started with DeepSeek's hosted [chat models](/oss/langcha

| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/deepseek) | Downloads | Version |
| :--- | :--- | :---: | :---: | :---: | :---: | :---: |
- | [ChatDeepSeek](https://python.langchain.com/api_reference/deepseek/chat_models/langchain_deepseek.chat_models.ChatDeepSeek.html) | [langchain-deepseek](https://python.langchain.com/api_reference/deepseek/) | ❌ | beta | ✅ | ![PyPI - Downloads](https://img.shields.io/pypi/dm/langchain-deepseek?style=flat-square&label=%20) | ![PyPI - Version](https://img.shields.io/pypi/v/langchain-deepseek?style=flat-square&label=%20) |
+ | @[`ChatDeepSeek`] | @[`langchain-deepseek`] | ❌ | beta | ✅ | ![PyPI - Downloads](https://img.shields.io/pypi/dm/langchain-deepseek?style=flat-square&label=%20) | ![PyPI - Version](https://img.shields.io/pypi/v/langchain-deepseek?style=flat-square&label=%20) |

### Model features
