6 changes: 3 additions & 3 deletions src/oss/python/integrations/callbacks/uptrain.mdx
@@ -62,9 +62,9 @@ NOTE: that you can also install `faiss-gpu` instead of `faiss-cpu` if you want t
from getpass import getpass

from langchain.chains import RetrievalQA
-from langchain.retrievers import ContextualCompressionRetriever
-from langchain.retrievers.document_compressors import FlashrankRerank
-from langchain.retrievers.multi_query import MultiQueryRetriever
+from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever
+from langchain_classic.retrievers.document_compressors import FlashrankRerank
+from langchain_classic.retrievers.multi_query import MultiQueryRetriever
from langchain_community.callbacks.uptrain_callback import UpTrainCallbackHandler
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import FAISS
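
For context, a minimal sketch of how these imports typically wire together after the path change (`retriever` and `llm` are assumed to be defined elsewhere in the notebook):

```python
# Hypothetical wiring, not part of the diff above
compressor = FlashrankRerank()
compression_retriever = ContextualCompressionRetriever(
    base_compressor=compressor, base_retriever=retriever
)
multi_query_retriever = MultiQueryRetriever.from_llm(
    retriever=compression_retriever, llm=llm
)
```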
4 changes: 2 additions & 2 deletions src/oss/python/integrations/document_loaders/docugami.mdx
@@ -204,7 +204,7 @@ We can use a self-querying retriever to improve our query accuracy, using this a

```python
from langchain.chains.query_constructor.schema import AttributeInfo
-from langchain.retrievers.self_query.base import SelfQueryRetriever
+from langchain_classic.retrievers.self_query.base import SelfQueryRetriever
from langchain_chroma import Chroma

EXCLUDE_KEYS = ["id", "xpath", "structure"]
@@ -322,7 +322,7 @@ CHUNK 21b4d9517f7ccdc0e3a028ce5043a2a0: page_content='1.1 Landlord.\n <Landlord>
```

```python
-from langchain.retrievers.multi_vector import MultiVectorRetriever, SearchType
+from langchain_classic.retrievers.multi_vector import MultiVectorRetriever, SearchType
from langchain.storage import InMemoryStore
from langchain_chroma import Chroma
from langchain_openai import OpenAIEmbeddings
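# A minimal sketch, not part of the diff: wiring the imports above into a
# MultiVectorRetriever (collection name and search type are assumptions)
store = InMemoryStore()
retriever = MultiVectorRetriever(
    vectorstore=Chroma(
        collection_name="parents", embedding_function=OpenAIEmbeddings()
    ),
    docstore=store,
    search_type=SearchType.mmr,
)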
@@ -58,8 +58,8 @@ pretty_print_docs(docs)
Now let's wrap our base retriever with a `ContextualCompressionRetriever`. `CrossEncoderReranker` uses `HuggingFaceCrossEncoder` to rerank the returned results.

```python
-from langchain.retrievers import ContextualCompressionRetriever
-from langchain.retrievers.document_compressors import CrossEncoderReranker
+from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever
+from langchain_classic.retrievers.document_compressors import CrossEncoderReranker
from langchain_community.cross_encoders import HuggingFaceCrossEncoder

model = HuggingFaceCrossEncoder(model_name="BAAI/bge-reranker-base")
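# Assumed continuation, not part of the diff (`retriever` is defined earlier):
compressor = CrossEncoderReranker(model=model, top_n=3)
compression_retriever = ContextualCompressionRetriever(
    base_compressor=compressor, base_retriever=retriever
)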
@@ -263,7 +263,7 @@ And with an unwavering resolve that freedom will always triumph over tyranny.
Now let's wrap our base retriever with a `ContextualCompressionRetriever`. We'll use the `DashScopeRerank` to rerank the returned results.

```python
-from langchain.retrievers import ContextualCompressionRetriever
+from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever
from langchain_community.document_compressors.dashscope_rerank import DashScopeRerank

compressor = DashScopeRerank()
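# Assumed continuation, not part of the diff (`retriever` is defined earlier):
compression_retriever = ContextualCompressionRetriever(
    base_compressor=compressor, base_retriever=retriever
)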
@@ -64,7 +64,7 @@ Your 1 documents have been split into 266 chunks

```python
import pandas as pd
-from langchain.retrievers.contextual_compression import ContextualCompressionRetriever
+from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever
from langchain_google_community.vertex_rank import VertexAIRank

# Instantiate the VertexAIReranker with the SDK manager
@@ -280,7 +280,7 @@ Now let's wrap our base retriever with a `ContextualCompressionRetriever`. We'll

```python
from infinity_client import Client
-from langchain.retrievers import ContextualCompressionRetriever
+from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever
from langchain_community.document_compressors.infinity_rerank import InfinityRerank

client = Client(base_url="http://localhost:7997")
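# Assumed continuation (model name is an illustrative placeholder, and
# `retriever` is defined earlier in the notebook):
compressor = InfinityRerank(client=client, model="mixedbread-ai/mxbai-rerank-xsmall-v1")
compression_retriever = ContextualCompressionRetriever(
    base_compressor=compressor, base_retriever=retriever
)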
@@ -65,7 +65,7 @@ pretty_print_docs(docs)
Now let's wrap our base retriever with a ContextualCompressionRetriever, using Jina Reranker as a compressor.

```python
-from langchain.retrievers import ContextualCompressionRetriever
+from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever
from langchain_community.document_compressors import JinaRerank

compressor = JinaRerank()
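# Assumed continuation, not part of the diff (`retriever` is defined earlier):
compression_retriever = ContextualCompressionRetriever(
    base_compressor=compressor, base_retriever=retriever
)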
@@ -291,7 +291,7 @@ Metadata: {'source': '../../how_to/state_of_the_union.txt', 'id': 40}
Now let's wrap our base retriever with a `ContextualCompressionRetriever`, using `OpenVINOReranker` as a compressor.

```python
-from langchain.retrievers import ContextualCompressionRetriever
+from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever
from langchain_community.document_compressors.openvino_rerank import OpenVINOReranker

model_name = "BAAI/bge-reranker-large"
@@ -270,7 +270,7 @@ RankZephyr performs listwise reranking for improved retrieval quality but requir

```python
import torch
-from langchain.retrievers.contextual_compression import ContextualCompressionRetriever
+from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever
from langchain_community.document_compressors.rankllm_rerank import RankLLMRerank

torch.cuda.empty_cache()
@@ -567,7 +567,7 @@ One America.
Retrieval + Reranking with RankGPT

```python
-from langchain.retrievers.contextual_compression import ContextualCompressionRetriever
+from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever
from langchain_community.document_compressors.rankllm_rerank import RankLLMRerank

compressor = RankLLMRerank(top_n=3, model="gpt", gpt_model="gpt-4o-mini")
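# Assumed continuation, not part of the diff (`retriever` is defined earlier):
compression_retriever = ContextualCompressionRetriever(
    base_compressor=compressor, base_retriever=retriever
)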
@@ -295,7 +295,7 @@ To disable this warning, you can either:
Now let's wrap our base retriever with a `ContextualCompressionRetriever`. We'll use the `VolcengineRerank` to rerank the returned results.

```python
-from langchain.retrievers import ContextualCompressionRetriever
+from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever
from langchain_community.document_compressors.volcengine_rerank import VolcengineRerank

compressor = VolcengineRerank()
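# Assumed continuation, not part of the diff (`retriever` is defined earlier):
compression_retriever = ContextualCompressionRetriever(
    base_compressor=compressor, base_retriever=retriever
)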
@@ -292,7 +292,7 @@ Now let's wrap our base retriever with a `ContextualCompressionRetriever`. We'll
- `rerank-lite-1`

```python
-from langchain.retrievers import ContextualCompressionRetriever
+from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever
from langchain_openai import OpenAI
from langchain_voyageai import VoyageAIRerank
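# Assumed continuation: construct the reranker with one of the models listed
# above and wrap the base retriever (parameter names are assumptions)
compressor = VoyageAIRerank(model="rerank-lite-1", top_k=3)
compression_retriever = ContextualCompressionRetriever(
    base_compressor=compressor, base_retriever=retriever
)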

2 changes: 1 addition & 1 deletion src/oss/python/integrations/providers/astradb.mdx
@@ -137,7 +137,7 @@ Learn more in the [example notebook](/oss/integrations/document_loaders/astradb)

```python
from langchain_astradb import AstraDBVectorStore
-from langchain.retrievers.self_query.base import SelfQueryRetriever
+from langchain_classic.retrievers.self_query.base import SelfQueryRetriever

vector_store = AstraDBVectorStore(
embedding=my_embedding,
2 changes: 1 addition & 1 deletion src/oss/python/integrations/providers/breebs.mdx
@@ -11,7 +11,7 @@ title: Breebs (Open Knowledge)
## Retriever

```python
-from langchain.retrievers import BreebsRetriever
+from langchain_classic.retrievers import BreebsRetriever
```

[See a usage example (Retrieval & ConversationalRetrievalChain)](/oss/integrations/retrievers/breebs)
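
A minimal usage sketch (the `breeb_key` value is a hypothetical placeholder):

```python
from langchain_classic.retrievers import BreebsRetriever

retriever = BreebsRetriever(breeb_key="your-breeb-key")  # hypothetical key
docs = retriever.invoke("What are the best places to eat in Paris?")
```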
2 changes: 1 addition & 1 deletion src/oss/python/integrations/providers/chaindesk.mdx
@@ -15,5 +15,5 @@ We need the [API Key](https://docs.chaindesk.ai/api-reference/authentication).
See a [usage example](/oss/integrations/retrievers/chaindesk).

```python
-from langchain.retrievers import ChaindeskRetriever
+from langchain_classic.retrievers import ChaindeskRetriever
```
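
A hedged usage sketch (the datastore URL and key are placeholders, not values from this PR):

```python
from langchain_classic.retrievers import ChaindeskRetriever

retriever = ChaindeskRetriever(
    datastore_url="https://app.chaindesk.ai/query/<datastore-id>",  # placeholder
    api_key="<CHAINDESK_API_KEY>",  # placeholder
)
docs = retriever.invoke("What is Chaindesk?")
```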
2 changes: 1 addition & 1 deletion src/oss/python/integrations/providers/chroma.mdx
@@ -31,5 +31,5 @@ For a more detailed walkthrough of the Chroma wrapper, see [this notebook](/oss/
## Retriever

```python
-from langchain.retrievers import SelfQueryRetriever
+from langchain_classic.retrievers import SelfQueryRetriever
```
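
A minimal self-query sketch, assuming an existing Chroma `vectorstore` and a metadata schema (the field names here are illustrative):

```python
from langchain.chains.query_constructor.schema import AttributeInfo
from langchain_classic.retrievers import SelfQueryRetriever
from langchain_openai import ChatOpenAI

metadata_field_info = [
    AttributeInfo(name="year", description="Year the document was written", type="integer"),
]
retriever = SelfQueryRetriever.from_llm(
    ChatOpenAI(temperature=0),
    vectorstore,  # an existing Chroma vector store (assumed)
    "Brief summary of a document",
    metadata_field_info,
)
```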
6 changes: 3 additions & 3 deletions src/oss/python/integrations/providers/cohere.mdx
@@ -25,9 +25,9 @@ Get a [Cohere api key](https://dashboard.cohere.ai/) and set it as an environmen
|---|---|---|---|---|
|Chat|Build chat bots|[chat](https://docs.cohere.com/reference/chat)|`from langchain_cohere import ChatCohere`|[cohere.ipynb](/oss/integrations/chat/cohere)|
|LLM|Generate text|[generate](https://docs.cohere.com/reference/generate)|`from langchain_cohere.llms import Cohere`|[cohere.ipynb](/oss/integrations/llms/cohere)|
-|RAG Retriever|Connect to external data sources|[chat + rag](https://docs.cohere.com/reference/chat)|`from langchain.retrievers import CohereRagRetriever`|[cohere.ipynb](/oss/integrations/retrievers/cohere)|
+|RAG Retriever|Connect to external data sources|[chat + rag](https://docs.cohere.com/reference/chat)|`from langchain_classic.retrievers import CohereRagRetriever`|[cohere.ipynb](/oss/integrations/retrievers/cohere)|
|Text Embedding|Embed strings to vectors|[embed](https://docs.cohere.com/reference/embed)|`from langchain_cohere import CohereEmbeddings`|[cohere.ipynb](/oss/integrations/text_embedding/cohere)|
-|Rerank Retriever|Rank strings based on relevance|[rerank](https://docs.cohere.com/reference/rerank)|`from langchain.retrievers.document_compressors import CohereRerank`|[cohere.ipynb](/oss/integrations/retrievers/cohere-reranker)|
+|Rerank Retriever|Rank strings based on relevance|[rerank](https://docs.cohere.com/reference/rerank)|`from langchain_classic.retrievers.document_compressors import CohereRerank`|[cohere.ipynb](/oss/integrations/retrievers/cohere-reranker)|

## Quick copy examples

@@ -140,7 +140,7 @@ The ReAct agent can be used to call multiple tools in sequence.

```python
from langchain_cohere import ChatCohere
-from langchain.retrievers import CohereRagRetriever
+from langchain_classic.retrievers import CohereRagRetriever
from langchain_core.documents import Document

rag = CohereRagRetriever(llm=ChatCohere())
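# Assumed usage of the retriever constructed above (not part of the diff):
docs = rag.invoke("Does LangChain support Cohere RAG?")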
2 changes: 1 addition & 1 deletion src/oss/python/integrations/providers/metal.mdx
@@ -15,7 +15,7 @@ Get started by [creating a Metal account](https://app.getmetal.io/signup).
Then, you can easily take advantage of the `MetalRetriever` class to start retrieving your data for semantic search, prompting context, etc. This class takes a `Metal` instance and a dictionary of parameters to pass to the Metal API.

```python
-from langchain.retrievers import MetalRetriever
+from langchain_classic.retrievers import MetalRetriever
from metal_sdk.metal import Metal
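# Hedged sketch per the prose above: a Metal instance plus a params dict
# (the constructor arguments are placeholders)
metal = Metal("API_KEY", "CLIENT_ID", "INDEX_ID")
retriever = MetalRetriever(metal, params={"limit": 2})
docs = retriever.invoke("search query")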


2 changes: 1 addition & 1 deletion src/oss/python/integrations/providers/outline.mdx
@@ -20,5 +20,5 @@ os.environ["OUTLINE_INSTANCE_URL"] = "https://app.getoutline.com"
See a [usage example](/oss/integrations/retrievers/outline).

```python
-from langchain.retrievers import OutlineRetriever
+from langchain_classic.retrievers import OutlineRetriever
```
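
A short sketch, assuming `OUTLINE_API_KEY` and `OUTLINE_INSTANCE_URL` are set as shown above:

```python
from langchain_classic.retrievers import OutlineRetriever

retriever = OutlineRetriever()
docs = retriever.invoke("LangChain")
```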
2 changes: 1 addition & 1 deletion src/oss/python/integrations/providers/pubmed.mdx
@@ -24,7 +24,7 @@ uv add xmltodict
See a [usage example](/oss/integrations/retrievers/pubmed).

```python
-from langchain.retrievers import PubMedRetriever
+from langchain_classic.retrievers import PubMedRetriever
```
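
For instance, a minimal retrieval sketch:

```python
from langchain_classic.retrievers import PubMedRetriever

retriever = PubMedRetriever()
docs = retriever.invoke("covid vaccination")
```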

### Document Loader
2 changes: 1 addition & 1 deletion src/oss/python/integrations/providers/ragatouille.mdx
@@ -104,7 +104,7 @@ We can see that the result isn't super relevant to the question asked
## Using ColBERT as a reranker

```python
-from langchain.retrievers import ContextualCompressionRetriever
+from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever

compression_retriever = ContextualCompressionRetriever(
base_compressor=RAG.as_langchain_document_compressor(), base_retriever=retriever
2 changes: 1 addition & 1 deletion src/oss/python/integrations/providers/vespa.mdx
@@ -25,5 +25,5 @@ uv add pyvespa
See a [usage example](/oss/integrations/retrievers/vespa).

```python
-from langchain.retrievers import VespaRetriever
+from langchain_classic.retrievers import VespaRetriever
```
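
A hedged construction sketch (the application URL and query `body` are illustrative assumptions; see the linked example for real values):

```python
from vespa.application import Vespa

from langchain_classic.retrievers import VespaRetriever

vespa_app = Vespa(url="https://your-app.vespa.example")  # placeholder endpoint
body = {"yql": "select content from paragraph where userQuery()", "hits": 4}
retriever = VespaRetriever(app=vespa_app, body=body, content_field="content")
```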
2 changes: 1 addition & 1 deletion src/oss/python/integrations/providers/wikipedia.mdx
@@ -32,5 +32,5 @@ from langchain_community.document_loaders import WikipediaLoader
See a [usage example](/oss/integrations/retrievers/wikipedia).

```python
-from langchain.retrievers import WikipediaRetriever
+from langchain_classic.retrievers import WikipediaRetriever
```
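
For instance:

```python
from langchain_classic.retrievers import WikipediaRetriever

retriever = WikipediaRetriever()
docs = retriever.invoke("Alan Turing")
```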
@@ -271,7 +271,7 @@ May God bless you all. May God protect our troops.
Now let's wrap our base retriever with a `ContextualCompressionRetriever`, using `FlashrankRerank` as a compressor.

```python
-from langchain.retrievers import ContextualCompressionRetriever
+from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever
from langchain_community.document_compressors import FlashrankRerank
from langchain_openai import ChatOpenAI
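# Assumed continuation, not part of the diff (`retriever` is defined earlier):
compressor = FlashrankRerank()
compression_retriever = ContextualCompressionRetriever(
    base_compressor=compressor, base_retriever=retriever
)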

2 changes: 1 addition & 1 deletion src/oss/python/integrations/retrievers/fleet_context.mdx
@@ -17,7 +17,7 @@ from operator import itemgetter
from typing import Any, Optional, Type

import pandas as pd
-from langchain.retrievers import MultiVectorRetriever
+from langchain_classic.retrievers import MultiVectorRetriever
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_core.stores import BaseStore
@@ -71,7 +71,7 @@ reranker = GreenNodeRerank(
Reranking models enhance retrieval-augmented generation (RAG) workflows by refining and reordering initial search results based on semantic relevance. The example below demonstrates how to integrate GreenNodeRerank with a base retriever to improve the quality of retrieved documents.

```python
-from langchain.retrievers.contextual_compression import ContextualCompressionRetriever
+from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_greennode import GreenNodeEmbeddings
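# Hedged sketch: build a small FAISS index, then rerank with `reranker` from
# above (the embeddings constructor and document are assumptions; an API key
# may be required)
embeddings = GreenNodeEmbeddings()
vectorstore = FAISS.from_documents(
    [Document(page_content="LangChain integrates many rerankers.")], embeddings
)
compression_retriever = ContextualCompressionRetriever(
    base_compressor=reranker, base_retriever=vectorstore.as_retriever()
)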
@@ -180,7 +180,7 @@ wx_rerank = WatsonxRerank(
```

```python
-from langchain.retrievers.contextual_compression import ContextualCompressionRetriever
+from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever

compression_retriever = ContextualCompressionRetriever(
base_compressor=wx_rerank, base_retriever=retriever
2 changes: 1 addition & 1 deletion src/oss/python/integrations/retrievers/llmlingua.mdx
@@ -261,7 +261,7 @@ May God bless you all. May God protect our troops.
Now let’s wrap our base retriever with a `ContextualCompressionRetriever`, using `LLMLinguaCompressor` as a compressor.

```python
-from langchain.retrievers import ContextualCompressionRetriever
+from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever
from langchain_community.document_compressors import LLMLinguaCompressor
from langchain_openai import ChatOpenAI
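# Assumed continuation (model and device per the LLMLingua docs; `retriever`
# is defined earlier in the notebook):
compressor = LLMLinguaCompressor(model_name="openai-community/gpt2", device_map="cpu")
compression_retriever = ContextualCompressionRetriever(
    base_compressor=compressor, base_retriever=retriever
)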

4 changes: 2 additions & 2 deletions src/oss/python/integrations/retrievers/merger_retriever.mdx
@@ -10,11 +10,11 @@ The `MergerRetriever` class can be used to improve the accuracy of document retr
import os

import chromadb
-from langchain.retrievers import (
+from langchain_classic.retrievers import (
    ContextualCompressionRetriever,
-    DocumentCompressorPipeline,
    MergerRetriever,
)
+from langchain_classic.retrievers.document_compressors import DocumentCompressorPipeline
from langchain_chroma import Chroma
from langchain_community.document_transformers import (
EmbeddingsClusteringFilter,
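
A compact sketch of the merge-then-compress pattern these imports support (the two base retrievers are assumptions):

```python
# Hypothetical: two base retrievers built elsewhere in the notebook
lotr = MergerRetriever(retrievers=[retriever_a, retriever_b])
pipeline = DocumentCompressorPipeline(transformers=[])  # add filters/rerankers as needed
compression_retriever = ContextualCompressionRetriever(
    base_compressor=pipeline, base_retriever=lotr
)
```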
2 changes: 1 addition & 1 deletion src/oss/python/integrations/retrievers/re_phrase.mdx
@@ -15,7 +15,7 @@ Create a vector store.
```python
import logging

-from langchain.retrievers import RePhraseQueryRetriever
+from langchain_classic.retrievers import RePhraseQueryRetriever
from langchain_chroma import Chroma
from langchain_community.document_loaders import WebBaseLoader
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
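# Assumed continuation: load docs, embed them, and let the LLM re-phrase
# raw queries before retrieval (the source URL is illustrative)
docs = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/").load()
vectorstore = Chroma.from_documents(docs, OpenAIEmbeddings())
retriever_from_llm = RePhraseQueryRetriever.from_llm(
    retriever=vectorstore.as_retriever(), llm=ChatOpenAI(temperature=0)
)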
@@ -789,7 +789,7 @@ Next we'll create our self-querying retriever. To do this we'll need to provide

```python
from langchain.chains.query_constructor.base import AttributeInfo
-from langchain.retrievers.self_query.base import SelfQueryRetriever
+from langchain_classic.retrievers.self_query.base import SelfQueryRetriever
from langchain_openai import OpenAI

# Give LLM info about the metadata fields
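# Hedged sketch of what typically follows (the field names and `vectorstore`
# are assumptions, not part of the diff):
metadata_field_info = [
    AttributeInfo(name="source", description="Where the chunk came from", type="string"),
]
retriever = SelfQueryRetriever.from_llm(
    OpenAI(temperature=0),
    vectorstore,
    "Document excerpts",
    metadata_field_info,
)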
2 changes: 1 addition & 1 deletion src/oss/python/integrations/vectorstores/vectara.mdx
@@ -228,7 +228,7 @@ Vectara's "RAG as a service" does a lot of the heavy lifting in creating questio
Since MQR uses an LLM we have to set that up - here we choose @[`ChatOpenAI`] :

```python
-from langchain.retrievers.multi_query import MultiQueryRetriever
+from langchain_classic.retrievers.multi_query import MultiQueryRetriever
from langchain_openai.chat_models import ChatOpenAI

llm = ChatOpenAI(temperature=0)
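# Assumed continuation: wrap the Vectara retriever (defined earlier) with MQR
mqr = MultiQueryRetriever.from_llm(retriever=retriever, llm=llm)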