From 9b23580cfd4372649c39258aeb130093cf118d27 Mon Sep 17 00:00:00 2001
From: bart0401
Date: Thu, 30 Oct 2025 14:23:35 +0900
Subject: [PATCH] Update retriever imports to use langchain_classic for v1 compatibility

The langchain.retrievers module was removed in LangChain 1.0+. All
retriever imports have been updated to use langchain_classic, as
documented in the v1 migration guide.

Changes:
- Document transformers (9 files): Updated ContextualCompressionRetriever
  and document compressor imports
- Retrievers (7 files): Updated ContextualCompressionRetriever,
  MultiVectorRetriever, MergerRetriever, DocumentCompressorPipeline, and
  RePhraseQueryRetriever imports
- Provider integrations (11 files): Updated provider-specific retriever imports
- Callbacks (1 file): Updated ContextualCompressionRetriever, FlashrankRerank,
  and MultiQueryRetriever imports
- Vectorstores (2 files): Updated SelfQueryRetriever and MultiQueryRetriever imports
- Document loaders (1 file): Updated SelfQueryRetriever and MultiVectorRetriever imports
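The changes are mechanical; for example, the UpTrain callback page's
legacy top-level imports

    from langchain.retrievers import ContextualCompressionRetriever
    from langchain.retrievers.document_compressors import FlashrankRerank

become

    from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever
    from langchain_classic.retrievers.document_compressors import FlashrankRerank

The new paths assume the separately installed langchain-classic package
is available alongside langchain 1.0+, and can be smoke-tested with:

    python -c "from langchain_classic.retrievers.multi_query import MultiQueryRetriever"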
Fixes #1195
---
 src/oss/python/integrations/callbacks/uptrain.mdx | 6 +++---
 src/oss/python/integrations/document_loaders/docugami.mdx | 4 ++--
 .../document_transformers/cross_encoder_reranker.mdx | 4 ++--
 .../integrations/document_transformers/dashscope_rerank.mdx | 2 +-
 .../document_transformers/google_cloud_vertexai_rerank.mdx | 2 +-
 .../integrations/document_transformers/infinity_rerank.mdx | 2 +-
 .../integrations/document_transformers/jina_rerank.mdx | 2 +-
 .../integrations/document_transformers/openvino_rerank.mdx | 2 +-
 .../integrations/document_transformers/rankllm-reranker.mdx | 4 ++--
 .../document_transformers/volcengine_rerank.mdx | 2 +-
 .../document_transformers/voyageai-reranker.mdx | 2 +-
 src/oss/python/integrations/providers/astradb.mdx | 2 +-
 src/oss/python/integrations/providers/breebs.mdx | 2 +-
 src/oss/python/integrations/providers/chaindesk.mdx | 2 +-
 src/oss/python/integrations/providers/chroma.mdx | 2 +-
 src/oss/python/integrations/providers/cohere.mdx | 6 +++---
 src/oss/python/integrations/providers/metal.mdx | 2 +-
 src/oss/python/integrations/providers/outline.mdx | 2 +-
 src/oss/python/integrations/providers/pubmed.mdx | 2 +-
 src/oss/python/integrations/providers/ragatouille.mdx | 2 +-
 src/oss/python/integrations/providers/vespa.mdx | 2 +-
 src/oss/python/integrations/providers/wikipedia.mdx | 2 +-
 .../python/integrations/retrievers/flashrank-reranker.mdx | 2 +-
 src/oss/python/integrations/retrievers/fleet_context.mdx | 2 +-
 .../python/integrations/retrievers/greennode_reranker.mdx | 2 +-
 .../python/integrations/retrievers/ibm_watsonx_ranker.mdx | 2 +-
 src/oss/python/integrations/retrievers/llmlingua.mdx | 2 +-
 src/oss/python/integrations/retrievers/merger_retriever.mdx | 4 ++--
 src/oss/python/integrations/retrievers/re_phrase.mdx | 2 +-
 .../python/integrations/vectorstores/timescalevector.mdx | 2 +-
 src/oss/python/integrations/vectorstores/vectara.mdx | 2 +-
 31 files changed, 39 insertions(+), 39 deletions(-)

diff --git a/src/oss/python/integrations/callbacks/uptrain.mdx b/src/oss/python/integrations/callbacks/uptrain.mdx
index 66e7e60079..88878f9ab8 100644
--- a/src/oss/python/integrations/callbacks/uptrain.mdx
+++ b/src/oss/python/integrations/callbacks/uptrain.mdx
@@ -62,9 +62,9 @@ NOTE: that you can also install `faiss-gpu` instead of `faiss-cpu` if you want t
from getpass import getpass
from langchain.chains import RetrievalQA
-from langchain.retrievers import ContextualCompressionRetriever
-from langchain.retrievers.document_compressors import FlashrankRerank
-from langchain.retrievers.multi_query import MultiQueryRetriever
+from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever
+from langchain_classic.retrievers.document_compressors import FlashrankRerank
+from langchain_classic.retrievers.multi_query import MultiQueryRetriever
from langchain_community.callbacks.uptrain_callback import UpTrainCallbackHandler
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import FAISS
diff --git a/src/oss/python/integrations/document_loaders/docugami.mdx b/src/oss/python/integrations/document_loaders/docugami.mdx
index 3700bf3dcd..2fa1481bd9 100644
--- a/src/oss/python/integrations/document_loaders/docugami.mdx
+++ b/src/oss/python/integrations/document_loaders/docugami.mdx
@@ -204,7 +204,7 @@ We can use a self-querying retriever to improve our query accuracy, using this a
```python
from langchain.chains.query_constructor.schema import AttributeInfo
-from langchain.retrievers.self_query.base import SelfQueryRetriever
+from langchain_classic.retrievers.self_query.base import SelfQueryRetriever
from langchain_chroma import Chroma
EXCLUDE_KEYS = ["id", "xpath", "structure"]
@@ -322,7 +322,7 @@ CHUNK 21b4d9517f7ccdc0e3a028ce5043a2a0: page_content='1.1 Landlord.\n
```
```python
-from langchain.retrievers.multi_vector import MultiVectorRetriever, SearchType
+from langchain_classic.retrievers.multi_vector import MultiVectorRetriever, SearchType
from langchain.storage import InMemoryStore
from langchain_chroma import Chroma
from langchain_openai import OpenAIEmbeddings
diff --git a/src/oss/python/integrations/document_transformers/cross_encoder_reranker.mdx b/src/oss/python/integrations/document_transformers/cross_encoder_reranker.mdx
index 59b6408109..41773caba7 100644
--- a/src/oss/python/integrations/document_transformers/cross_encoder_reranker.mdx
+++ b/src/oss/python/integrations/document_transformers/cross_encoder_reranker.mdx
@@ -58,8 +58,8 @@ pretty_print_docs(docs)
Now let's wrap our base retriever with a `ContextualCompressionRetriever`. `CrossEncoderReranker` uses `HuggingFaceCrossEncoder` to rerank the returned results.
```python
-from langchain.retrievers import ContextualCompressionRetriever
-from langchain.retrievers.document_compressors import CrossEncoderReranker
+from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever
+from langchain_classic.retrievers.document_compressors import CrossEncoderReranker
from langchain_community.cross_encoders import HuggingFaceCrossEncoder
model = HuggingFaceCrossEncoder(model_name="BAAI/bge-reranker-base")
diff --git a/src/oss/python/integrations/document_transformers/dashscope_rerank.mdx b/src/oss/python/integrations/document_transformers/dashscope_rerank.mdx
index cac2381db3..6808f6c976 100644
--- a/src/oss/python/integrations/document_transformers/dashscope_rerank.mdx
+++ b/src/oss/python/integrations/document_transformers/dashscope_rerank.mdx
@@ -263,7 +263,7 @@ And with an unwavering resolve that freedom will always triumph over tyranny.
Now let's wrap our base retriever with a `ContextualCompressionRetriever`. We'll use the `DashScopeRerank` to rerank the returned results.
```python
-from langchain.retrievers import ContextualCompressionRetriever
+from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever
from langchain_community.document_compressors.dashscope_rerank import DashScopeRerank
compressor = DashScopeRerank()
diff --git a/src/oss/python/integrations/document_transformers/google_cloud_vertexai_rerank.mdx b/src/oss/python/integrations/document_transformers/google_cloud_vertexai_rerank.mdx
index f650115bd1..afddf89c44 100644
--- a/src/oss/python/integrations/document_transformers/google_cloud_vertexai_rerank.mdx
+++ b/src/oss/python/integrations/document_transformers/google_cloud_vertexai_rerank.mdx
@@ -64,7 +64,7 @@ Your 1 documents have been split into 266 chunks
```python
import pandas as pd
-from langchain.retrievers.contextual_compression import ContextualCompressionRetriever
+from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever
from langchain_google_community.vertex_rank import VertexAIRank
# Instantiate the VertexAIReranker with the SDK manager
diff --git a/src/oss/python/integrations/document_transformers/infinity_rerank.mdx b/src/oss/python/integrations/document_transformers/infinity_rerank.mdx
index 6f20b11198..b25d12fc75 100644
--- a/src/oss/python/integrations/document_transformers/infinity_rerank.mdx
+++ b/src/oss/python/integrations/document_transformers/infinity_rerank.mdx
@@ -280,7 +280,7 @@ Now let's wrap our base retriever with a `ContextualCompressionRetriever`. We'll
```python
from infinity_client import Client
-from langchain.retrievers import ContextualCompressionRetriever
+from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever
from langchain_community.document_compressors.infinity_rerank import InfinityRerank
client = Client(base_url="http://localhost:7997")
diff --git a/src/oss/python/integrations/document_transformers/jina_rerank.mdx b/src/oss/python/integrations/document_transformers/jina_rerank.mdx
index d5d5590165..ac13681b7d 100644
--- a/src/oss/python/integrations/document_transformers/jina_rerank.mdx
+++ b/src/oss/python/integrations/document_transformers/jina_rerank.mdx
@@ -65,7 +65,7 @@ pretty_print_docs(docs)
Now let's wrap our base retriever with a ContextualCompressionRetriever, using Jina Reranker as a compressor.
```python
-from langchain.retrievers import ContextualCompressionRetriever
+from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever
from langchain_community.document_compressors import JinaRerank
compressor = JinaRerank()
diff --git a/src/oss/python/integrations/document_transformers/openvino_rerank.mdx b/src/oss/python/integrations/document_transformers/openvino_rerank.mdx
index 1e65da8c6b..51d0ffdfcd 100644
--- a/src/oss/python/integrations/document_transformers/openvino_rerank.mdx
+++ b/src/oss/python/integrations/document_transformers/openvino_rerank.mdx
@@ -291,7 +291,7 @@ Metadata: {'source': '../../how_to/state_of_the_union.txt', 'id': 40}
Now let's wrap our base retriever with a `ContextualCompressionRetriever`, using `OpenVINOReranker` as a compressor.
```python
-from langchain.retrievers import ContextualCompressionRetriever
+from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever
from langchain_community.document_compressors.openvino_rerank import OpenVINOReranker
model_name = "BAAI/bge-reranker-large"
diff --git a/src/oss/python/integrations/document_transformers/rankllm-reranker.mdx b/src/oss/python/integrations/document_transformers/rankllm-reranker.mdx
index 86d217a081..580b5bca8d 100644
--- a/src/oss/python/integrations/document_transformers/rankllm-reranker.mdx
+++ b/src/oss/python/integrations/document_transformers/rankllm-reranker.mdx
@@ -270,7 +270,7 @@ RankZephyr performs listwise reranking for improved retrieval quality but requir
```python
import torch
-from langchain.retrievers.contextual_compression import ContextualCompressionRetriever
+from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever
from langchain_community.document_compressors.rankllm_rerank import RankLLMRerank
torch.cuda.empty_cache()
@@ -567,7 +567,7 @@ One America.
Retrieval + Reranking with RankGPT
```python
-from langchain.retrievers.contextual_compression import ContextualCompressionRetriever
+from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever
from langchain_community.document_compressors.rankllm_rerank import RankLLMRerank
compressor = RankLLMRerank(top_n=3, model="gpt", gpt_model="gpt-4o-mini")
diff --git a/src/oss/python/integrations/document_transformers/volcengine_rerank.mdx b/src/oss/python/integrations/document_transformers/volcengine_rerank.mdx
index efaa7420bf..c70d42ab5b 100644
--- a/src/oss/python/integrations/document_transformers/volcengine_rerank.mdx
+++ b/src/oss/python/integrations/document_transformers/volcengine_rerank.mdx
@@ -295,7 +295,7 @@ To disable this warning, you can either:
Now let's wrap our base retriever with a `ContextualCompressionRetriever`. We'll use the `VolcengineRerank` to rerank the returned results.
```python
-from langchain.retrievers import ContextualCompressionRetriever
+from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever
from langchain_community.document_compressors.volcengine_rerank import VolcengineRerank
compressor = VolcengineRerank()
diff --git a/src/oss/python/integrations/document_transformers/voyageai-reranker.mdx b/src/oss/python/integrations/document_transformers/voyageai-reranker.mdx
index 93f54eb2d9..1eee081a25 100644
--- a/src/oss/python/integrations/document_transformers/voyageai-reranker.mdx
+++ b/src/oss/python/integrations/document_transformers/voyageai-reranker.mdx
@@ -292,7 +292,7 @@ Now let's wrap our base retriever with a `ContextualCompressionRetriever`. We'll
- `rerank-lite-1`
```python
-from langchain.retrievers import ContextualCompressionRetriever
+from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever
from langchain_openai import OpenAI
from langchain_voyageai import VoyageAIRerank
diff --git a/src/oss/python/integrations/providers/astradb.mdx b/src/oss/python/integrations/providers/astradb.mdx
index 29d1e8e30c..7ee11790b1 100644
--- a/src/oss/python/integrations/providers/astradb.mdx
+++ b/src/oss/python/integrations/providers/astradb.mdx
@@ -137,7 +137,7 @@ Learn more in the [example notebook](/oss/integrations/document_loaders/astradb)
```python
from langchain_astradb import AstraDBVectorStore
-from langchain.retrievers.self_query.base import SelfQueryRetriever
+from langchain_classic.retrievers.self_query.base import SelfQueryRetriever
vector_store = AstraDBVectorStore(
    embedding=my_embedding,
diff --git a/src/oss/python/integrations/providers/breebs.mdx b/src/oss/python/integrations/providers/breebs.mdx
index 006e7cbf79..b262ebef0a 100644
--- a/src/oss/python/integrations/providers/breebs.mdx
+++ b/src/oss/python/integrations/providers/breebs.mdx
@@ -11,7 +11,7 @@ title: Breebs (Open Knowledge)
## Retriever
```python
-from langchain.retrievers import BreebsRetriever
+from langchain_classic.retrievers import BreebsRetriever
```
[See a usage example (Retrieval & ConversationalRetrievalChain)](/oss/integrations/retrievers/breebs)
diff --git a/src/oss/python/integrations/providers/chaindesk.mdx b/src/oss/python/integrations/providers/chaindesk.mdx
index 71109bb0e7..bd39814a6c 100644
--- a/src/oss/python/integrations/providers/chaindesk.mdx
+++ b/src/oss/python/integrations/providers/chaindesk.mdx
@@ -15,5 +15,5 @@ We need the [API Key](https://docs.chaindesk.ai/api-reference/authentication).
See a [usage example](/oss/integrations/retrievers/chaindesk).
```python
-from langchain.retrievers import ChaindeskRetriever
+from langchain_classic.retrievers import ChaindeskRetriever
```
diff --git a/src/oss/python/integrations/providers/chroma.mdx b/src/oss/python/integrations/providers/chroma.mdx
index 592bde511b..093fe66975 100644
--- a/src/oss/python/integrations/providers/chroma.mdx
+++ b/src/oss/python/integrations/providers/chroma.mdx
@@ -31,5 +31,5 @@ For a more detailed walkthrough of the Chroma wrapper, see [this notebook](/oss/
## Retriever
```python
-from langchain.retrievers import SelfQueryRetriever
+from langchain_classic.retrievers import SelfQueryRetriever
```
diff --git a/src/oss/python/integrations/providers/cohere.mdx b/src/oss/python/integrations/providers/cohere.mdx
index eb131da84e..d5182e5abd 100644
--- a/src/oss/python/integrations/providers/cohere.mdx
+++ b/src/oss/python/integrations/providers/cohere.mdx
@@ -25,9 +25,9 @@ Get a [Cohere api key](https://dashboard.cohere.ai/) and set it as an environmen
|---|---|---|---|---|
|Chat|Build chat bots|[chat](https://docs.cohere.com/reference/chat)|`from langchain_cohere import ChatCohere`|[cohere.ipynb](/oss/integrations/chat/cohere)|
|LLM|Generate text|[generate](https://docs.cohere.com/reference/generate)|`from langchain_cohere.llms import Cohere`|[cohere.ipynb](/oss/integrations/llms/cohere)|
-|RAG Retriever|Connect to external data sources|[chat + rag](https://docs.cohere.com/reference/chat)|`from langchain.retrievers import CohereRagRetriever`|[cohere.ipynb](/oss/integrations/retrievers/cohere)|
+|RAG Retriever|Connect to external data sources|[chat + rag](https://docs.cohere.com/reference/chat)|`from langchain_classic.retrievers import CohereRagRetriever`|[cohere.ipynb](/oss/integrations/retrievers/cohere)|
|Text Embedding|Embed strings to vectors|[embed](https://docs.cohere.com/reference/embed)|`from langchain_cohere import CohereEmbeddings`|[cohere.ipynb](/oss/integrations/text_embedding/cohere)|
-|Rerank Retriever|Rank strings based on relevance|[rerank](https://docs.cohere.com/reference/rerank)|`from langchain.retrievers.document_compressors import CohereRerank`|[cohere.ipynb](/oss/integrations/retrievers/cohere-reranker)|
+|Rerank Retriever|Rank strings based on relevance|[rerank](https://docs.cohere.com/reference/rerank)|`from langchain_classic.retrievers.document_compressors import CohereRerank`|[cohere.ipynb](/oss/integrations/retrievers/cohere-reranker)|
## Quick copy examples
@@ -140,7 +140,7 @@ The ReAct agent can be used to call multiple tools in sequence.
```python
from langchain_cohere import ChatCohere
-from langchain.retrievers import CohereRagRetriever
+from langchain_classic.retrievers import CohereRagRetriever
from langchain_core.documents import Document
rag = CohereRagRetriever(llm=ChatCohere())
diff --git a/src/oss/python/integrations/providers/metal.mdx b/src/oss/python/integrations/providers/metal.mdx
index 4094b4aba9..4ac8335b9f 100644
--- a/src/oss/python/integrations/providers/metal.mdx
+++ b/src/oss/python/integrations/providers/metal.mdx
@@ -15,7 +15,7 @@ Get started by [creating a Metal account](https://app.getmetal.io/signup).
Then, you can easily take advantage of the `MetalRetriever` class to start retrieving your data for semantic search, prompting context, etc. This class takes a `Metal` instance and a dictionary of parameters to pass to the Metal API.
```python
-from langchain.retrievers import MetalRetriever
+from langchain_classic.retrievers import MetalRetriever
from metal_sdk.metal import Metal
diff --git a/src/oss/python/integrations/providers/outline.mdx b/src/oss/python/integrations/providers/outline.mdx
index 425ee6c12b..1922cac65d 100644
--- a/src/oss/python/integrations/providers/outline.mdx
+++ b/src/oss/python/integrations/providers/outline.mdx
@@ -20,5 +20,5 @@ os.environ["OUTLINE_INSTANCE_URL"] = "https://app.getoutline.com"
See a [usage example](/oss/integrations/retrievers/outline).
```python
-from langchain.retrievers import OutlineRetriever
+from langchain_classic.retrievers import OutlineRetriever
```
diff --git a/src/oss/python/integrations/providers/pubmed.mdx b/src/oss/python/integrations/providers/pubmed.mdx
index 0e432bd1fe..4af9e56138 100644
--- a/src/oss/python/integrations/providers/pubmed.mdx
+++ b/src/oss/python/integrations/providers/pubmed.mdx
@@ -24,7 +24,7 @@ uv add xmltodict
See a [usage example](/oss/integrations/retrievers/pubmed).
```python
-from langchain.retrievers import PubMedRetriever
+from langchain_classic.retrievers import PubMedRetriever
```
### Document Loader
diff --git a/src/oss/python/integrations/providers/ragatouille.mdx b/src/oss/python/integrations/providers/ragatouille.mdx
index 17ea44b309..e91b655bde 100644
--- a/src/oss/python/integrations/providers/ragatouille.mdx
+++ b/src/oss/python/integrations/providers/ragatouille.mdx
@@ -104,7 +104,7 @@ We can see that the result isn't super relevant to the question asked
## Using ColBERT as a reranker
```python
-from langchain.retrievers import ContextualCompressionRetriever
+from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever
compression_retriever = ContextualCompressionRetriever(
    base_compressor=RAG.as_langchain_document_compressor(), base_retriever=retriever
diff --git a/src/oss/python/integrations/providers/vespa.mdx b/src/oss/python/integrations/providers/vespa.mdx
index 9a960e1b84..08ce468be6 100644
--- a/src/oss/python/integrations/providers/vespa.mdx
+++ b/src/oss/python/integrations/providers/vespa.mdx
@@ -25,5 +25,5 @@ uv add pyvespa
See a [usage example](/oss/integrations/retrievers/vespa).
```python
-from langchain.retrievers import VespaRetriever
+from langchain_classic.retrievers import VespaRetriever
```
diff --git a/src/oss/python/integrations/providers/wikipedia.mdx b/src/oss/python/integrations/providers/wikipedia.mdx
index b97c9230ad..da32b490ef 100644
--- a/src/oss/python/integrations/providers/wikipedia.mdx
+++ b/src/oss/python/integrations/providers/wikipedia.mdx
@@ -32,5 +32,5 @@ from langchain_community.document_loaders import WikipediaLoader
See a [usage example](/oss/integrations/retrievers/wikipedia).
```python
-from langchain.retrievers import WikipediaRetriever
+from langchain_classic.retrievers import WikipediaRetriever
```
diff --git a/src/oss/python/integrations/retrievers/flashrank-reranker.mdx b/src/oss/python/integrations/retrievers/flashrank-reranker.mdx
index ac204f138f..8f2e950696 100644
--- a/src/oss/python/integrations/retrievers/flashrank-reranker.mdx
+++ b/src/oss/python/integrations/retrievers/flashrank-reranker.mdx
@@ -271,7 +271,7 @@ May God bless you all. May God protect our troops.
Now let's wrap our base retriever with a `ContextualCompressionRetriever`, using `FlashrankRerank` as a compressor.
```python
-from langchain.retrievers import ContextualCompressionRetriever
+from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever
from langchain_community.document_compressors import FlashrankRerank
from langchain_openai import ChatOpenAI
diff --git a/src/oss/python/integrations/retrievers/fleet_context.mdx b/src/oss/python/integrations/retrievers/fleet_context.mdx
index 6cf7708996..47662f005d 100644
--- a/src/oss/python/integrations/retrievers/fleet_context.mdx
+++ b/src/oss/python/integrations/retrievers/fleet_context.mdx
@@ -17,7 +17,7 @@ from operator import itemgetter
from typing import Any, Optional, Type
import pandas as pd
-from langchain.retrievers import MultiVectorRetriever
+from langchain_classic.retrievers import MultiVectorRetriever
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_core.stores import BaseStore
diff --git a/src/oss/python/integrations/retrievers/greennode_reranker.mdx b/src/oss/python/integrations/retrievers/greennode_reranker.mdx
index a9c1d24dda..3743a08703 100644
--- a/src/oss/python/integrations/retrievers/greennode_reranker.mdx
+++ b/src/oss/python/integrations/retrievers/greennode_reranker.mdx
@@ -71,7 +71,7 @@ reranker = GreenNodeRerank(
Reranking models enhance retrieval-augmented generation (RAG) workflows by refining and reordering initial search results based on semantic relevance. The example below demonstrates how to integrate GreenNodeRerank with a base retriever to improve the quality of retrieved documents.
```python
-from langchain.retrievers.contextual_compression import ContextualCompressionRetriever
+from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_greennode import GreenNodeEmbeddings
diff --git a/src/oss/python/integrations/retrievers/ibm_watsonx_ranker.mdx b/src/oss/python/integrations/retrievers/ibm_watsonx_ranker.mdx
index 0fe04c9253..244938ff3b 100644
--- a/src/oss/python/integrations/retrievers/ibm_watsonx_ranker.mdx
+++ b/src/oss/python/integrations/retrievers/ibm_watsonx_ranker.mdx
@@ -180,7 +180,7 @@ wx_rerank = WatsonxRerank(
```
```python
-from langchain.retrievers.contextual_compression import ContextualCompressionRetriever
+from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever
compression_retriever = ContextualCompressionRetriever(
    base_compressor=wx_rerank, base_retriever=retriever
diff --git a/src/oss/python/integrations/retrievers/llmlingua.mdx b/src/oss/python/integrations/retrievers/llmlingua.mdx
index ab8ca5e9c0..79f0d8ef0c 100644
--- a/src/oss/python/integrations/retrievers/llmlingua.mdx
+++ b/src/oss/python/integrations/retrievers/llmlingua.mdx
@@ -261,7 +261,7 @@ May God bless you all. May God protect our troops.
Now let’s wrap our base retriever with a `ContextualCompressionRetriever`, using `LLMLinguaCompressor` as a compressor.
```python -from langchain.retrievers import ContextualCompressionRetriever +from langchain_classic.retrievers.contextual_compression import ContextualCompressionRetriever from langchain_community.document_compressors import LLMLinguaCompressor from langchain_openai import ChatOpenAI diff --git a/src/oss/python/integrations/retrievers/merger_retriever.mdx b/src/oss/python/integrations/retrievers/merger_retriever.mdx index 1c0e51aac9..4d4da94823 100644 --- a/src/oss/python/integrations/retrievers/merger_retriever.mdx +++ b/src/oss/python/integrations/retrievers/merger_retriever.mdx @@ -10,11 +10,11 @@ The `MergerRetriever` class can be used to improve the accuracy of document retr import os import chromadb -from langchain.retrievers import ( +from langchain_classic.retrievers import ( ContextualCompressionRetriever, - DocumentCompressorPipeline, MergerRetriever, ) +from langchain_classic.retrievers.document_compressors import DocumentCompressorPipeline from langchain_chroma import Chroma from langchain_community.document_transformers import ( EmbeddingsClusteringFilter, diff --git a/src/oss/python/integrations/retrievers/re_phrase.mdx b/src/oss/python/integrations/retrievers/re_phrase.mdx index 7bdb7785e9..1fa364032d 100644 --- a/src/oss/python/integrations/retrievers/re_phrase.mdx +++ b/src/oss/python/integrations/retrievers/re_phrase.mdx @@ -15,7 +15,7 @@ Create a vector store. ```python import logging -from langchain.retrievers import RePhraseQueryRetriever +from langchain_classic.retrievers import RePhraseQueryRetriever from langchain_chroma import Chroma from langchain_community.document_loaders import WebBaseLoader from langchain_openai import ChatOpenAI, OpenAIEmbeddings diff --git a/src/oss/python/integrations/vectorstores/timescalevector.mdx b/src/oss/python/integrations/vectorstores/timescalevector.mdx index 69cd5859f4..8ee74eb241 100644 --- a/src/oss/python/integrations/vectorstores/timescalevector.mdx +++ b/src/oss/python/integrations/vectorstores/timescalevector.mdx @@ -789,7 +789,7 @@ Next we'll create our self-querying retriever. To do this we'll need to provide ```python from langchain.chains.query_constructor.base import AttributeInfo -from langchain.retrievers.self_query.base import SelfQueryRetriever +from langchain_classic.retrievers.self_query.base import SelfQueryRetriever from langchain_openai import OpenAI # Give LLM info about the metadata fields diff --git a/src/oss/python/integrations/vectorstores/vectara.mdx b/src/oss/python/integrations/vectorstores/vectara.mdx index cf0908d72a..aba667dc16 100644 --- a/src/oss/python/integrations/vectorstores/vectara.mdx +++ b/src/oss/python/integrations/vectorstores/vectara.mdx @@ -228,7 +228,7 @@ Vectara's "RAG as a service" does a lot of the heavy lifting in creating questio Since MQR uses an LLM we have to set that up - here we choose @[`ChatOpenAI`] : ```python -from langchain.retrievers.multi_query import MultiQueryRetriever +from langchain_classic.retrievers.multi_query import MultiQueryRetriever from langchain_openai.chat_models import ChatOpenAI llm = ChatOpenAI(temperature=0)