-
Notifications
You must be signed in to change notification settings - Fork 192
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Extend langchain embedding API (#735)
* extend langchain embeddings Signed-off-by: yuwenzho <yuwen.zhou@intel.com>
- Loading branch information
Showing
14 changed files
with
565 additions
and
3 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,16 @@ | ||
#!/usr/bin/env python | ||
# -*- coding: utf-8 -*- | ||
# | ||
# Copyright (c) 2023 Intel Corporation | ||
# | ||
# Licensed under the Apache License, Version 2.0 (the "License"); | ||
# you may not use this file except in compliance with the License. | ||
# You may obtain a copy of the License at | ||
# | ||
# http://www.apache.org/licenses/LICENSE-2.0 | ||
# | ||
# Unless required by applicable law or agreed to in writing, software | ||
# distributed under the License is distributed on an "AS IS" BASIS, | ||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
# See the License for the specific language governing permissions and | ||
# limitations under the License. |
22 changes: 22 additions & 0 deletions
22
intel_extension_for_transformers/langchain/embeddings/__init__.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,22 @@ | ||
#!/usr/bin/env python | ||
# -*- coding: utf-8 -*- | ||
# | ||
# Copyright (c) 2023 Intel Corporation | ||
# | ||
# Licensed under the Apache License, Version 2.0 (the "License"); | ||
# you may not use this file except in compliance with the License. | ||
# You may obtain a copy of the License at | ||
# | ||
# http://www.apache.org/licenses/LICENSE-2.0 | ||
# | ||
# Unless required by applicable law or agreed to in writing, software | ||
# distributed under the License is distributed on an "AS IS" BASIS, | ||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
# See the License for the specific language governing permissions and | ||
# limitations under the License. | ||
|
||
from .embeddings import ( | ||
HuggingFaceEmbeddings, | ||
HuggingFaceBgeEmbeddings, | ||
HuggingFaceInstructEmbeddings | ||
) |
305 changes: 305 additions & 0 deletions
305
intel_extension_for_transformers/langchain/embeddings/embeddings.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,305 @@ | ||
#!/usr/bin/env python | ||
# -*- coding: utf-8 -*- | ||
# | ||
# Copyright (c) 2023 Intel Corporation | ||
# | ||
# Licensed under the Apache License, Version 2.0 (the "License"); | ||
# you may not use this file except in compliance with the License. | ||
# You may obtain a copy of the License at | ||
# | ||
# http://www.apache.org/licenses/LICENSE-2.0 | ||
# | ||
# Unless required by applicable law or agreed to in writing, software | ||
# distributed under the License is distributed on an "AS IS" BASIS, | ||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
# See the License for the specific language governing permissions and | ||
# limitations under the License. | ||
|
||
import logging | ||
from typing import Any, Dict, List, Optional | ||
from .optimized_instructor_embedding import OptimizedInstructor | ||
from .optimized_sentence_transformers import OptimizedSentenceTransformer | ||
from intel_extension_for_transformers.transformers.utils.utility import LazyImport | ||
|
||
langchain_core = LazyImport("langchain_core") | ||
|
||
DEFAULT_MODEL_NAME = "sentence-transformers/all-mpnet-base-v2" | ||
DEFAULT_INSTRUCT_MODEL = "hkunlp/instructor-large" | ||
DEFAULT_BGE_MODEL = "BAAI/bge-large-en" | ||
DEFAULT_EMBED_INSTRUCTION = "Represent the document for retrieval: " | ||
DEFAULT_QUERY_INSTRUCTION = ( | ||
"Represent the question for retrieving supporting documents: " | ||
) | ||
DEFAULT_QUERY_BGE_INSTRUCTION_EN = ( | ||
"Represent this question for searching relevant passages: " | ||
) | ||
DEFAULT_QUERY_BGE_INSTRUCTION_ZH = "为这个句子生成表示以用于检索相关文章:" | ||
|
||
logger = logging.getLogger(__name__) | ||
|
||
|
||
class HuggingFaceEmbeddings(langchain_core.pydantic_v1.BaseModel, langchain_core.embeddings.Embeddings):
    """HuggingFace sentence_transformers embedding models.

    Wraps an ``OptimizedSentenceTransformer`` behind the LangChain
    ``Embeddings`` interface.

    To use, you should have the ``sentence_transformers`` python package installed.

    Example:
        .. code-block:: python

            from intel_extension_for_transformers.langchain.embeddings import HuggingFaceEmbeddings

            model_name = "sentence-transformers/all-mpnet-base-v2"
            model_kwargs = {'device': 'cpu'}
            encode_kwargs = {'normalize_embeddings': False}
            hf = HuggingFaceEmbeddings(
                model_name=model_name,
                model_kwargs=model_kwargs,
                encode_kwargs=encode_kwargs
            )
    """

    client: Any  #: :meta private:
    model_name: str = DEFAULT_MODEL_NAME
    """Model name to use."""
    cache_folder: Optional[str] = None
    """Path to store models.
    Can be also set by SENTENCE_TRANSFORMERS_HOME environment variable."""
    model_kwargs: Dict[str, Any] = langchain_core.pydantic_v1.Field(default_factory=dict)
    """Keyword arguments to pass to the model."""
    encode_kwargs: Dict[str, Any] = langchain_core.pydantic_v1.Field(default_factory=dict)
    """Keyword arguments to pass when calling the `encode` method of the model."""
    multi_process: bool = False
    """Run encode() on multiple GPUs."""

    def __init__(self, **kwargs: Any):
        """Initialize the sentence_transformer.

        Raises:
            ImportError: If ``sentence_transformers`` is not installed.
        """
        super().__init__(**kwargs)
        # Verify the optional dependency is available before loading the model.
        try:
            import sentence_transformers

        except ImportError as exc:
            raise ImportError(
                "Could not import sentence_transformers python package. "
                "Please install it with `pip install sentence-transformers`."
            ) from exc

        self.client = OptimizedSentenceTransformer(
            self.model_name, cache_folder=self.cache_folder, **self.model_kwargs
        )

    class Config:
        """Configuration for this pydantic object."""

        extra = langchain_core.pydantic_v1.Extra.forbid

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace transformer model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        # Newlines are collapsed to spaces so the tokenizer sees one sequence.
        texts = list(map(lambda x: x.replace("\n", " "), texts))
        if self.multi_process:
            # The sentence_transformers module is only needed here, to tear
            # down the worker pool — import lazily on this branch only.
            import sentence_transformers

            pool = self.client.start_multi_process_pool()
            # NOTE(review): encode_kwargs are not forwarded to
            # encode_multi_process — confirm whether this is intentional.
            embeddings = self.client.encode_multi_process(texts, pool)
            sentence_transformers.SentenceTransformer.stop_multi_process_pool(pool)
        else:
            embeddings = self.client.encode(texts, **self.encode_kwargs)

        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace transformer model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self.embed_documents([text])[0]
|
||
|
||
class HuggingFaceBgeEmbeddings(langchain_core.pydantic_v1.BaseModel, langchain_core.embeddings.Embeddings):
    """HuggingFace BGE sentence_transformers embedding models.

    Like :class:`HuggingFaceEmbeddings`, but prepends a BGE-style retrieval
    instruction to every query before encoding it.

    To use, you should have the ``sentence_transformers`` python package installed.

    Example:
        .. code-block:: python

            from intel_extension_for_transformers.langchain.embeddings import HuggingFaceBgeEmbeddings

            model_name = "BAAI/bge-large-en"
            model_kwargs = {'device': 'cpu'}
            encode_kwargs = {'normalize_embeddings': True}
            hf = HuggingFaceBgeEmbeddings(
                model_name=model_name,
                model_kwargs=model_kwargs,
                encode_kwargs=encode_kwargs
            )
    """

    client: Any  #: :meta private:
    model_name: str = DEFAULT_BGE_MODEL
    """Model name to use."""
    cache_folder: Optional[str] = None
    """Path to store models.
    Can be also set by SENTENCE_TRANSFORMERS_HOME environment variable."""
    model_kwargs: Dict[str, Any] = langchain_core.pydantic_v1.Field(default_factory=dict)
    """Keyword arguments to pass to the model."""
    encode_kwargs: Dict[str, Any] = langchain_core.pydantic_v1.Field(default_factory=dict)
    """Keyword arguments to pass when calling the `encode` method of the model."""
    query_instruction: str = DEFAULT_QUERY_BGE_INSTRUCTION_EN
    """Instruction to use for embedding query."""

    def __init__(self, **kwargs: Any):
        """Initialize the sentence_transformer.

        Raises:
            ImportError: If ``sentence_transformers`` is not installed.
        """
        super().__init__(**kwargs)
        # Verify the optional dependency is available before loading the model.
        try:
            import sentence_transformers

        except ImportError as exc:
            raise ImportError(
                "Could not import sentence_transformers python package. "
                # Canonical PyPI name uses a hyphen; kept consistent with
                # HuggingFaceEmbeddings' error message.
                "Please install it with `pip install sentence-transformers`."
            ) from exc

        self.client = OptimizedSentenceTransformer(
            self.model_name, cache_folder=self.cache_folder, **self.model_kwargs
        )
        # Chinese BGE checkpoints expect the Chinese query instruction.
        if "-zh" in self.model_name:
            self.query_instruction = DEFAULT_QUERY_BGE_INSTRUCTION_ZH

    class Config:
        """Configuration for this pydantic object."""

        extra = langchain_core.pydantic_v1.Extra.forbid

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace transformer model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        # Newlines are collapsed to spaces; documents get no instruction prefix.
        texts = [t.replace("\n", " ") for t in texts]
        embeddings = self.client.encode(texts, **self.encode_kwargs)
        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace transformer model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        text = text.replace("\n", " ")
        # Queries are prefixed with the retrieval instruction, per BGE usage.
        embedding = self.client.encode(
            self.query_instruction + text, **self.encode_kwargs
        )
        return embedding.tolist()
|
||
class HuggingFaceInstructEmbeddings(langchain_core.pydantic_v1.BaseModel, langchain_core.embeddings.Embeddings):
    """Wrapper around sentence_transformers embedding models.

    Uses an INSTRUCTOR-style model: every input is encoded as an
    ``[instruction, text]`` pair, with separate instructions for documents
    and queries.

    To use, you should have the ``sentence_transformers``
    and ``InstructorEmbedding`` python packages installed.

    Example:
        .. code-block:: python

            from intel_extension_for_transformers.langchain.embeddings import HuggingFaceInstructEmbeddings

            model_name = "hkunlp/instructor-large"
            model_kwargs = {'device': 'cpu'}
            encode_kwargs = {'normalize_embeddings': True}
            hf = HuggingFaceInstructEmbeddings(
                model_name=model_name,
                model_kwargs=model_kwargs,
                encode_kwargs=encode_kwargs
            )
    """

    client: Any  #: :meta private:
    model_name: str = DEFAULT_INSTRUCT_MODEL
    """Model name to use."""
    cache_folder: Optional[str] = None
    """Path to store models.
    Can be also set by SENTENCE_TRANSFORMERS_HOME environment variable."""
    model_kwargs: Dict[str, Any] = langchain_core.pydantic_v1.Field(default_factory=dict)
    """Keyword arguments to pass to the model."""
    encode_kwargs: Dict[str, Any] = langchain_core.pydantic_v1.Field(default_factory=dict)
    """Keyword arguments to pass when calling the `encode` method of the model."""
    embed_instruction: str = DEFAULT_EMBED_INSTRUCTION
    """Instruction to use for embedding documents."""
    query_instruction: str = DEFAULT_QUERY_INSTRUCTION
    """Instruction to use for embedding query."""

    def __init__(self, **kwargs: Any):
        """Initialize the sentence_transformer.

        Raises:
            ImportError: If ``sentence_transformers`` or
                ``InstructorEmbedding`` is not installed.
        """
        super().__init__(**kwargs)

        # check sentence_transformers python package
        try:
            import sentence_transformers

        except ImportError as exc:
            raise ImportError(
                "Could not import sentence_transformers python package. "
                # Canonical PyPI name uses a hyphen; kept consistent with
                # HuggingFaceEmbeddings' error message.
                "Please install it with `pip install sentence-transformers`."
            ) from exc

        # check InstructorEmbedding python package
        try:
            import InstructorEmbedding

        except ImportError as exc:
            raise ImportError(
                "Could not import InstructorEmbedding python package. "
                "Please install it with `pip install InstructorEmbedding`."
            ) from exc

        self.client = OptimizedInstructor(
            self.model_name, cache_folder=self.cache_folder, **self.model_kwargs
        )

    class Config:
        """Configuration for this pydantic object."""

        extra = langchain_core.pydantic_v1.Extra.forbid

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace instruct model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        # INSTRUCTOR models take [instruction, text] pairs as input.
        instruction_pairs = [[self.embed_instruction, text] for text in texts]
        embeddings = self.client.encode(instruction_pairs, **self.encode_kwargs)
        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace instruct model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        instruction_pair = [self.query_instruction, text]
        # encode() expects a batch; take the single result back out.
        embedding = self.client.encode([instruction_pair], **self.encode_kwargs)[0]
        return embedding.tolist()
Oops, something went wrong.