From 12818d35612ac5cf5c56effb890bbaecbbf09c6e Mon Sep 17 00:00:00 2001
From: Liuhong99 <39693953+Liuhong99@users.noreply.github.com>
Date: Tue, 21 Nov 2023 16:53:16 -0800
Subject: [PATCH] Added voyage query engine

---
 .../llama_packs/voyage_query_engine/README.md | 46 +++++++++++++++++++
 .../voyage_query_engine/__init__.py           |  3 ++
 .../llama_packs/voyage_query_engine/base.py   | 29 ++++++++++++
 .../voyage_query_engine/requirements.txt      |  2 +
 4 files changed, 80 insertions(+)
 create mode 100644 llama_hub/llama_packs/voyage_query_engine/README.md
 create mode 100644 llama_hub/llama_packs/voyage_query_engine/__init__.py
 create mode 100644 llama_hub/llama_packs/voyage_query_engine/base.py
 create mode 100644 llama_hub/llama_packs/voyage_query_engine/requirements.txt

diff --git a/llama_hub/llama_packs/voyage_query_engine/README.md b/llama_hub/llama_packs/voyage_query_engine/README.md
new file mode 100644
index 0000000000..1ad02d32b2
--- /dev/null
+++ b/llama_hub/llama_packs/voyage_query_engine/README.md
@@ -0,0 +1,46 @@
+# Voyage Query Engine Pack
+
+Create a query engine using GPT4 and [Voyage AI](https://docs.voyageai.com/embeddings/) Embeddings.
+
+## CLI Usage
+
+You can download llamapacks directly using `llamaindex-cli`, which comes installed with the `llama-index` python package:
+
+```bash
+llamaindex-cli download-llamapack VoyageQueryEnginePack --download-dir ./voyage_pack
+```
+
+You can then inspect the files at `./voyage_pack` and use them as a template for your own project.
+
+## Code Usage
+
+You can download the pack to the `./voyage_pack` directory:
+
+```python
+from llama_index.llama_packs import download_llama_pack
+
+# download and install dependencies
+VoyageQueryEnginePack = download_llama_pack(
+  "VoyageQueryEnginePack", "./voyage_pack"
+)
+
+# You can use any llama-hub loader to get documents!
+voyage_pack = VoyageQueryEnginePack(documents)
+```
+
+From here, you can use the pack, or inspect and modify the pack in `./voyage_pack`.
+
+The `run()` function is a light wrapper around `index.as_query_engine().query()`.
+
+```python
+response = voyage_pack.run("What did the author do growing up?", similarity_top_k=2)
+```
+
+You can also use modules individually.
+
+```python
+# Use the index directly
+index = voyage_pack.index
+query_engine = index.as_query_engine()
+retriever = index.as_retriever()
+```
diff --git a/llama_hub/llama_packs/voyage_query_engine/__init__.py b/llama_hub/llama_packs/voyage_query_engine/__init__.py
new file mode 100644
index 0000000000..c0fe6271b6
--- /dev/null
+++ b/llama_hub/llama_packs/voyage_query_engine/__init__.py
@@ -0,0 +1,3 @@
+from llama_hub.llama_packs.voyage_query_engine.base import VoyageQueryEnginePack
+
+__all__ = ["VoyageQueryEnginePack"]
diff --git a/llama_hub/llama_packs/voyage_query_engine/base.py b/llama_hub/llama_packs/voyage_query_engine/base.py
new file mode 100644
index 0000000000..cd15dd9335
--- /dev/null
+++ b/llama_hub/llama_packs/voyage_query_engine/base.py
@@ -0,0 +1,44 @@
+"""Voyage query engine pack: a GPT-4 LLM paired with Voyage AI embeddings."""
+import os
+from typing import Any, Dict, List
+
+from llama_index import ServiceContext, VectorStoreIndex
+from llama_index.embeddings import VoyageEmbedding
+from llama_index.llama_pack.base import BaseLlamaPack
+from llama_index.llms import OpenAI
+from llama_index.schema import Document
+
+
+class VoyageQueryEnginePack(BaseLlamaPack):
+    """Builds a ``VectorStoreIndex`` over *documents* embedded with Voyage AI
+    (``voyage-01``), answered with an OpenAI GPT-4 LLM.
+
+    Requires the ``VOYAGE_API_KEY`` environment variable to be set.
+    """
+
+    def __init__(self, documents: List[Document]) -> None:
+        """Index *documents* with Voyage embeddings and a GPT-4 LLM."""
+        # Fail early with a clear message instead of a bare KeyError.
+        voyage_api_key = os.environ.get("VOYAGE_API_KEY")
+        if not voyage_api_key:
+            raise ValueError("The VOYAGE_API_KEY environment variable is not set.")
+        llm = OpenAI(model="gpt-4")
+        embed_model = VoyageEmbedding(
+            model_name="voyage-01", voyage_api_key=voyage_api_key
+        )
+        service_context = ServiceContext.from_defaults(
+            llm=llm, embed_model=embed_model
+        )
+        self.llm = llm
+        self.index = VectorStoreIndex.from_documents(
+            documents, service_context=service_context
+        )
+
+    def get_modules(self) -> Dict[str, Any]:
+        """Get modules."""
+        return {"llm": self.llm, "index": self.index}
+
+    def run(self, query_str: str, **kwargs: Any) -> Any:
+        """Run the pipeline; **kwargs are forwarded to ``as_query_engine()``."""
+        query_engine = self.index.as_query_engine(**kwargs)
+        return query_engine.query(query_str)
diff --git a/llama_hub/llama_packs/voyage_query_engine/requirements.txt b/llama_hub/llama_packs/voyage_query_engine/requirements.txt
new file mode 100644
index 0000000000..6265ad08ff
--- /dev/null
+++ b/llama_hub/llama_packs/voyage_query_engine/requirements.txt
@@ -0,0 +1,2 @@
+openai
+voyageai
\ No newline at end of file