From 8b8d1fe7c74caf2e648f00a4f48f3466e03cacc5 Mon Sep 17 00:00:00 2001
From: Shahules786
Date: Thu, 19 Oct 2023 17:22:25 +0530
Subject: [PATCH 1/3] add langchain loaders to docs

---
 docs/concepts/testset_generation.md | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/docs/concepts/testset_generation.md b/docs/concepts/testset_generation.md
index 99ca0328a..abe95067a 100644
--- a/docs/concepts/testset_generation.md
+++ b/docs/concepts/testset_generation.md
@@ -35,6 +35,27 @@ Moving forward, we will be expanding the range of evolution techniques to offer
 
 ## Example
 
+```{code-block} python
+:caption: loading documents using langchain
+from langchain.document_loaders import PubMedLoader
+
+loader = PubMedLoader("liver", load_max_docs=10)
+documents = loader.load()
+```
+Check out the [langchain](https://python.langchain.com/docs/modules/data_connection/document_loaders/) document loaders to see more examples.
+
+```{code-block} python
+:caption: loading documents using llama-index
+from llama_index import download_loader
+
+SemanticScholarReader = download_loader("SemanticScholarReader")
+loader = SemanticScholarReader()
+query_space = "large language models"
+documents = loader.load_data(query=query_space, limit=10)
+```
+Check out the [llama-index](https://gpt-index.readthedocs.io/en/stable/core_modules/data_modules/connector/root.html) document loaders to see more examples.
+
+
 ```{code-block} python
 :caption: Customising test set generation
 from ragas.testset import TestsetGenerator

From 472a623d1309787a88755d1085066f69b59bc094 Mon Sep 17 00:00:00 2001
From: Shahules786
Date: Fri, 5 Jan 2024 09:56:50 +0530
Subject: [PATCH 2/3] add import

---
 src/ragas/llms/base.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/ragas/llms/base.py b/src/ragas/llms/base.py
index 3d2e117e2..d9a682d54 100644
--- a/src/ragas/llms/base.py
+++ b/src/ragas/llms/base.py
@@ -9,11 +9,12 @@
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.outputs import LLMResult
 
+from ragas.llms.prompt import PromptValue
+
 if t.TYPE_CHECKING:
     from langchain_core.callbacks import Callbacks
     from langchain_core.prompts import ChatPromptTemplate
 
-    from ragas.llms.prompt import PromptValue
 
 MULTIPLE_COMPLETION_SUPPORTED = [
     OpenAI,

From ab4da3857edb4037b503ee3a1cb7ec729f8b3c8b Mon Sep 17 00:00:00 2001
From: Shahules786
Date: Sat, 6 Jan 2024 11:28:26 +0530
Subject: [PATCH 3/3] lazy import

---
 src/ragas/llms/base.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/src/ragas/llms/base.py b/src/ragas/llms/base.py
index d9a682d54..2f33981cc 100644
--- a/src/ragas/llms/base.py
+++ b/src/ragas/llms/base.py
@@ -9,11 +9,13 @@
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.outputs import LLMResult
 
-from ragas.llms.prompt import PromptValue
 
 if t.TYPE_CHECKING:
     from langchain_core.callbacks import Callbacks
     from langchain_core.prompts import ChatPromptTemplate
+
+    from ragas.llms.prompt import PromptValue
+
 
 
 MULTIPLE_COMPLETION_SUPPORTED = [
@@ -67,6 +69,9 @@ def generate_text_with_hmpt(
         stop: t.Optional[t.List[str]] = None,
         callbacks: Callbacks = [],
     ) -> LLMResult:
+
+        from ragas.llms.prompt import PromptValue
+
         prompt = PromptValue(prompt_str=prompts[0].format())
         return self.generate_text(prompt, n, temperature, stop, callbacks)
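
Note on patches 2/3 and 3/3: together they replace the module-level PromptValue import with the lazy-import pattern. The name stays visible to static type checkers under t.TYPE_CHECKING, while the runtime import is deferred into the function that actually needs it, so the module can be loaded without triggering a circular import between ragas.llms.base and ragas.llms.prompt. A minimal sketch of that pattern, assuming ragas is installed; make_prompt is an illustrative helper, not part of the ragas codebase, and only the ragas.llms.prompt.PromptValue import and its prompt_str keyword mirror what the patch itself uses.

from __future__ import annotations

import typing as t

if t.TYPE_CHECKING:
    # Seen only by static type checkers; nothing is imported at runtime,
    # so the two modules can depend on each other without a circular
    # import error at load time.
    from ragas.llms.prompt import PromptValue


def make_prompt(prompt_str: str) -> PromptValue:
    # Lazy (function-local) import: resolved on the first call, after
    # both modules have finished initialising.
    from ragas.llms.prompt import PromptValue

    return PromptValue(prompt_str=prompt_str)

The trade-off is that the import cost moves from module load time to the first call, which is usually negligible for a small prompt wrapper like this.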