From b00ba1c4afe5082f0c08b00d669c6b0602744e54 Mon Sep 17 00:00:00 2001 From: jjmachan Date: Fri, 25 Oct 2024 15:29:18 +0530 Subject: [PATCH 1/3] docs: added azure openai to default docs --- docs/extra/components/choose_evaluator_llm.md | 52 ++++++++++++++++++- 1 file changed, 51 insertions(+), 1 deletion(-) diff --git a/docs/extra/components/choose_evaluator_llm.md b/docs/extra/components/choose_evaluator_llm.md index bfb7f9555..2b4981d75 100644 --- a/docs/extra/components/choose_evaluator_llm.md +++ b/docs/extra/components/choose_evaluator_llm.md @@ -65,4 +65,54 @@ )) ``` - If you want more information on how to use other AWS services, please refer to the [langchain-aws](https://python.langchain.com/docs/integrations/providers/aws/) documentation. \ No newline at end of file + If you want more information on how to use other AWS services, please refer to the [langchain-aws](https://python.langchain.com/docs/integrations/providers/aws/) documentation. + +=== "Azure OpenAI" + Install the langchain-openai package + + ```bash + pip install langchain-openai + ``` + + Ensure you have your Azure OpenAI key ready and available in your environment. + + ```python + import os + os.environ["AZURE_OPENAI_API_KEY"] = "your-azure-openai-key" + + # other configuration + azure_configs = { + "base_url": "", # your endpoint + "model_deployment": "", # your model deployment name + "model_name": "", # your model name + "embedding_deployment": "", # your embedding deployment name + "embedding_name": "", # your embedding name + } + + ``` + + Define your LLMs and wrap them in `LangchainLLMWrapper` so that it can be used with ragas. 
+ + ```python + from langchain_openai import AzureChatOpenAI + from langchain_openai import AzureOpenAIEmbeddings + from ragas.llms import LangchainLLMWrapper + from ragas.embeddings import LangchainEmbeddingsWrapper + evaluator_llm = LangchainLLMWrapper(AzureChatOpenAI( + openai_api_version="2023-05-15", + azure_endpoint=azure_configs["base_url"], + azure_deployment=azure_configs["model_deployment"], + model=azure_configs["model_name"], + validate_base_url=False, + )) + + # init the embeddings for answer_relevancy, answer_correctness and answer_similarity + evaluator_embeddings = LangchainEmbeddingsWrapper(AzureOpenAIEmbeddings( + openai_api_version="2023-05-15", + azure_endpoint=azure_configs["base_url"], + azure_deployment=azure_configs["embedding_deployment"], + model=azure_configs["embedding_name"], + )) + ``` + + If you want more information on how to use other Azure services, please refer to the [langchain-azure](https://python.langchain.com/docs/integrations/chat/azure_chat_openai/) documentation. From 4bc0b2a3c416d74f3949d0da9929cb917b0b4009 Mon Sep 17 00:00:00 2001 From: jjmachan Date: Fri, 1 Nov 2024 16:56:25 +0530 Subject: [PATCH 2/3] docs: added for azure --- docs/extra/components/choose_generator_llm.md | 52 ++++++++++++++++++- 1 file changed, 51 insertions(+), 1 deletion(-) diff --git a/docs/extra/components/choose_generator_llm.md b/docs/extra/components/choose_generator_llm.md index fc2358eb9..4184f76e9 100644 --- a/docs/extra/components/choose_generator_llm.md +++ b/docs/extra/components/choose_generator_llm.md @@ -64,4 +64,54 @@ )) ``` - If you want more information on how to use other AWS services, please refer to the [langchain-aws](https://python.langchain.com/docs/integrations/providers/aws/) documentation. \ No newline at end of file + If you want more information on how to use other AWS services, please refer to the [langchain-aws](https://python.langchain.com/docs/integrations/providers/aws/) documentation. 
+ +=== "Azure OpenAI" + Install the langchain-openai package + + ```bash + pip install langchain-openai + ``` + + Ensure you have your Azure OpenAI key ready and available in your environment. + + ```python + import os + os.environ["AZURE_OPENAI_API_KEY"] = "your-azure-openai-key" + + # other configuration + azure_configs = { + "base_url": "", # your endpoint + "model_deployment": "", # your model deployment name + "model_name": "", # your model name + "embedding_deployment": "", # your embedding deployment name + "embedding_name": "", # your embedding name + } + + ``` + + Define your LLMs and wrap them in `LangchainLLMWrapper` so that it can be used with ragas. + + ```python + from langchain_openai import AzureChatOpenAI + from langchain_openai import AzureOpenAIEmbeddings + from ragas.llms import LangchainLLMWrapper + from ragas.embeddings import LangchainEmbeddingsWrapper + generator_llm = LangchainLLMWrapper(AzureChatOpenAI( + openai_api_version="2023-05-15", + azure_endpoint=azure_configs["base_url"], + azure_deployment=azure_configs["model_deployment"], + model=azure_configs["model_name"], + validate_base_url=False, + )) + + # init the embeddings for answer_relevancy, answer_correctness and answer_similarity + generator_embeddings = LangchainEmbeddingsWrapper(AzureOpenAIEmbeddings( + openai_api_version="2023-05-15", + azure_endpoint=azure_configs["base_url"], + azure_deployment=azure_configs["embedding_deployment"], + model=azure_configs["embedding_name"], + )) + ``` + + If you want more information on how to use other Azure services, please refer to the [langchain-azure](https://python.langchain.com/docs/integrations/chat/azure_chat_openai/) documentation. 
From 971358b8879e57688dac2608d813009bd690a357 Mon Sep 17 00:00:00 2001 From: jjmachan Date: Mon, 4 Nov 2024 14:19:44 +0530 Subject: [PATCH 3/3] added other's tab to it --- docs/extra/components/choose_evaluator_llm.md | 22 +++++++++++++++++++ docs/extra/components/choose_generator_llm.md | 21 ++++++++++++++++++ 2 files changed, 43 insertions(+) diff --git a/docs/extra/components/choose_evaluator_llm.md b/docs/extra/components/choose_evaluator_llm.md index 2b4981d75..7d0c73493 100644 --- a/docs/extra/components/choose_evaluator_llm.md +++ b/docs/extra/components/choose_evaluator_llm.md @@ -116,3 +116,25 @@ ``` If you want more information on how to use other Azure services, please refer to the [langchain-azure](https://python.langchain.com/docs/integrations/chat/azure_chat_openai/) documentation. + + +=== "Others" + If you are using a different LLM provider and using Langchain to interact with it, you can wrap your LLM in `LangchainLLMWrapper` so that it can be used with ragas. + + ```python + from ragas.llms import LangchainLLMWrapper + evaluator_llm = LangchainLLMWrapper(your_llm_instance) + ``` + + For a more detailed guide, check out [the guide on customizing models](../../howtos/customizations/customize_models/). + + If you are using LlamaIndex, you can use the `LlamaIndexLLMWrapper` to wrap your LLM so that it can be used with ragas. + + ```python + from ragas.llms import LlamaIndexLLMWrapper + evaluator_llm = LlamaIndexLLMWrapper(your_llm_instance) + ``` + + For more information on how to use LlamaIndex, please refer to the [LlamaIndex Integration guide](../../howtos/integrations/_llamaindex/). + + If you're still not able to use Ragas with your favorite LLM provider, please let us know by commenting on this [issue](https://github.com/explodinggradients/ragas/issues/1617) and we'll add support for it 🙂. 
\ No newline at end of file diff --git a/docs/extra/components/choose_generator_llm.md b/docs/extra/components/choose_generator_llm.md index 4184f76e9..6d2160221 100644 --- a/docs/extra/components/choose_generator_llm.md +++ b/docs/extra/components/choose_generator_llm.md @@ -115,3 +115,24 @@ ``` If you want more information on how to use other Azure services, please refer to the [langchain-azure](https://python.langchain.com/docs/integrations/chat/azure_chat_openai/) documentation. + +=== "Others" + If you are using a different LLM provider and using Langchain to interact with it, you can wrap your LLM in `LangchainLLMWrapper` so that it can be used with ragas. + + ```python + from ragas.llms import LangchainLLMWrapper + generator_llm = LangchainLLMWrapper(your_llm_instance) + ``` + + For a more detailed guide, check out [the guide on customizing models](../../howtos/customizations/customize_models/). + + If you are using LlamaIndex, you can use the `LlamaIndexLLMWrapper` to wrap your LLM so that it can be used with ragas. + + ```python + from ragas.llms import LlamaIndexLLMWrapper + generator_llm = LlamaIndexLLMWrapper(your_llm_instance) + ``` + + For more information on how to use LlamaIndex, please refer to the [LlamaIndex Integration guide](../../howtos/integrations/_llamaindex/). + + If you're still not able to use Ragas with your favorite LLM provider, please let us know by commenting on this [issue](https://github.com/explodinggradients/ragas/issues/1617) and we'll add support for it 🙂.