diff --git a/bootstraprag/templates/llamaindex/llama_deploy_with_kafka/llama_deploy_core/deploy_core.py b/bootstraprag/templates/llamaindex/llama_deploy_with_kafka/llama_deploy_core/deploy_core.py
deleted file mode 100644
index 8c7ff0e..0000000
--- a/bootstraprag/templates/llamaindex/llama_deploy_with_kafka/llama_deploy_core/deploy_core.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from llama_deploy import (
-    deploy_core,
-    ControlPlaneConfig,
-    SimpleMessageQueueConfig,
-)
-
-
-async def main():
-    await deploy_core(
-        control_plane_config=ControlPlaneConfig(),
-        message_queue_config=SimpleMessageQueueConfig(),
-    )
-
-
-if __name__ == "__main__":
-    import asyncio
-
-    asyncio.run(main())
\ No newline at end of file
diff --git a/bootstraprag/templates/llamaindex/llama_deploy_with_kafka/llama_deploy_workflow/deploy_workflow_1.py b/bootstraprag/templates/llamaindex/llama_deploy_with_kafka/llama_deploy_workflow/deploy_workflow_1.py
deleted file mode 100644
index ff875be..0000000
--- a/bootstraprag/templates/llamaindex/llama_deploy_with_kafka/llama_deploy_workflow/deploy_workflow_1.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import os
-
-from llama_deploy import (
-    deploy_workflow,
-    WorkflowServiceConfig,
-    ControlPlaneConfig
-)
-from workflows.retry_query_engine_workflow import build_rag_workflow_with_retry_query_engine
-from dotenv import load_dotenv, find_dotenv
-
-
-_ = load_dotenv(find_dotenv())
-
-
-async def deploy_rag_workflow_with_retry_query_engine():
-    rag_workflow = build_rag_workflow_with_retry_query_engine()
-    try:
-        await deploy_workflow(
-            workflow=rag_workflow,
-            workflow_config=WorkflowServiceConfig(
-                host="127.0.0.1",
-                port=8002,
-                # service name matches the name of the workflow used in Agentic Workflow
-                service_name="rag_workflow_with_retry_query_engine",
-                description="RAG workflow",
-            ),
-            # Config controlled by env vars
-            control_plane_config=ControlPlaneConfig()
-        )
-    except Exception as e:
-        print(e)
-
-if __name__ == "__main__":
-    import asyncio
-    import nest_asyncio
-
-    nest_asyncio.apply()
-    try:
-        # deployment of workflow is driven by environmental variables
-        # if os.environ['ENABLED_WORKFLOW'] == 'deploy_rag_workflow_with_retry_query_engine':
-        #     asyncio.run(deploy_rag_workflow_with_retry_query_engine())
-        # elif os.environ['ENABLED_WORKFLOW'] == 'deploy_rag_workflow_with_retry_source_query_engine':
-        #     asyncio.run(deploy_rag_workflow_with_retry_source_query_engine())
-        # else:
-        #     asyncio.run(deploy_rag_workflow_with_retry_guideline_query_engine())
-        asyncio.run(deploy_rag_workflow_with_retry_query_engine())
-    except Exception as e:
-        print(e)
diff --git a/bootstraprag/templates/llamaindex/llama_deploy_with_kafka/llama_deploy_workflow/deploy_workflow_2.py b/bootstraprag/templates/llamaindex/llama_deploy_with_kafka/llama_deploy_workflow/deploy_workflow_2.py
deleted file mode 100644
index 6212f03..0000000
--- a/bootstraprag/templates/llamaindex/llama_deploy_with_kafka/llama_deploy_workflow/deploy_workflow_2.py
+++ /dev/null
@@ -1,51 +0,0 @@
-import os
-
-from llama_deploy import (
-    deploy_workflow,
-    WorkflowServiceConfig,
-    ControlPlaneConfig
-)
-from workflows.retry_query_engine_workflow import build_rag_workflow_with_retry_query_engine
-from workflows.retry_source_query_engine_workflow import build_rag_workflow_with_retry_source_query_engine
-from workflows.retry_guideline_query_engine_workflow import build_rag_workflow_with_retry_guideline_query_engine
-from dotenv import load_dotenv, find_dotenv
-
-
-_ = load_dotenv(find_dotenv())
-
-
-async def deploy_rag_workflow_with_retry_source_query_engine():
-    rag_workflow = build_rag_workflow_with_retry_source_query_engine()
-    try:
-        await deploy_workflow(
-            workflow=rag_workflow,
-            workflow_config=WorkflowServiceConfig(
-                host="127.0.0.1",
-                port=8003,
-                # service name matches the name of the workflow used in Agentic Workflow
-                service_name="rag_workflow_with_retry_source_query_engine",
-                description="RAG workflow",
-            ),
-            # Config controlled by env vars
-            control_plane_config=ControlPlaneConfig()
-        )
-    except Exception as e:
-        print(e)
-
-
-if __name__ == "__main__":
-    import asyncio
-    import nest_asyncio
-
-    nest_asyncio.apply()
-    try:
-        # deployment of workflow is driven by environmental variables
-        # if os.environ['ENABLED_WORKFLOW'] == 'deploy_rag_workflow_with_retry_query_engine':
-        #     asyncio.run(deploy_rag_workflow_with_retry_query_engine())
-        # elif os.environ['ENABLED_WORKFLOW'] == 'deploy_rag_workflow_with_retry_source_query_engine':
-        #     asyncio.run(deploy_rag_workflow_with_retry_source_query_engine())
-        # else:
-        #     asyncio.run(deploy_rag_workflow_with_retry_guideline_query_engine())
-        asyncio.run(deploy_rag_workflow_with_retry_source_query_engine())
-    except Exception as e:
-        print(e)
diff --git a/bootstraprag/templates/llamaindex/llama_deploy_with_kafka/llama_deploy_workflow/deploy_workflow_3.py b/bootstraprag/templates/llamaindex/llama_deploy_with_kafka/llama_deploy_workflow/deploy_workflow_3.py
deleted file mode 100644
index 5fadd66..0000000
--- a/bootstraprag/templates/llamaindex/llama_deploy_with_kafka/llama_deploy_workflow/deploy_workflow_3.py
+++ /dev/null
@@ -1,88 +0,0 @@
-import os
-
-from llama_deploy import (
-    deploy_workflow,
-    WorkflowServiceConfig,
-    ControlPlaneConfig
-)
-from workflows.retry_query_engine_workflow import build_rag_workflow_with_retry_query_engine
-from workflows.retry_source_query_engine_workflow import build_rag_workflow_with_retry_source_query_engine
-from workflows.retry_guideline_query_engine_workflow import build_rag_workflow_with_retry_guideline_query_engine
-from dotenv import load_dotenv, find_dotenv
-
-
-_ = load_dotenv(find_dotenv())
-
-
-async def deploy_rag_workflow_with_retry_query_engine():
-    rag_workflow = build_rag_workflow_with_retry_query_engine()
-    try:
-        await deploy_workflow(
-            workflow=rag_workflow,
-            workflow_config=WorkflowServiceConfig(
-                host="127.0.0.1",
-                port=8002,
-                # service name matches the name of the workflow used in Agentic Workflow
-                service_name="rag_workflow_with_retry_query_engine",
-                description="RAG workflow",
-            ),
-            # Config controlled by env vars
-            control_plane_config=ControlPlaneConfig()
-        )
-    except Exception as e:
-        print(e)
-
-
-async def deploy_rag_workflow_with_retry_source_query_engine():
-    rag_workflow = build_rag_workflow_with_retry_source_query_engine()
-    try:
-        await deploy_workflow(
-            workflow=rag_workflow,
-            workflow_config=WorkflowServiceConfig(
-                host="127.0.0.1",
-                port=8003,
-                # service name matches the name of the workflow used in Agentic Workflow
-                service_name="rag_workflow_with_retry_source_query_engine",
-                description="RAG workflow",
-            ),
-            # Config controlled by env vars
-            control_plane_config=ControlPlaneConfig()
-        )
-    except Exception as e:
-        print(e)
-
-
-async def deploy_rag_workflow_with_retry_guideline_query_engine():
-    rag_workflow = build_rag_workflow_with_retry_guideline_query_engine()
-    try:
-        await deploy_workflow(
-            workflow=rag_workflow,
-            workflow_config=WorkflowServiceConfig(
-                host="127.0.0.1",
-                port=8004,
-                # service name matches the name of the workflow used in Agentic Workflow
-                service_name="rag_workflow_with_retry_guideline_query_engine",
-                description="RAG workflow",
-            ),
-            # Config controlled by env vars
-            control_plane_config=ControlPlaneConfig()
-        )
-    except Exception as e:
-        print(e)
-
-if __name__ == "__main__":
-    import asyncio
-    import nest_asyncio
-
-    nest_asyncio.apply()
-    try:
-        # deployment of workflow is driven by environmental variables
-        # if os.environ['ENABLED_WORKFLOW'] == 'deploy_rag_workflow_with_retry_query_engine':
-        #     asyncio.run(deploy_rag_workflow_with_retry_query_engine())
-        # elif os.environ['ENABLED_WORKFLOW'] == 'deploy_rag_workflow_with_retry_source_query_engine':
-        #     asyncio.run(deploy_rag_workflow_with_retry_source_query_engine())
-        # else:
-        #     asyncio.run(deploy_rag_workflow_with_retry_guideline_query_engine())
-        asyncio.run(deploy_rag_workflow_with_retry_guideline_query_engine())
-    except Exception as e:
-        print(e)
diff --git a/bootstraprag/templates/llamaindex/llama_deploy_with_kafka/readme.md b/bootstraprag/templates/llamaindex/llama_deploy_with_kafka/readme.md
index ee0e535..df44beb 100644
--- a/bootstraprag/templates/llamaindex/llama_deploy_with_kafka/readme.md
+++ b/bootstraprag/templates/llamaindex/llama_deploy_with_kafka/readme.md
@@ -11,4 +11,3 @@
 - `python deploy_code.py`
 - `python deploy_workflow.py`
 - `python main.py`
-
diff --git a/bootstraprag/templates/llamaindex/llama_deploy_with_kafka/workflows/events.py b/bootstraprag/templates/llamaindex/llama_deploy_with_kafka/workflows/events.py
deleted file mode 100644
index 26c323a..0000000
--- a/bootstraprag/templates/llamaindex/llama_deploy_with_kafka/workflows/events.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from llama_index.core.workflow import Event
-from llama_index.core.base.base_query_engine import BaseQueryEngine
-
-
-class QueryEngineEvent(Event):
-    """Result of running retrieval"""
-
-    base_query_engine: BaseQueryEngine
-
diff --git a/bootstraprag/templates/llamaindex/llama_deploy_with_kafka/workflows/retry_guideline_query_engine_workflow.py b/bootstraprag/templates/llamaindex/llama_deploy_with_kafka/workflows/retry_guideline_query_engine_workflow.py
deleted file mode 100644
index 2df25df..0000000
--- a/bootstraprag/templates/llamaindex/llama_deploy_with_kafka/workflows/retry_guideline_query_engine_workflow.py
+++ /dev/null
@@ -1,102 +0,0 @@
-import os
-import logging
-import qdrant_client
-from llama_index.core.workflow import (
-    Workflow,
-    Context,
-    StartEvent,
-    StopEvent,
-    step
-)
-from llama_index.core.base.base_query_engine import BaseQueryEngine
-from llama_index.core.query_engine import RetryGuidelineQueryEngine
-from llama_index.core import (VectorStoreIndex, Settings, StorageContext, SimpleDirectoryReader)
-from llama_index.core.evaluation import GuidelineEvaluator
-from llama_index.vector_stores.qdrant import QdrantVectorStore
-from llama_index.llms.ollama import Ollama
-from llama_index.embeddings.ollama import OllamaEmbedding
-from llama_index.core.evaluation.guideline import DEFAULT_GUIDELINES
-from dotenv import load_dotenv, find_dotenv
-from workflows.events import QueryEngineEvent
-
-_ = load_dotenv(find_dotenv())
-
-logging.basicConfig(level=int(os.environ['INFO']))
-logger = logging.getLogger(__name__)
-
-
-class RAGWorkflowWithRetryGuidelineQueryEngine(Workflow):
-    def __init__(self, index: VectorStoreIndex, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-        self.index: VectorStoreIndex = index
-
-    @step
-    async def create_retry_query_engine(self, ctx: Context, ev: StartEvent) -> QueryEngineEvent | None:
-        "Entry point for RAG, triggered by a StartEvent with `query`."
-        logger.info(f"creating query engine for query: {ev.get('query')}")
-        query = ev.get("query")
-        no_of_retries = ev.get("no_of_retries", default=3)
-
-        if not query:
-            raise ValueError("Query is required!")
-
-        # store the settings in the global context
-        await ctx.set("query", query)
-        await ctx.set("no_of_retries", no_of_retries)
-
-        base_query_engine = self.index.as_query_engine(llm=Settings.llm, similarity_top_k=2, sparse_top_k=12,
-                                                       vector_store_query_mode="hybrid")
-        return QueryEngineEvent(base_query_engine=base_query_engine)
-
-    @step
-    async def query_with_retry_source_query_engine(self, ctx: Context, ev: QueryEngineEvent) -> StopEvent:
-        """Return a response using reranked nodes."""
-        query = await ctx.get("query")
-        no_of_retries = await ctx.get("no_of_retries")
-        base_query_engine: BaseQueryEngine = ev.base_query_engine
-
-        # Guideline eval
-        guideline_eval = GuidelineEvaluator(
-            guidelines=DEFAULT_GUIDELINES + "\nThe response should not be overly long.\n"
-                       "The response should try to summarize where possible.\n"
-        )  # just for example
-        retry_guideline_query_engine = RetryGuidelineQueryEngine(base_query_engine, guideline_eval,
-                                                                 resynthesize_query=True, max_retries=no_of_retries)
-        retry_guideline_response = retry_guideline_query_engine.query(query)
-        logger.info(f"response for query is: {retry_guideline_response}")
-        return StopEvent(result=str(retry_guideline_response))
-
-
-def build_rag_workflow_with_retry_guideline_query_engine() -> RAGWorkflowWithRetryGuidelineQueryEngine:
-    index_loaded = False
-    # host points to qdrant in docker-compose.yml
-    client = qdrant_client.QdrantClient(url=os.environ['DB_URL'], api_key=os.environ['DB_API_KEY'])
-    aclient = qdrant_client.AsyncQdrantClient(url=os.environ['DB_URL'], api_key=os.environ['DB_API_KEY'])
-    vector_store = QdrantVectorStore(collection_name=os.environ['COLLECTION_NAME'], client=client, aclient=aclient,
-                                     enable_hybrid=True, batch_size=50)
-
-    Settings.llm = Ollama(model=os.environ['OLLAMA_LLM_MODEL'], base_url=os.environ['OLLAMA_BASE_URL'],
-                          request_timeout=600)
-    Settings.embed_model = OllamaEmbedding(model_name=os.environ['OLLAMA_EMBED_MODEL'],
-                                           base_url=os.environ['OLLAMA_BASE_URL'])
-
-    # index = VectorStoreIndex.from_vector_store(vector_store=vector_store, embed_model=Settings.embed_model)
-    index: VectorStoreIndex = None
-
-    if client.collection_exists(collection_name=os.environ['COLLECTION_NAME']):
-        try:
-            index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
-            index_loaded = True
-        except Exception as e:
-            index_loaded = False
-
-    if not index_loaded:
-        # load data
-        _docs = (SimpleDirectoryReader(input_dir='../data', required_exts=['.pdf']).load_data(show_progress=True))
-
-        # build and persist index
-        storage_context = StorageContext.from_defaults(vector_store=vector_store)
-        logger.info("indexing the docs in VectorStoreIndex")
-        index = VectorStoreIndex.from_documents(documents=_docs, storage_context=storage_context, show_progress=True)
-
-    return RAGWorkflowWithRetryGuidelineQueryEngine(index=index, timeout=120.0)
diff --git a/bootstraprag/templates/llamaindex/llama_deploy_with_kafka/workflows/retry_query_engine_workflow.py b/bootstraprag/templates/llamaindex/llama_deploy_with_kafka/workflows/retry_query_engine_workflow.py
deleted file mode 100644
index 9b4a845..0000000
--- a/bootstraprag/templates/llamaindex/llama_deploy_with_kafka/workflows/retry_query_engine_workflow.py
+++ /dev/null
@@ -1,98 +0,0 @@
-import os
-import logging
-import qdrant_client
-from llama_index.core.workflow import (
-    Workflow,
-    Context,
-    StartEvent,
-    StopEvent,
-    step
-)
-from llama_index.core.base.base_query_engine import BaseQueryEngine
-from llama_index.core.query_engine import RetryQueryEngine
-from llama_index.core import (VectorStoreIndex, Settings, StorageContext, SimpleDirectoryReader)
-from llama_index.core.evaluation import RelevancyEvaluator
-from llama_index.vector_stores.qdrant import QdrantVectorStore
-from llama_index.llms.ollama import Ollama
-from llama_index.embeddings.ollama import OllamaEmbedding
-from dotenv import load_dotenv, find_dotenv
-
-from workflows.events import QueryEngineEvent
-
-_ = load_dotenv(find_dotenv())
-
-logging.basicConfig(level=int(os.environ['INFO']))
-logger = logging.getLogger(__name__)
-
-
-class RAGWorkflowWithRetryQueryEngine(Workflow):
-    def __init__(self, index: VectorStoreIndex, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-        self.index: VectorStoreIndex = index
-        self.query_response_evaluator = RelevancyEvaluator()
-
-    @step
-    async def create_retry_query_engine(self, ctx: Context, ev: StartEvent) -> QueryEngineEvent | None:
-        "Entry point for RAG, triggered by a StartEvent with `query`."
-        logger.info(f"creating query engine for query: {ev.get('query')}")
-        query = ev.get("query")
-        no_of_retries = ev.get("no_of_retries", default=3)
-
-        if not query:
-            raise ValueError("Query is required!")
-
-        # store the settings in the global context
-        await ctx.set("query", query)
-        await ctx.set("no_of_retries", no_of_retries)
-
-        base_query_engine = self.index.as_query_engine(llm=Settings.llm, similarity_top_k=2, sparse_top_k=12,
-                                                       vector_store_query_mode="hybrid")
-        return QueryEngineEvent(base_query_engine=base_query_engine)
-
-    @step
-    async def query_with_retry_query_engine(self, ctx: Context, ev: QueryEngineEvent) -> StopEvent:
-        """Return a response using reranked nodes."""
-        query = await ctx.get("query")
-        no_of_retries = await ctx.get("no_of_retries")
-        base_query_engine: BaseQueryEngine = ev.base_query_engine
-
-        retry_query_engine = RetryQueryEngine(base_query_engine, self.query_response_evaluator,
-                                              max_retries=no_of_retries)
-        retry_response = retry_query_engine.query(query)
-        logger.info(f"response for query is: {retry_response}")
-        return StopEvent(result=str(retry_response))
-
-
-def build_rag_workflow_with_retry_query_engine() -> RAGWorkflowWithRetryQueryEngine:
-    index_loaded = False
-    # host points to qdrant in docker-compose.yml
-    client = qdrant_client.QdrantClient(url=os.environ['DB_URL'], api_key=os.environ['DB_API_KEY'])
-    aclient = qdrant_client.AsyncQdrantClient(url=os.environ['DB_URL'], api_key=os.environ['DB_API_KEY'])
-    vector_store = QdrantVectorStore(collection_name=os.environ['COLLECTION_NAME'], client=client, aclient=aclient,
-                                     enable_hybrid=True, batch_size=50)
-
-    Settings.llm = Ollama(model=os.environ['OLLAMA_LLM_MODEL'], base_url=os.environ['OLLAMA_BASE_URL'],
-                          request_timeout=600)
-    Settings.embed_model = OllamaEmbedding(model_name=os.environ['OLLAMA_EMBED_MODEL'],
-                                           base_url=os.environ['OLLAMA_BASE_URL'])
-
-    # index = VectorStoreIndex.from_vector_store(vector_store=vector_store, embed_model=Settings.embed_model)
-    index: VectorStoreIndex = None
-
-    if client.collection_exists(collection_name=os.environ['COLLECTION_NAME']):
-        try:
-            index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
-            index_loaded = True
-        except Exception as e:
-            index_loaded = False
-
-    if not index_loaded:
-        # load data
-        _docs = (SimpleDirectoryReader(input_dir='../data', required_exts=['.pdf']).load_data(show_progress=True))
-
-        # build and persist index
-        storage_context = StorageContext.from_defaults(vector_store=vector_store)
-        logger.info("indexing the docs in VectorStoreIndex")
-        index = VectorStoreIndex.from_documents(documents=_docs, storage_context=storage_context, show_progress=True)
-
-    return RAGWorkflowWithRetryQueryEngine(index=index, timeout=120.0)
diff --git a/bootstraprag/templates/llamaindex/llama_deploy_with_kafka/workflows/retry_source_query_engine_workflow.py b/bootstraprag/templates/llamaindex/llama_deploy_with_kafka/workflows/retry_source_query_engine_workflow.py
deleted file mode 100644
index 22c04a1..0000000
--- a/bootstraprag/templates/llamaindex/llama_deploy_with_kafka/workflows/retry_source_query_engine_workflow.py
+++ /dev/null
@@ -1,98 +0,0 @@
-import os
-import logging
-import qdrant_client
-from llama_index.core.workflow import (
-    Workflow,
-    Context,
-    StartEvent,
-    StopEvent,
-    step
-)
-from llama_index.core.base.base_query_engine import BaseQueryEngine
-from llama_index.core.query_engine import RetrySourceQueryEngine
-from llama_index.core import (VectorStoreIndex, Settings, StorageContext, SimpleDirectoryReader)
-from llama_index.core.evaluation import RelevancyEvaluator
-from llama_index.vector_stores.qdrant import QdrantVectorStore
-from llama_index.llms.ollama import Ollama
-from llama_index.embeddings.ollama import OllamaEmbedding
-from dotenv import load_dotenv, find_dotenv
-
-from workflows.events import QueryEngineEvent
-
-_ = load_dotenv(find_dotenv())
-
-logging.basicConfig(level=int(os.environ['INFO']))
-logger = logging.getLogger(__name__)
-
-
-class RAGWorkflowWithRetrySourceQueryEngine(Workflow):
-    def __init__(self, index: VectorStoreIndex, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-        self.index: VectorStoreIndex = index
-        self.query_response_evaluator = RelevancyEvaluator()
-
-    @step
-    async def create_retry_query_engine(self, ctx: Context, ev: StartEvent) -> QueryEngineEvent | None:
-        "Entry point for RAG, triggered by a StartEvent with `query`."
-        logger.info(f"creating query engine for query: {ev.get('query')}")
-        query = ev.get("query")
-        no_of_retries = ev.get("no_of_retries", default=3)
-
-        if not query:
-            raise ValueError("Query is required!")
-
-        # store the settings in the global context
-        await ctx.set("query", query)
-        await ctx.set("no_of_retries", no_of_retries)
-
-        base_query_engine = self.index.as_query_engine(llm=Settings.llm, similarity_top_k=2, sparse_top_k=12,
-                                                       vector_store_query_mode="hybrid")
-        return QueryEngineEvent(base_query_engine=base_query_engine)
-
-    @step
-    async def query_with_retry_source_query_engine(self, ctx: Context, ev: QueryEngineEvent) -> StopEvent:
-        """Return a response using reranked nodes."""
-        query = await ctx.get("query")
-        no_of_retries = await ctx.get("no_of_retries")
-        base_query_engine: BaseQueryEngine = ev.base_query_engine
-
-        retry_source_query_engine = RetrySourceQueryEngine(base_query_engine, self.query_response_evaluator,
-                                                           max_retries=no_of_retries)
-        retry_source_response = retry_source_query_engine.query(query)
-        logger.info(f"response for query is: {retry_source_response}")
-        return StopEvent(result=str(retry_source_response))
-
-
-def build_rag_workflow_with_retry_source_query_engine() -> RAGWorkflowWithRetrySourceQueryEngine:
-    index_loaded = False
-    # host points to qdrant in docker-compose.yml
-    client = qdrant_client.QdrantClient(url=os.environ['DB_URL'], api_key=os.environ['DB_API_KEY'])
-    aclient = qdrant_client.AsyncQdrantClient(url=os.environ['DB_URL'], api_key=os.environ['DB_API_KEY'])
-    vector_store = QdrantVectorStore(collection_name=os.environ['COLLECTION_NAME'], client=client, aclient=aclient,
-                                     enable_hybrid=True, batch_size=50)
-
-    Settings.llm = Ollama(model=os.environ['OLLAMA_LLM_MODEL'], base_url=os.environ['OLLAMA_BASE_URL'],
-                          request_timeout=600)
-    Settings.embed_model = OllamaEmbedding(model_name=os.environ['OLLAMA_EMBED_MODEL'],
-                                           base_url=os.environ['OLLAMA_BASE_URL'])
-
-    # index = VectorStoreIndex.from_vector_store(vector_store=vector_store, embed_model=Settings.embed_model)
-    index: VectorStoreIndex = None
-
-    if client.collection_exists(collection_name=os.environ['COLLECTION_NAME']):
-        try:
-            index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
-            index_loaded = True
-        except Exception as e:
-            index_loaded = False
-
-    if not index_loaded:
-        # load data
-        _docs = (SimpleDirectoryReader(input_dir='../data', required_exts=['.pdf']).load_data(show_progress=True))
-
-        # build and persist index
-        storage_context = StorageContext.from_defaults(vector_store=vector_store)
-        logger.info("indexing the docs in VectorStoreIndex")
-        index = VectorStoreIndex.from_documents(documents=_docs, storage_context=storage_context, show_progress=True)
-
-    return RAGWorkflowWithRetrySourceQueryEngine(index=index, timeout=120.0)
diff --git a/bootstraprag/templates/llamaindex/llama_deploy_with_kafka/workflows/__init__.py b/bootstraprag/templates/llamaindex/rag_with_self_correction/api_core/__init__.py
similarity index 100%
rename from bootstraprag/templates/llamaindex/llama_deploy_with_kafka/workflows/__init__.py
rename to bootstraprag/templates/llamaindex/rag_with_self_correction/api_core/__init__.py
diff --git a/bootstraprag/templates/llamaindex/rag_with_self_correction/api_core/config.py b/bootstraprag/templates/llamaindex/rag_with_self_correction/api_core/config.py
new file mode 100644
index 0000000..d944427
--- /dev/null
+++ b/bootstraprag/templates/llamaindex/rag_with_self_correction/api_core/config.py
@@ -0,0 +1,5 @@
+class Settings:
str = "Simple RAG as FastAPI Application" + + +settings = Settings() diff --git a/bootstraprag/templates/llamaindex/rag_with_self_correction/api_routes/__init__.py b/bootstraprag/templates/llamaindex/rag_with_self_correction/api_routes/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/bootstraprag/templates/llamaindex/rag_with_self_correction/api_routes/apis.py b/bootstraprag/templates/llamaindex/rag_with_self_correction/api_routes/apis.py new file mode 100644 index 0000000..a0d264a --- /dev/null +++ b/bootstraprag/templates/llamaindex/rag_with_self_correction/api_routes/apis.py @@ -0,0 +1,14 @@ +from fastapi import APIRouter, Depends +from models.payload import Payload +from self_correction_core import SelfCorrectingRAG + + +self_correcting_rag = SelfCorrectingRAG(input_dir='data', show_progress=True, no_of_retries=3) + +router = APIRouter(prefix="/api/v1/rag", tags=["rag"]) + + +@router.post(path='/query') +def fetch_response(payload: Payload): + response = self_correcting_rag.query_with_source_query_engine(query=payload.query) + return response diff --git a/bootstraprag/templates/llamaindex/rag_with_self_correction/apis.py b/bootstraprag/templates/llamaindex/rag_with_self_correction/apis.py new file mode 100644 index 0000000..3a885a5 --- /dev/null +++ b/bootstraprag/templates/llamaindex/rag_with_self_correction/apis.py @@ -0,0 +1,110 @@ +from fastapi import FastAPI, Request +from fastapi.openapi.utils import get_openapi +from api_routes.apis import router +from fastapi.middleware.cors import CORSMiddleware +import uvicorn +import logging +import time + +logging.basicConfig(level=logging.DEBUG) +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s [%(levelname)s] %(name)s: %(message)s", +) +logger = logging.getLogger(__name__) +allowed_origins = [ + "*" +] + +app = FastAPI( + title="My FastAPI Application", + description="This is a FastAPI implementation for RAG application with Swagger UI configurations.", + version="1.0.0", + docs_url="/documentation", + redoc_url="/redoc", + openapi_url="/openapi.json", + contact={ + "name": "M K Pavan Kumar", + "linkedin": "https://www.linkedin.com", + }, + license_info={ + "name": "MIT License", + "url": "https://opensource.org/licenses/MIT", + }, + terms_of_service="https://www.yourwebsite.com/terms/", +) +app.add_middleware( + CORSMiddleware, + allow_origins=allowed_origins, + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) +app.include_router(router) + + +# Custom OpenAPI schema generation (optional) +def custom_openapi(): + if app.openapi_schema: + return app.openapi_schema + openapi_schema = get_openapi( + title="RAG APIs", + version="1.0.0", + description="This is a custom OpenAPI schema with additional metadata.", + routes=app.routes, + tags=[ + { + "name": "rag", + "description": "Operations for RAG query.", + } + ], + ) + # Modify openapi_schema as needed + app.openapi_schema = openapi_schema + return app.openapi_schema + + +app.openapi = custom_openapi + + +@app.middleware("http") +async def log_requests(request: Request, call_next): + try: + logger.info(f"Incoming request: {request.method} {request.url}") + response = await call_next(request) + logger.info(f"Response status: {response.status_code}") + return response + except Exception as e: + logger.exception(f"Error processing request: {e}") + raise e + + +# Request Timing Middleware +@app.middleware("http") +async def add_process_time_header(request: Request, call_next): + start_time = time.time() + response = await 
call_next(request) + process_time = time.time() - start_time + response.headers["X-Process-Time"] = str(process_time) + logger.info(f"Processed in {process_time:.4f} seconds") + return response + + +# Logging Middleware +@app.middleware("http") +async def log_requests(request: Request, call_next): + logger.info(f"Incoming request: {request.method} {request.url}") + response = await call_next(request) + logger.info(f"Response status: {response.status_code}") + return response + + +if __name__ == "__main__": + uvicorn.run( + "apis:app", + host="127.0.0.1", + port=8000, + reload=True, + log_level="info", + workers=1, + ) diff --git a/bootstraprag/templates/llamaindex/rag_with_self_correction/models/__init__.py b/bootstraprag/templates/llamaindex/rag_with_self_correction/models/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/bootstraprag/templates/llamaindex/rag_with_self_correction/models/payload.py b/bootstraprag/templates/llamaindex/rag_with_self_correction/models/payload.py new file mode 100644 index 0000000..ae09aba --- /dev/null +++ b/bootstraprag/templates/llamaindex/rag_with_self_correction/models/payload.py @@ -0,0 +1,5 @@ +from pydantic import BaseModel + + +class Payload(BaseModel): + query: str diff --git a/bootstraprag/templates/llamaindex/rag_with_self_correction/readme.md b/bootstraprag/templates/llamaindex/rag_with_self_correction/readme.md index a1e65ec..fc5793d 100644 --- a/bootstraprag/templates/llamaindex/rag_with_self_correction/readme.md +++ b/bootstraprag/templates/llamaindex/rag_with_self_correction/readme.md @@ -13,3 +13,9 @@ - query_with_retry_query_engine(query=user_query) - query_with_source_query_engine(query=user_query) - query_with_guideline_query_engine(query=user_query) + +### How to expose RAG as API +- run `python apis.py` +- verify the swagger redoc and documentation as below +- open browser and hit `http://localhost:8000/redoc` +- open browser and hit `http://localhost:8000/documentation` diff --git a/bootstraprag/templates/llamaindex/rag_with_self_correction_with_observability/api_core/__init__.py b/bootstraprag/templates/llamaindex/rag_with_self_correction_with_observability/api_core/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/bootstraprag/templates/llamaindex/rag_with_self_correction_with_observability/api_core/config.py b/bootstraprag/templates/llamaindex/rag_with_self_correction_with_observability/api_core/config.py new file mode 100644 index 0000000..d944427 --- /dev/null +++ b/bootstraprag/templates/llamaindex/rag_with_self_correction_with_observability/api_core/config.py @@ -0,0 +1,5 @@ +class Settings: + PROJECT_NAME: str = "Simple RAG as FastAPI Application" + + +settings = Settings() diff --git a/bootstraprag/templates/llamaindex/rag_with_self_correction_with_observability/api_routes/__init__.py b/bootstraprag/templates/llamaindex/rag_with_self_correction_with_observability/api_routes/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/bootstraprag/templates/llamaindex/rag_with_self_correction_with_observability/api_routes/apis.py b/bootstraprag/templates/llamaindex/rag_with_self_correction_with_observability/api_routes/apis.py new file mode 100644 index 0000000..a0d264a --- /dev/null +++ b/bootstraprag/templates/llamaindex/rag_with_self_correction_with_observability/api_routes/apis.py @@ -0,0 +1,14 @@ +from fastapi import APIRouter, Depends +from models.payload import Payload +from self_correction_core import SelfCorrectingRAG + + +self_correcting_rag = 
+self_correcting_rag = SelfCorrectingRAG(input_dir='data', show_progress=True, no_of_retries=3)
+
+router = APIRouter(prefix="/api/v1/rag", tags=["rag"])
+
+
+@router.post(path='/query')
+def fetch_response(payload: Payload):
+    response = self_correcting_rag.query_with_source_query_engine(query=payload.query)
+    return response
diff --git a/bootstraprag/templates/llamaindex/rag_with_self_correction_with_observability/apis.py b/bootstraprag/templates/llamaindex/rag_with_self_correction_with_observability/apis.py
new file mode 100644
index 0000000..3a885a5
--- /dev/null
+++ b/bootstraprag/templates/llamaindex/rag_with_self_correction_with_observability/apis.py
@@ -0,0 +1,110 @@
+from fastapi import FastAPI, Request
+from fastapi.openapi.utils import get_openapi
+from api_routes.apis import router
+from fastapi.middleware.cors import CORSMiddleware
+import uvicorn
+import logging
+import time
+
+logging.basicConfig(level=logging.DEBUG)
+logging.basicConfig(
+    level=logging.INFO,
+    format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
+)
+logger = logging.getLogger(__name__)
+allowed_origins = [
+    "*"
+]
+
+app = FastAPI(
+    title="My FastAPI Application",
+    description="This is a FastAPI implementation for RAG application with Swagger UI configurations.",
+    version="1.0.0",
+    docs_url="/documentation",
+    redoc_url="/redoc",
+    openapi_url="/openapi.json",
+    contact={
+        "name": "M K Pavan Kumar",
+        "linkedin": "https://www.linkedin.com",
+    },
+    license_info={
+        "name": "MIT License",
+        "url": "https://opensource.org/licenses/MIT",
+    },
+    terms_of_service="https://www.yourwebsite.com/terms/",
+)
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=allowed_origins,
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+app.include_router(router)
+
+
+# Custom OpenAPI schema generation (optional)
+def custom_openapi():
+    if app.openapi_schema:
+        return app.openapi_schema
+    openapi_schema = get_openapi(
+        title="RAG APIs",
+        version="1.0.0",
+        description="This is a custom OpenAPI schema with additional metadata.",
+        routes=app.routes,
+        tags=[
+            {
+                "name": "rag",
+                "description": "Operations for RAG query.",
+            }
+        ],
+    )
+    # Modify openapi_schema as needed
+    app.openapi_schema = openapi_schema
+    return app.openapi_schema
+
+
+app.openapi = custom_openapi
+
+
+@app.middleware("http")
+async def log_requests(request: Request, call_next):
+    try:
+        logger.info(f"Incoming request: {request.method} {request.url}")
+        response = await call_next(request)
+        logger.info(f"Response status: {response.status_code}")
+        return response
+    except Exception as e:
+        logger.exception(f"Error processing request: {e}")
+        raise e
+
+
+# Request Timing Middleware
+@app.middleware("http")
+async def add_process_time_header(request: Request, call_next):
+    start_time = time.time()
+    response = await call_next(request)
+    process_time = time.time() - start_time
+    response.headers["X-Process-Time"] = str(process_time)
+    logger.info(f"Processed in {process_time:.4f} seconds")
+    return response
+
+
+# Logging Middleware
+@app.middleware("http")
+async def log_requests(request: Request, call_next):
+    logger.info(f"Incoming request: {request.method} {request.url}")
+    response = await call_next(request)
+    logger.info(f"Response status: {response.status_code}")
+    return response
+
+
+if __name__ == "__main__":
+    uvicorn.run(
+        "apis:app",
+        host="127.0.0.1",
+        port=8000,
+        reload=True,
+        log_level="info",
+        workers=1,
+    )
diff --git a/bootstraprag/templates/llamaindex/rag_with_self_correction_with_observability/models/__init__.py b/bootstraprag/templates/llamaindex/rag_with_self_correction_with_observability/models/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/bootstraprag/templates/llamaindex/rag_with_self_correction_with_observability/models/payload.py b/bootstraprag/templates/llamaindex/rag_with_self_correction_with_observability/models/payload.py
new file mode 100644
index 0000000..ae09aba
--- /dev/null
+++ b/bootstraprag/templates/llamaindex/rag_with_self_correction_with_observability/models/payload.py
@@ -0,0 +1,5 @@
+from pydantic import BaseModel
+
+
+class Payload(BaseModel):
+    query: str
diff --git a/bootstraprag/templates/llamaindex/rag_with_self_correction_with_observability/readme.md b/bootstraprag/templates/llamaindex/rag_with_self_correction_with_observability/readme.md
index b585cf1..782ba73 100644
--- a/bootstraprag/templates/llamaindex/rag_with_self_correction_with_observability/readme.md
+++ b/bootstraprag/templates/llamaindex/rag_with_self_correction_with_observability/readme.md
@@ -14,4 +14,10 @@
 - query_with_source_query_engine(query=user_query)
 - query_with_guideline_query_engine(query=user_query)
 
-- This code is enabled with Observability powered by `Arize Phoenix`.
\ No newline at end of file
+- This code is enabled with Observability powered by `Arize Phoenix`.
+
+### How to expose RAG as API
+- run `python apis.py`
+- verify the swagger redoc and documentation as below
+- open browser and hit `http://localhost:8000/redoc`
+- open browser and hit `http://localhost:8000/documentation`
\ No newline at end of file
diff --git a/bootstraprag/templates/llamaindex/simple_rag/api_core/__init__.py b/bootstraprag/templates/llamaindex/simple_rag/api_core/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/bootstraprag/templates/llamaindex/simple_rag/api_core/config.py b/bootstraprag/templates/llamaindex/simple_rag/api_core/config.py
new file mode 100644
index 0000000..d944427
--- /dev/null
+++ b/bootstraprag/templates/llamaindex/simple_rag/api_core/config.py
@@ -0,0 +1,5 @@
+class Settings:
+    PROJECT_NAME: str = "Simple RAG as FastAPI Application"
+
+
+settings = Settings()
diff --git a/bootstraprag/templates/llamaindex/simple_rag/api_routes/__init__.py b/bootstraprag/templates/llamaindex/simple_rag/api_routes/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/bootstraprag/templates/llamaindex/simple_rag/api_routes/apis.py b/bootstraprag/templates/llamaindex/simple_rag/api_routes/apis.py
new file mode 100644
index 0000000..a23f3c2
--- /dev/null
+++ b/bootstraprag/templates/llamaindex/simple_rag/api_routes/apis.py
@@ -0,0 +1,12 @@
+from fastapi import APIRouter, Depends
+from models.payload import Payload
+from simple_rag import SimpleRAG
+
+router = APIRouter(prefix="/api/v1/rag", tags=["rag"])
+simpleRAG = SimpleRAG(input_dir='data', show_progress=True)
+
+
+@router.post(path='/query')
+def fetch_response(payload: Payload):
+    response = simpleRAG.do_rag(user_query=payload.query)
+    return response
diff --git a/bootstraprag/templates/llamaindex/simple_rag/apis.py b/bootstraprag/templates/llamaindex/simple_rag/apis.py
new file mode 100644
index 0000000..3a885a5
--- /dev/null
+++ b/bootstraprag/templates/llamaindex/simple_rag/apis.py
@@ -0,0 +1,110 @@
+from fastapi import FastAPI, Request
+from fastapi.openapi.utils import get_openapi
+from api_routes.apis import router
+from fastapi.middleware.cors import CORSMiddleware
+import uvicorn
+import logging
+import time
+
+logging.basicConfig(level=logging.DEBUG)
+logging.basicConfig(
+    level=logging.INFO,
+    format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
+)
+logger = logging.getLogger(__name__)
+allowed_origins = [
+    "*"
+]
+
+app = FastAPI(
+    title="My FastAPI Application",
+    description="This is a FastAPI implementation for RAG application with Swagger UI configurations.",
+    version="1.0.0",
+    docs_url="/documentation",
+    redoc_url="/redoc",
+    openapi_url="/openapi.json",
+    contact={
+        "name": "M K Pavan Kumar",
+        "linkedin": "https://www.linkedin.com",
+    },
+    license_info={
+        "name": "MIT License",
+        "url": "https://opensource.org/licenses/MIT",
+    },
+    terms_of_service="https://www.yourwebsite.com/terms/",
+)
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=allowed_origins,
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+app.include_router(router)
+
+
+# Custom OpenAPI schema generation (optional)
+def custom_openapi():
+    if app.openapi_schema:
+        return app.openapi_schema
+    openapi_schema = get_openapi(
+        title="RAG APIs",
+        version="1.0.0",
+        description="This is a custom OpenAPI schema with additional metadata.",
+        routes=app.routes,
+        tags=[
+            {
+                "name": "rag",
+                "description": "Operations for RAG query.",
+            }
+        ],
+    )
+    # Modify openapi_schema as needed
+    app.openapi_schema = openapi_schema
+    return app.openapi_schema
+
+
+app.openapi = custom_openapi
+
+
+@app.middleware("http")
+async def log_requests(request: Request, call_next):
+    try:
+        logger.info(f"Incoming request: {request.method} {request.url}")
+        response = await call_next(request)
+        logger.info(f"Response status: {response.status_code}")
+        return response
+    except Exception as e:
+        logger.exception(f"Error processing request: {e}")
+        raise e
+
+
+# Request Timing Middleware
+@app.middleware("http")
+async def add_process_time_header(request: Request, call_next):
+    start_time = time.time()
+    response = await call_next(request)
+    process_time = time.time() - start_time
+    response.headers["X-Process-Time"] = str(process_time)
+    logger.info(f"Processed in {process_time:.4f} seconds")
+    return response
+
+
+# Logging Middleware
+@app.middleware("http")
+async def log_requests(request: Request, call_next):
+    logger.info(f"Incoming request: {request.method} {request.url}")
+    response = await call_next(request)
+    logger.info(f"Response status: {response.status_code}")
+    return response
+
+
+if __name__ == "__main__":
+    uvicorn.run(
+        "apis:app",
+        host="127.0.0.1",
+        port=8000,
+        reload=True,
+        log_level="info",
+        workers=1,
+    )
diff --git a/bootstraprag/templates/llamaindex/simple_rag/models/__init__.py b/bootstraprag/templates/llamaindex/simple_rag/models/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/bootstraprag/templates/llamaindex/simple_rag/models/payload.py b/bootstraprag/templates/llamaindex/simple_rag/models/payload.py
new file mode 100644
index 0000000..ae09aba
--- /dev/null
+++ b/bootstraprag/templates/llamaindex/simple_rag/models/payload.py
@@ -0,0 +1,5 @@
+from pydantic import BaseModel
+
+
+class Payload(BaseModel):
+    query: str
diff --git a/bootstraprag/templates/llamaindex/simple_rag/readme.md b/bootstraprag/templates/llamaindex/simple_rag/readme.md
index f4524f6..f6e80f2 100644
--- a/bootstraprag/templates/llamaindex/simple_rag/readme.md
+++ b/bootstraprag/templates/llamaindex/simple_rag/readme.md
@@ -6,3 +6,9 @@
 - In the data folder place your data preferably any ".pdf"
 #### Note: ensure your qdrant and ollama (if LLM models are pointing to local) are running
 - run `python main.py`
+
+### How to expose RAG as API
+- run `python apis.py`
+- verify the swagger redoc and documentation as below
+- open browser and hit `http://localhost:8000/redoc`
+- open browser and hit `http://localhost:8000/documentation`
\ No newline at end of file
diff --git a/bootstraprag/templates/llamaindex/simple_rag_with_observability/api_core/__init__.py b/bootstraprag/templates/llamaindex/simple_rag_with_observability/api_core/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/bootstraprag/templates/llamaindex/simple_rag_with_observability/api_core/config.py b/bootstraprag/templates/llamaindex/simple_rag_with_observability/api_core/config.py
new file mode 100644
index 0000000..d944427
--- /dev/null
+++ b/bootstraprag/templates/llamaindex/simple_rag_with_observability/api_core/config.py
@@ -0,0 +1,5 @@
+class Settings:
+    PROJECT_NAME: str = "Simple RAG as FastAPI Application"
+
+
+settings = Settings()
diff --git a/bootstraprag/templates/llamaindex/simple_rag_with_observability/api_routes/__init__.py b/bootstraprag/templates/llamaindex/simple_rag_with_observability/api_routes/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/bootstraprag/templates/llamaindex/simple_rag_with_observability/api_routes/apis.py b/bootstraprag/templates/llamaindex/simple_rag_with_observability/api_routes/apis.py
new file mode 100644
index 0000000..a23f3c2
--- /dev/null
+++ b/bootstraprag/templates/llamaindex/simple_rag_with_observability/api_routes/apis.py
@@ -0,0 +1,12 @@
+from fastapi import APIRouter, Depends
+from models.payload import Payload
+from simple_rag import SimpleRAG
+
+router = APIRouter(prefix="/api/v1/rag", tags=["rag"])
+simpleRAG = SimpleRAG(input_dir='data', show_progress=True)
+
+
+@router.post(path='/query')
+def fetch_response(payload: Payload):
+    response = simpleRAG.do_rag(user_query=payload.query)
+    return response
diff --git a/bootstraprag/templates/llamaindex/simple_rag_with_observability/apis.py b/bootstraprag/templates/llamaindex/simple_rag_with_observability/apis.py
new file mode 100644
index 0000000..3a885a5
--- /dev/null
+++ b/bootstraprag/templates/llamaindex/simple_rag_with_observability/apis.py
@@ -0,0 +1,110 @@
+from fastapi import FastAPI, Request
+from fastapi.openapi.utils import get_openapi
+from api_routes.apis import router
+from fastapi.middleware.cors import CORSMiddleware
+import uvicorn
+import logging
+import time
+
+logging.basicConfig(level=logging.DEBUG)
+logging.basicConfig(
+    level=logging.INFO,
+    format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
+)
+logger = logging.getLogger(__name__)
+allowed_origins = [
+    "*"
+]
+
+app = FastAPI(
+    title="My FastAPI Application",
+    description="This is a FastAPI implementation for RAG application with Swagger UI configurations.",
+    version="1.0.0",
+    docs_url="/documentation",
+    redoc_url="/redoc",
+    openapi_url="/openapi.json",
+    contact={
+        "name": "M K Pavan Kumar",
+        "linkedin": "https://www.linkedin.com",
+    },
+    license_info={
+        "name": "MIT License",
+        "url": "https://opensource.org/licenses/MIT",
+    },
+    terms_of_service="https://www.yourwebsite.com/terms/",
+)
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=allowed_origins,
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+app.include_router(router)
+
+
+# Custom OpenAPI schema generation (optional)
+def custom_openapi():
+    if app.openapi_schema:
+        return app.openapi_schema
+    openapi_schema = get_openapi(
+        title="RAG APIs",
version="1.0.0", + description="This is a custom OpenAPI schema with additional metadata.", + routes=app.routes, + tags=[ + { + "name": "rag", + "description": "Operations for RAG query.", + } + ], + ) + # Modify openapi_schema as needed + app.openapi_schema = openapi_schema + return app.openapi_schema + + +app.openapi = custom_openapi + + +@app.middleware("http") +async def log_requests(request: Request, call_next): + try: + logger.info(f"Incoming request: {request.method} {request.url}") + response = await call_next(request) + logger.info(f"Response status: {response.status_code}") + return response + except Exception as e: + logger.exception(f"Error processing request: {e}") + raise e + + +# Request Timing Middleware +@app.middleware("http") +async def add_process_time_header(request: Request, call_next): + start_time = time.time() + response = await call_next(request) + process_time = time.time() - start_time + response.headers["X-Process-Time"] = str(process_time) + logger.info(f"Processed in {process_time:.4f} seconds") + return response + + +# Logging Middleware +@app.middleware("http") +async def log_requests(request: Request, call_next): + logger.info(f"Incoming request: {request.method} {request.url}") + response = await call_next(request) + logger.info(f"Response status: {response.status_code}") + return response + + +if __name__ == "__main__": + uvicorn.run( + "apis:app", + host="127.0.0.1", + port=8000, + reload=True, + log_level="info", + workers=1, + ) diff --git a/bootstraprag/templates/llamaindex/simple_rag_with_observability/models/__init__.py b/bootstraprag/templates/llamaindex/simple_rag_with_observability/models/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/bootstraprag/templates/llamaindex/simple_rag_with_observability/models/payload.py b/bootstraprag/templates/llamaindex/simple_rag_with_observability/models/payload.py new file mode 100644 index 0000000..ae09aba --- /dev/null +++ b/bootstraprag/templates/llamaindex/simple_rag_with_observability/models/payload.py @@ -0,0 +1,5 @@ +from pydantic import BaseModel + + +class Payload(BaseModel): + query: str diff --git a/bootstraprag/templates/llamaindex/simple_rag_with_observability/readme.md b/bootstraprag/templates/llamaindex/simple_rag_with_observability/readme.md index f4524f6..fdbf8a4 100644 --- a/bootstraprag/templates/llamaindex/simple_rag_with_observability/readme.md +++ b/bootstraprag/templates/llamaindex/simple_rag_with_observability/readme.md @@ -6,3 +6,9 @@ - In the data folder place your data preferably any ".pdf" #### Note: ensure your qdrant and ollama (if LLM models are pointing to local) are running - run `python main.py` +- This code is enabled with Observability powered by `Arize Phoenix`. +### How to expose RAG as API +- run `python apis.py` +- verify the swagger redoc and documentation as below +- open browser and hit `http://localhost:8000/redoc` +- open browser and hit `http://localhost:8000/documentation` diff --git a/setup.py b/setup.py index d2160ee..c8f195e 100644 --- a/setup.py +++ b/setup.py @@ -2,7 +2,7 @@ setup( name='bootstrap-rag', - version='0.0.3', + version='0.0.4', packages=find_packages(), include_package_data=True, install_requires=[