Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions .changeset/grumpy-tigers-heal.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
---
"create-llama": patch
---

Simplify the local index code.
5 changes: 3 additions & 2 deletions templates/components/workflows/python/agentic_rag/workflow.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,11 +9,12 @@


def create_workflow(chat_request: Optional[ChatRequest] = None) -> AgentWorkflow:
query_tool = get_query_engine_tool(index=get_index(chat_request=chat_request))
if query_tool is None:
index = get_index(chat_request=chat_request)
if index is None:
raise RuntimeError(
"Index not found! Please run `poetry run generate` to index the data first."
)
query_tool = get_query_engine_tool(index=index)
return AgentWorkflow.from_tools_or_functions(
tools_or_functions=[query_tool],
llm=Settings.llm or OpenAI(model="gpt-4o-mini"),
Expand Down
19 changes: 5 additions & 14 deletions templates/types/llamaindexserver/fastapi/app/index.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,28 +5,19 @@
from llama_index.core.indices import load_index_from_storage
from llama_index.server.api.models import ChatRequest
from llama_index.server.tools.index.utils import get_storage_context
from pydantic import BaseModel

logger = logging.getLogger("uvicorn")


class IndexConfig(BaseModel):
storage_dir: str = "storage"

@classmethod
def from_default(cls, chat_request: Optional[ChatRequest] = None) -> "IndexConfig":
return cls()
STORAGE_DIR = "storage"


def get_index(chat_request: Optional[ChatRequest] = None):
    """Load the persisted vector index from the local storage directory.

    Args:
        chat_request: Optional incoming chat request. Currently unused;
            kept in the signature for interface compatibility with callers
            that pass it (see `create_workflow`).

    Returns:
        The index loaded from ``STORAGE_DIR``, or ``None`` when the storage
        directory does not exist yet (i.e. the data has not been indexed).
    """
    # No storage directory means `generate` has not been run yet; callers
    # treat None as "index not found" and raise their own error.
    if not os.path.exists(STORAGE_DIR):
        return None
    # load the existing index
    logger.info(f"Loading index from {STORAGE_DIR}...")
    storage_context = get_storage_context(STORAGE_DIR)
    index = load_index_from_storage(storage_context)
    logger.info(f"Finished loading index from {STORAGE_DIR}")
    return index
9 changes: 4 additions & 5 deletions templates/types/llamaindexserver/fastapi/generate.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
import logging
import os

from dotenv import load_dotenv

from app.index import STORAGE_DIR
from app.settings import init_settings
from dotenv import load_dotenv
from llama_index.core.indices import (
VectorStoreIndex,
)
Expand All @@ -18,7 +18,6 @@ def generate_datasource():
init_settings()

logger.info("Creating new index")
storage_dir = os.environ.get("STORAGE_DIR", "storage")
# load the documents and create the index
reader = SimpleDirectoryReader(
os.environ.get("DATA_DIR", "data"),
Expand All @@ -30,5 +29,5 @@ def generate_datasource():
show_progress=True,
)
# store it for later
index.storage_context.persist(storage_dir)
logger.info(f"Finished creating new index. Stored in {storage_dir}")
index.storage_context.persist(STORAGE_DIR)
logger.info(f"Finished creating new index. Stored in {STORAGE_DIR}")