Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
31 changes: 19 additions & 12 deletions bootstraprag/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,12 +3,14 @@
from pathlib import Path
import os
import zipfile
from InquirerPy import inquirer


@click.group()
def cli():
    """Root command group for the bootstraprag CLI.

    Subcommands (e.g. ``create``) are registered below via
    ``cli.add_command(...)``; the group body itself does nothing.
    """
    pass


# used for downloading the project as zip.
def create_zip(project_name):
zip_path = shutil.make_archive(project_name, 'zip', project_name)
Expand All @@ -17,29 +19,34 @@ def create_zip(project_name):

@click.command()
@click.argument('project_name')
# NOTE(review): the real values are collected interactively with InquirerPy
# below; these options exist only so click passes the names into the
# function. The previous `type=click.Choice([])` rejected EVERY explicitly
# passed value (an empty choice list matches nothing), so the options were
# unusable from the command line -- they are now plain optional strings.
@click.option('--framework', default=None)
@click.option('--template', default=None)
@click.option('--observability', default=None)
def create(project_name, framework, template, observability):
    """Interactively scaffold a new RAG project named PROJECT_NAME.

    Prompts (arrow-key selection via InquirerPy) for a framework, a
    template, and optionally observability, then downloads and extracts
    the matching template via ``download_and_extract_template``.
    """
    template_choices = []
    observability_choices = []

    framework_choices = ['llamaindex', 'None']
    framework = inquirer.select(
        message="Which technology would you like to use ('None' will make you use qdrant direct search)?",
        choices=framework_choices,
    ).execute()

    # 'langchain' and 'haystack' are checked for forward compatibility even
    # though only 'llamaindex' and 'None' are currently offered above.
    if framework in ('llamaindex', 'langchain', 'haystack'):
        template_choices = ['simple-rag', 'rag-with-react', 'rag-with-hyde', 'rag-with-flare']
    elif framework == 'None':
        # 'None' means no framework: fall back to direct qdrant search.
        framework = 'qdrant'
        template_choices = ['simple-search']

    # Use InquirerPy to select template with arrow keys
    template = inquirer.select(
        message="Which template would you like to use?",
        choices=template_choices,
    ).execute()

    # Observability is only meaningful for framework-backed templates;
    # otherwise the CLI-supplied value (default None) is passed through.
    if framework in ('llamaindex', 'langchain', 'haystack'):
        observability_choices = ['Yes', 'No']
        # Use InquirerPy to select observability with arrow keys
        observability = inquirer.select(
            message="Do you wish to enable observability?",
            choices=observability_choices,
        ).execute()

    click.echo(f'You have selected framework: {framework} and template: {template} and observability: {observability}')
    download_and_extract_template(project_name, framework, template, observability)
Expand Down Expand Up @@ -67,4 +74,4 @@ def download_and_extract_template(project_name, framework, template, observabili
cli.add_command(create)

if __name__ == "__main__":
cli()
cli()
Empty file.
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
from llama_agents import (
AgentService,
AgentOrchestrator,
ControlPlaneServer,
SimpleMessageQueue,
)

from llama_index.core.agent import ReActAgent
from llama_index.core.tools import FunctionTool
from llama_index.llms.openai import OpenAI


# create an agent
def get_the_secret_fact() -> str:
    """Return the demo 'secret fact' string exposed as an agent tool."""
    secret = "The secret fact is: A baby llama is called a 'Cria'."
    return secret


# Wrap the plain function as a tool the agent can invoke.
tool = FunctionTool.from_defaults(fn=get_the_secret_fact)

# agent1 can call the secret-fact tool; agent2 has no tools at all.
agent1 = ReActAgent.from_tools([tool], llm=OpenAI())
agent2 = ReActAgent.from_tools([], llm=OpenAI())

# create our multi-agent framework components
# Each component binds its own local port (8000-8003); all services share
# the same in-process message queue for routing.
message_queue = SimpleMessageQueue(port=8000)
# The control plane routes tasks between agents using an LLM orchestrator.
control_plane = ControlPlaneServer(
    message_queue=message_queue,
    orchestrator=AgentOrchestrator(llm=OpenAI(model="gpt-4-turbo")),
    port=8001,
)
# These module-level names are imported by main.py -- do not rename.
agent_server_1 = AgentService(
    agent=agent1,
    message_queue=message_queue,
    description="Useful for getting the secret fact.",
    service_name="secret_fact_agent",
    port=8002,
)
agent_server_2 = AgentService(
    agent=agent2,
    message_queue=message_queue,
    description="Useful for getting random dumb facts.",
    service_name="dumb_fact_agent",
    port=8003,
)
16 changes: 16 additions & 0 deletions bootstraprag/templates/llamaindex/llama_agents_simpleq/main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
from llama_agents import LocalLauncher
import nest_asyncio
from agents_core import agent_server_1, agent_server_2, control_plane, message_queue

# needed for running in a notebook
# (allows launch_single to re-enter an already-running event loop)
nest_asyncio.apply()

# launch it
# LocalLauncher runs both agent services, the control plane, and the
# message queue in-process rather than as separate servers.
launcher = LocalLauncher(
    [agent_server_1, agent_server_2],
    control_plane,
    message_queue,
)
# Run a single query end-to-end through the orchestrator and block for the
# answer; expected to be served by the secret_fact_agent's tool.
result = launcher.launch_single("What is the secret fact?")

print(f"Result: {result}")
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
llama-agents==0.0.14
llama-index==0.10.65
llama-index-llms-openai==0.1.29
llama-index-llms-ollama==0.2.2
llama-index-embeddings-openai==0.1.11
llama-index-embeddings-ollama==0.2.0
llama-index-vector-stores-qdrant==0.2.16
qdrant-client==1.11.0
fastapi==0.112.1
uvicorn==0.30.6
16 changes: 8 additions & 8 deletions bootstraprag/templates/llamaindex/simple_rag/requirements.txt
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
python-dotenv==1.0.1
llama-index==0.10.58
llama-index-llms-openai==0.1.27
llama-index-llms-ollama==0.2.0
llama-index==0.10.65
llama-index-llms-openai==0.1.29
llama-index-llms-ollama==0.2.2
llama-index-embeddings-openai==0.1.11
llama-index-embeddings-ollama==0.1.2
llama-index-vector-stores-qdrant==0.2.14
qdrant-client==1.10.1
fastapi==0.112.0
uvicorn==0.30.5
llama-index-embeddings-ollama==0.2.0
llama-index-vector-stores-qdrant==0.2.16
qdrant-client==1.11.0
fastapi==0.112.1
uvicorn==0.30.6
3 changes: 2 additions & 1 deletion requirements.txt
Original file line number Diff line number Diff line change
@@ -1,2 +1,3 @@
click==8.1.7
python-dotenv==1.0.1
python-dotenv==1.0.1
inquirerpy==0.3.4