add agent chat pack
logan-markewich committed Nov 22, 2023
1 parent 71394c0 commit 34e4941
Showing 9 changed files with 235 additions and 1 deletion.
70 changes: 70 additions & 0 deletions llama_hub/llama_packs/gradio_agent_chat/README.md
@@ -0,0 +1,70 @@
# Gradio Chat With Your LlamaIndex Agent

Create a LlamaIndex agent (i.e., any `BaseAgent` subclass) and quickly chat with it using this pack's Gradio chatbot interface.

<p align="center">
  <figure>
    <img src="chat-with-your-agent-dark.png" width="75%" />
    <figcaption>Dark mode</figcaption>
  </figure>
</p>
<p align="center">
  <figure>
    <img src="chat-with-your-agent-light.png" width="75%" />
    <figcaption>Light mode</figcaption>
  </figure>
</p>

## Usage

You can download the pack to a `./gradio_agent_chat_pack` directory:

`sample.py`
```python
from llama_index.llama_packs import download_llama_pack
from llama_index.agent import OpenAIAgent
from llama_index.llms import OpenAI
from llama_index.tools import FunctionTool


def add(a: int, b: int) -> int:
    """Add two integers and return the result."""
    return a + b


def multiply(a: int, b: int) -> int:
    """Multiply two integers and return the result."""
    return a * b


multiply_tool = FunctionTool.from_defaults(fn=multiply)
add_tool = FunctionTool.from_defaults(fn=add)

# Works with any BaseAgent
agent = OpenAIAgent.from_tools(
    tools=[multiply_tool, add_tool],
    llm=OpenAI(model="gpt-3.5-turbo-1106"),
    verbose=True,  # thoughts are displayed in the Gradio interface!
)

# download and install dependencies
GradioAgentChatPack = download_llama_pack(
    "GradioAgentChatPack", "./gradio_agent_chat_pack"
)

gradio_agent_chat_pack = GradioAgentChatPack(agent=agent)

if __name__ == "__main__":
    gradio_agent_chat_pack.run()
```
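
The `# Works with any BaseAgent` comment above is the key point: you are not tied to `OpenAIAgent`. As a minimal sketch (reusing the tools, LLM, and `GradioAgentChatPack` class from `sample.py` above, and assuming `ReActAgent` is importable from `llama_index.agent` the same way `OpenAIAgent` is), a ReAct-style agent can be dropped in instead, and its verbose thoughts will show up in the pack's console pane:

```python
# Sketch: swap in a ReActAgent; assumes multiply_tool, add_tool, and
# GradioAgentChatPack are defined exactly as in sample.py above.
from llama_index.agent import ReActAgent
from llama_index.llms import OpenAI

react_agent = ReActAgent.from_tools(
    tools=[multiply_tool, add_tool],
    llm=OpenAI(model="gpt-3.5-turbo-1106"),
    verbose=True,  # this verbose stdout is what the pack captures and renders
)

gradio_agent_chat_pack = GradioAgentChatPack(agent=react_agent)

if __name__ == "__main__":
    gradio_agent_chat_pack.run()
```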

From here, you can use the pack directly, or inspect and modify its source in `./gradio_agent_chat_pack`.
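
For example, here is a small sketch of inspecting the pack at runtime (it reuses the `gradio_agent_chat_pack` instance created in `sample.py` above; the printed dictionary shape comes from `get_modules()` in this pack's `base.py`):

```python
# Reuses the gradio_agent_chat_pack instance from sample.py above.
modules = gradio_agent_chat_pack.get_modules()
print(modules)  # {"agent": <your BaseAgent instance>}

# run() builds the Gradio Blocks app and serves it
# (base.py launches it on 0.0.0.0:8080).
gradio_agent_chat_pack.run()
```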

The `run()` function is a light wrapper around `demo.launch()` on the pack's `gr.Blocks` app. To launch the
app, run the following in your terminal:

```bash
export OPENAI_API_KEY="sk-..."
python sample.py
```
3 changes: 3 additions & 0 deletions llama_hub/llama_packs/gradio_agent_chat/__init__.py
@@ -0,0 +1,3 @@
from llama_hub.llama_packs.gradio_agent_chat.base import GradioAgentChatPack

__all__ = ["GradioAgentChatPack"]
149 changes: 149 additions & 0 deletions llama_hub/llama_packs/gradio_agent_chat/base.py
@@ -0,0 +1,149 @@
from typing import Any, Dict, Generator, List, Tuple

from llama_index.llama_pack.base import BaseLlamaPack
from llama_index.agent.types import BaseAgent

from io import StringIO
import sys


class Capturing(list):
    """Capture the stdout produced by `BaseAgent.stream_chat` when `verbose=True`.

    Adapted from
    https://stackoverflow.com/questions/16571150/how-to-capture-stdout-output-from-a-python-function-call
    """

    def __enter__(self):
        self._stdout = sys.stdout
        sys.stdout = self._stringio = StringIO()
        return self

    def __exit__(self, *args):
        self.extend(self._stringio.getvalue().splitlines())
        del self._stringio  # free up some memory
        sys.stdout = self._stdout


class GradioAgentChatPack(BaseLlamaPack):
    """Gradio chatbot to chat with your own Agent."""

    def __init__(
        self,
        agent: BaseAgent,
        **kwargs: Any,
    ) -> None:
        """Init params."""
        try:
            from ansi2html import Ansi2HTMLConverter
        except ImportError:
            raise ImportError("Please install ansi2html via `pip install ansi2html`")

        self.agent = agent
        self.thoughts = ""
        self.conv = Ansi2HTMLConverter()

    def get_modules(self) -> Dict[str, Any]:
        """Get modules."""
        return {"agent": self.agent}

    def _handle_user_message(self, user_message: str, history: List[Tuple[str, str]]):
        """Handle the user-submitted message: clear the message box and append
        the message to the chat history."""
        return "", history + [(user_message, "")]

    def _generate_response(
        self, chat_history: List[Tuple[str, str]]
    ) -> Generator[Tuple[List[Tuple[str, str]], str], None, None]:
        """Generate the response from the agent, capturing its stdout (e.g., a
        ReActAgent's verbose thoughts) for display in the console pane.
        """
        with Capturing() as output:
            response = self.agent.stream_chat(chat_history[-1][0])
        ansi = "\n========\n".join(output)
        html_output = self.conv.convert(ansi)
        for token in response.response_gen:
            chat_history[-1][1] += token
            yield chat_history, str(html_output)

    def _reset_chat(self) -> Tuple[str, str, str]:
        """Reset the agent's chat history and clear all dialogue boxes."""
        self.agent.reset()  # clear agent history
        return "", "", ""  # clear textboxes

    def run(self, *args: Any, **kwargs: Any) -> Any:
        """Run the pipeline."""
        import gradio as gr
        from gradio.themes.utils import fonts, colors, sizes

        llama_theme = gr.themes.Soft(
            primary_hue=colors.purple,
            secondary_hue=colors.pink,
            neutral_hue=colors.gray,
            spacing_size=sizes.spacing_md,
            radius_size=sizes.radius_md,
            text_size=sizes.text_lg,
            font=(
                fonts.GoogleFont("Quicksand"),
                "ui-sans-serif",
                "sans-serif",
            ),
            font_mono=(
                fonts.GoogleFont("IBM Plex Mono"),
                "ui-monospace",
                "monospace",
            ),
        )
        llama_theme.set(
            body_background_fill="#FFFFFF",
            body_background_fill_dark="#000000",
            button_primary_background_fill="linear-gradient(90deg, *primary_300, *secondary_400)",
            button_primary_background_fill_hover="linear-gradient(90deg, *primary_200, *secondary_300)",
            button_primary_text_color="white",
            button_primary_background_fill_dark="linear-gradient(90deg, *primary_600, *secondary_800)",
            slider_color="*secondary_300",
            slider_color_dark="*secondary_600",
            block_title_text_weight="600",
            block_border_width="3px",
            block_shadow="*shadow_drop_lg",
            button_shadow="*shadow_drop_lg",
            button_large_padding="32px",
        )

        demo = gr.Blocks(
            theme=llama_theme,
            css="#box { height: 420px; overflow-y: scroll !important} #logo { align-self: right }",
        )
        with demo:
            with gr.Row():
                gr.Markdown(
                    "# Gradio Chat With Your Agent Powered by LlamaIndex and LlamaHub 🦙\n"
                    "This Gradio app allows you to chat with your own agent (`BaseAgent`).\n"
                )
                gr.Markdown(
                    "[![Alt text](https://d3ddy8balm3goa.cloudfront.net/other/llama-index-light-transparent-sm-font.svg)](https://llamaindex.ai)",
                    elem_id="logo",
                )
            with gr.Row():
                chat_window = gr.Chatbot(
                    label="Message History",
                    scale=3,
                )
                console = gr.HTML(elem_id="box")
            with gr.Row():
                message = gr.Textbox(label="Write A Message", scale=4)
                clear = gr.ClearButton()

            message.submit(
                self._handle_user_message,
                [message, chat_window],
                [message, chat_window],
                queue=False,
            ).then(
                self._generate_response,
                chat_window,
                [chat_window, console],
            )
            clear.click(self._reset_chat, None, [message, chat_window, console])

        demo.launch(server_name="0.0.0.0", server_port=8080)
2 binary image files (the screenshots referenced in the README above) are not shown.
3 changes: 3 additions & 0 deletions llama_hub/llama_packs/gradio_agent_chat/requirements.txt
@@ -0,0 +1,3 @@
llama-hub
gradio
ansi2html
2 changes: 1 addition & 1 deletion llama_hub/llama_packs/gradio_react_agent_chatbot/README.md
@@ -16,7 +16,7 @@ additionally the agent's thoughts are captured in an `HTML` Block.
You can download llamapacks directly using `llamaindex-cli`, which comes installed with the `llama-index` python package:

```bash
-llamaindex-cli download-llamapack GradioReactAgentPack --download-dir ./gradio_react_agent_chatbot
+llamaindex-cli download-llamapack GradioReActAgentPack --download-dir ./gradio_react_agent_chatbot
```

You can then inspect the files at `./gradio_react_agent_chatbot` and use them as a template for your own project!
3 changes: 3 additions & 0 deletions llama_hub/llama_packs/gradio_react_agent_chatbot/__init__.py
@@ -0,0 +1,3 @@
from llama_hub.llama_packs.gradio_react_agent_chatbot.base import GradioReActAgentPack

__all__ = ["GradioReActAgentPack"]
6 changes: 6 additions & 0 deletions llama_hub/llama_packs/library.json
@@ -64,6 +64,12 @@
"author": "andrei-fajardo",
"keywords": ["gradio", "react-agent", "chatbot", "tools"]
},
"GradioAgentChatPack": {
"id": "llama_packs/gradio_agent_chat",
"author": "andrei-fajardo",
"keywords": ["gradio", "agent", "chatbot", "tools"]
},
"WeaviateSubQuestionPack": {
"id": "llama_packs/sub_question_weaviate",
"author": "erika-cardenas",
