fix+feat: Multiple fixes and changes.
1. Fixed the error where the user message was not being passed, which resulted in a vector retriever exception.

2. Fixed the duplicated snackbar message; it is now handled by the app layout.

3. Added the option to gather user feedback on the latest chatbot response. Feedback is not yet submitted to Langfuse.
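As an illustration of where item 3 is headed (a sketch under assumptions, not the committed implementation): once wired up, the submit_feedback stub could forward the score and comment to Langfuse through the Python SDK's score() call, roughly as below. The trace-ID plumbing and the score name are hypothetical.

# Hypothetical sketch only -- feedback is NOT yet submitted to Langfuse.
# Assumes the Langfuse Python SDK's score() API and a trace identifier
# captured elsewhere (e.g. from the callback handler); both are placeholders.
from langfuse import Langfuse

langfuse = Langfuse()  # reads LANGFUSE_* credentials from the environment

def submit_feedback_to_langfuse(trace_id: str, score: float, comment: str):
    # The score range mirrors the UI slider: -1.0 (poor) to 1.0 (excellent).
    langfuse.score(
        trace_id=trace_id,
        name="user-feedback",
        value=score,
        comment=comment,
    )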
anirbanbasu committed May 21, 2024
1 parent c45f167 commit 1b65fc5
Showing 4 changed files with 99 additions and 68 deletions.
10 changes: 10 additions & 0 deletions app.py
@@ -16,6 +16,7 @@

from typing import Any
import solara
from solara.alias import rv

from pages import chatbot, ingest, settings
from utils import global_state
@@ -44,6 +45,15 @@ def CustomLayout(children: Any = []):
):
solara.v.Icon(children=["mdi-github-circle"])
solara.lab.ThemeToggle()
with rv.Snackbar(
bottom=True,
left=True,
timeout=0,
multi_line=True,
color=global_state.status_message_colour.value,
v_model=global_state.status_message_show.value,
):
solara.Markdown(f"{global_state.status_message.value}")
return app_layout
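
For reference, a minimal usage sketch of the now layout-owned snackbar: any page can surface a message by calling the existing helper in utils/global_state.py, which (as this diff suggests) updates the status_message* reactives that the Snackbar above reads. The message text and timeout units are illustrative.

# Minimal usage sketch: trigger the single layout-owned snackbar from any page.
from utils.global_state import show_status_message

show_status_message(
    "**Settings saved.**",  # rendered as Markdown inside the snackbar
    colour="success",       # forwarded to the Snackbar's colour
    timeout=4,              # default per the helper's signature; units assumed
)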


118 changes: 77 additions & 41 deletions pages/chatbot.py
@@ -42,6 +42,10 @@
user_chat_input: solara.Reactive[str] = solara.Reactive(constants.EMPTY_STRING)
exported_chat_json: solara.Reactive[str] = solara.Reactive(constants.EMPTY_STRING)
last_response_ai: solara.Reactive[StreamingAgentChatResponse] = solara.Reactive(None)
ai_response_feedback_score: solara.Reactive[float] = solara.Reactive(0.0)
ai_response_feedback_comment: solara.Reactive[str] = solara.Reactive(
constants.EMPTY_STRING
)


def stream_wrapper(streaming_response):
@@ -81,6 +85,12 @@ def no_chat_engine_message():
},
]

def submit_feedback(callback_arg=None):
show_status_message(
"Feedback submission has not been enabled yet. Please check back later.",
colour="warning",
)

def add_chunk_to_ai_message(chunk: str):
global_state.global_chat_messages.value = [
*global_state.global_chat_messages.value[:-1],
@@ -98,27 +108,29 @@ def add_chunk_to_ai_message(chunk: str):
},
]

def ask_tldrlc(message):
if len(global_state.global_chat_messages.value) == 1:
# Remove the only not-initialised status message from the chatbot
global_state.global_chat_messages.value = [
{
constants.CHAT_KEY_ROLE: constants.CHAT_KEY_VALUE_USER,
constants.CHAT_KEY_CONTENT: message,
constants.CHAT_KEY_TIMESTAMP: f"{datetime.datetime.now()}",
},
]
else:
global_state.global_chat_messages.value = [
*global_state.global_chat_messages.value,
{
constants.CHAT_KEY_ROLE: constants.CHAT_KEY_VALUE_USER,
constants.CHAT_KEY_CONTENT: message,
constants.CHAT_KEY_TIMESTAMP: f"{datetime.datetime.now()}",
},
]
last_response_ai.value = None
user_chat_input.value = constants.EMPTY_STRING
def ask_tldrlc(callback_arg=None):
message = user_chat_input.value
if message is not None and len(message) > 0:
if len(global_state.global_chat_messages.value) == 1:
# Remove the only not-initialised status message from the chatbot
global_state.global_chat_messages.value = [
{
constants.CHAT_KEY_ROLE: constants.CHAT_KEY_VALUE_USER,
constants.CHAT_KEY_CONTENT: message,
constants.CHAT_KEY_TIMESTAMP: f"{datetime.datetime.now()}",
},
]
else:
global_state.global_chat_messages.value = [
*global_state.global_chat_messages.value,
{
constants.CHAT_KEY_ROLE: constants.CHAT_KEY_VALUE_USER,
constants.CHAT_KEY_CONTENT: message,
constants.CHAT_KEY_TIMESTAMP: f"{datetime.datetime.now()}",
},
]
last_response_ai.value = None
user_chat_input.value = constants.EMPTY_STRING

def call_chat_engine():
try:
@@ -163,15 +175,15 @@ def call_chat_engine():
"TL;DR Let's Chat",
)

with rv.Snackbar(
top=True,
right=True,
timeout=0,
multi_line=True,
color=global_state.status_message_colour.value,
v_model=global_state.status_message_show.value,
):
solara.Markdown(f"{global_state.status_message.value}")
# with rv.Snackbar(
# top=True,
# right=True,
# timeout=0,
# multi_line=True,
# color=global_state.status_message_colour.value,
# v_model=global_state.status_message_show.value,
# ):
# solara.Markdown(f"{global_state.status_message.value}")

with solara.lab.ConfirmationDialog(
open=global_state.show_eu_ai_act_notice,
@@ -266,6 +278,40 @@ def call_chat_engine():
"padding-top": "1em",
},
)
if (
item[constants.CHAT_KEY_ROLE]
== constants.CHAT_KEY_VALUE_ASSISTANT
and item == global_state.global_chat_messages.value[-1]
and task_get_chat_response.finished
and len(global_state.global_chat_messages.value) > 1
and global_state.global_settings_langfuse_enabled.value
):
with rv.ExpansionPanels():
with rv.ExpansionPanel():
with rv.ExpansionPanelHeader():
solara.Markdown(
":monocle_face: _How did I do?_"
)
with rv.ExpansionPanelContent():
solara.SliderFloat(
label="Score",
tick_labels=False,
step=0.1,
min=-1.0,
max=1.0,
value=ai_response_feedback_score,
)
solara.InputText(
label="Comments",
style={"width": "100%"},
value=ai_response_feedback_comment,
)
solara.Button(
label="Submit",
color="success",
outlined=True,
on_click=submit_feedback,
)
if task_get_chat_response.pending:
solara.Markdown(":thinking: _Thinking of a response..._")
if exported_chat_json.value:
@@ -277,27 +323,17 @@
"""
)
with solara.Row():
# solara.lab.ChatInput(
# send_callback=ask_tldrlc,
# disabled=task_get_chat_response.pending,
# style={"width": "100%"},
# )
solara.InputText(
label="Type your message here...",
style={"width": "100%"},
value=user_chat_input,
update_events=["keyup.enter"],
on_value=ask_tldrlc,
disabled=task_get_chat_response.pending,
)
solara.Button(
label="Ask",
icon_name="mdi-send",
on_click=lambda: (
ask_tldrlc(message=user_chat_input.value)
if user_chat_input.value
else None
),
on_click=ask_tldrlc,
color="success",
disabled=task_get_chat_response.pending,
# outlined=True,
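
A note on the ask_tldrlc signature above: it now accepts an optional callback_arg so one handler can serve both widgets, since (as assumed here) Solara passes the new text to on_value callbacks while Button's on_click invokes its handler with no arguments. Reading the message from the user_chat_input reactive keeps both paths consistent, which is what closes the missing-message path behind the vector retriever exception. A stripped-down, self-contained illustration:

import solara

user_chat_input = solara.reactive("")

def handle_send(callback_arg=None):
    # Assumed Solara behaviour: on_value passes the widget's new value,
    # while Button.on_click calls the handler with no arguments. A single
    # defaulted parameter serves both, and the reactive stays the single
    # source of truth for the message text.
    message = user_chat_input.value
    if message:
        print(f"Sending: {message}")
        user_chat_input.value = ""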
33 changes: 7 additions & 26 deletions pages/ingest.py
@@ -134,34 +134,25 @@ def initialise_chat_engine() -> bool:

if (
global_state.global_knowledge_graph_index.value is not None
and global_state.global_knowledge_vector_index.value is not None
and global_state.global_semantic_search_index.value is not None
):
global_state.global_chat_engine.value = None
if global_state.global_llamaindex_chat_memory.value is not None:
global_state.global_llamaindex_chat_memory.value.reset()
show_status_message(
message=f"**Initialising chat engine** from index using the _{global_state.global_settings__index_chat_mode.value}_ chat mode."
)
# global_state.global_chat_engine.value = (
# global_state.global_knowledge_graph_index.value.as_chat_engine(
# chat_mode=global_state.global_settings__index_chat_mode.value,
# llm=Settings.llm,
# verbose=True,
# memory=global_state.global_llamaindex_chat_memory.value,
# system_prompt=global_state.global_settings__llm_system_message.value,
# node_postprocessors=post_processors,
# streaming=True,
# )
# )
kg_retriever = KGTableRetriever(
index=global_state.global_knowledge_graph_index.value,
embed_model=Settings.embed_model,
retriever_mode="hybrid",
graph_store_query_depth=2,
similarity_top_k=2,
verbose=True,
)
vector_retriever = VectorIndexRetriever(
index=global_state.global_knowledge_vector_index.value,
index=global_state.global_semantic_search_index.value,
embed_model=Settings.embed_model,
verbose=True,
)
retriever = VectorKnowledgeGraphRetriever(
@@ -262,7 +253,7 @@ def build_index_pipeline() -> bool:
message=f"**Building semantic search index** from {len(chunk_nodes)} chunks extracted from {len(ingested_documents.value)} document(s).",
timeout=0,
)
global_state.global_knowledge_vector_index.value = VectorStoreIndex(
global_state.global_semantic_search_index.value = VectorStoreIndex(
nodes=chunk_nodes,
embed_model=Settings.embed_model,
show_progress=True,
@@ -526,7 +517,7 @@ async def load_existing_indices():
show_status_message(
message=f"**Loading semantic search index** with ID _{existing_vector_index.value}_.",
)
global_state.global_knowledge_vector_index.value = load_index_from_storage(
global_state.global_semantic_search_index.value = load_index_from_storage(
storage_context=global_state.global_llamaindex_storage_context.value,
index_id=existing_vector_index.value,
)
@@ -905,16 +896,6 @@ def Page():
with solara.AppBarTitle():
solara.Text("Ingest data")

with rv.Snackbar(
top=True,
right=True,
timeout=0,
multi_line=True,
color=global_state.status_message_colour.value,
v_model=global_state.status_message_show.value,
):
solara.Markdown(f"{global_state.status_message.value}")

if (
global_state.global_settings__llm_provider_notice.value
is not constants.EMPTY_STRING
@@ -923,7 +904,7 @@
icon=True, label=global_state.global_settings__llm_provider_notice.value
)

with rv.ExpansionPanels(popout=True, hover=True, accordion=True):
with rv.ExpansionPanels(popout=True, hover=True, accordion=True, tabbable=False):
disable_index_loading = (
global_state.global_settings__neo4j_disable.value
or global_state.global_settings__redis_disable.value
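
The VectorKnowledgeGraphRetriever constructed above is the repository's own class, and its definition sits outside this diff. As a generic illustration of the pattern only (not the project's actual implementation), a custom LlamaIndex retriever that unions knowledge-graph and vector results, deduplicated by node ID, might look like this:

# Generic illustration -- not the repository's VectorKnowledgeGraphRetriever.
# Follows LlamaIndex's documented custom-retriever pattern.
from typing import List

from llama_index.core.retrievers import BaseRetriever
from llama_index.core.schema import NodeWithScore, QueryBundle


class UnionRetriever(BaseRetriever):
    def __init__(self, kg_retriever: BaseRetriever, vector_retriever: BaseRetriever):
        self._kg_retriever = kg_retriever
        self._vector_retriever = vector_retriever
        super().__init__()

    def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
        # Vector hits first, then any knowledge-graph hits not already seen.
        combined = {
            n.node.node_id: n for n in self._vector_retriever.retrieve(query_bundle)
        }
        for n in self._kg_retriever.retrieve(query_bundle):
            combined.setdefault(n.node.node_id, n)
        return list(combined.values())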
6 changes: 5 additions & 1 deletion utils/global_state.py
@@ -89,6 +89,7 @@ def show_status_message(message: str, colour: str = "info", timeout: int = 4):

""" General settings """
global_settings_initialised: solara.Reactive[bool] = solara.reactive(False)
global_settings_langfuse_enabled: solara.Reactive[bool] = solara.reactive(False)

""" Document ingestion pipeline cache """
global_cache__ingestion: solara.Reactive[RedisCache] = solara.reactive(None)
@@ -209,7 +210,7 @@ class MessageDict(TypedDict):
global_knowledge_graph_index: solara.Reactive[KnowledgeGraphIndex] = solara.reactive(
None
)
global_knowledge_vector_index: solara.Reactive[VectorStoreIndex] = solara.reactive(None)
global_semantic_search_index: solara.Reactive[VectorStoreIndex] = solara.reactive(None)
global_chat_engine: solara.Reactive[BaseChatEngine] = solara.reactive(None)
global_chat_messages: solara.Reactive[List[MessageDict]] = solara.reactive([])

@@ -255,14 +256,17 @@ def setup_langfuse():
tags=langfuse_trace_tags,
)
Settings.callback_manager = CallbackManager([langfuse_callback_handler])
global_settings_langfuse_enabled.value = True
logger.warning(
f"Using Langfuse at {langfuse__host} for performance evaluation."
)
except Exception as langfuse_e:
logger.error(f"{langfuse_e} Langfuse setup failed. Disabling Langfuse.")
global_settings_langfuse_enabled.value = False
Settings.callback_manager = None
else:
Settings.callback_manager = None
global_settings_langfuse_enabled.value = False
logger.warning("Not using Langfuse for performance evaluation.")


