diff --git a/confirm-action/main.py b/confirm-action/main.py
index 986075e54..3e2f1b8bd 100644
--- a/confirm-action/main.py
+++ b/confirm-action/main.py
@@ -4,32 +4,32 @@
 @cl.action_callback("confirm_action")
-def on_action(action: cl.Action):
+async def on_action(action: cl.Action):
     if action.value == "ok":
         content = "Confirmed!"
     elif action.value == "not_ok":
         content = "Rejected!"
     else:
-        cl.ErrorMessage(content="Invalid action").send()
+        await cl.ErrorMessage(content="Invalid action").send()
         return
 
     actions = cl.user_session.get("actions")
 
     if actions:
         for action in actions:
-            action.remove()
+            await action.remove()
         cl.user_session.set("actions", None)
 
-    cl.Message(content=content).send()
+    await cl.Message(content=content).send()
 
 
 @cl.on_chat_start
-def start():
+async def start():
     approve_action = cl.Action(name="confirm_action", value="ok", label="Confirm")
     reject_action = cl.Action(name="confirm_action", value="not_ok", label="Reject")
     actions = [approve_action, reject_action]
 
     cl.user_session.set("actions", actions)
 
-    cl.Message(
+    await cl.Message(
         content="Test message",
         actions=actions,
     ).send()
diff --git a/image-gen/app.py b/image-gen/app.py
index 1954af37c..2d0a2063b 100644
--- a/image-gen/app.py
+++ b/image-gen/app.py
@@ -1,5 +1,6 @@
 import chainlit as cl
 from chainlit.action import Action
+
 from tools import generate_image_tool, edit_image_tool
 from langchain.agents import initialize_agent, AgentType
 from langchain.chat_models import ChatOpenAI
@@ -8,11 +9,11 @@
 
 
 @cl.action_callback("Create variation")
-def create_variant(action: Action):
+async def create_variant(action: Action):
     agent = cl.user_session.get("agent")
     agent_input = f"Create a variation of {action.value}"
-    cl.Message(content=f"Creating a variation of `{action.value}`.").send()
-    run(agent, agent_input)
+    await cl.Message(content=f"Creating a variation of `{action.value}`.").send()
+    await run(agent, agent_input)
 
 
 @cl.langchain_rename
@@ -23,7 +24,7 @@ def rename(orig_author):
     return mapping.get(orig_author, orig_author)
 
 
-@cl.langchain_factory
+@cl.langchain_factory(use_async=False)
 def main():
     llm = ChatOpenAI(temperature=0, streaming=True)
     tools = [generate_image_tool, edit_image_tool]
@@ -45,9 +46,13 @@ def main():
 
 
 @cl.langchain_run
-def run(agent_executor, action_input):
+async def run(agent_executor, action_input):
     cl.user_session.set("generated_image", None)
-    res = agent_executor.run(input=action_input)
+
+    # No async implementation in the Stability AI client, fallback to sync
+    res = await cl.make_async(agent_executor.run)(
+        input=action_input, callbacks=[cl.ChainlitCallbackHandler()]
+    )
 
     elements = []
     actions = []
@@ -56,7 +61,7 @@ def run(agent_executor, action_input):
     generated_image = cl.user_session.get(generated_image_name)
     if generated_image:
         elements = [
-            cl.LocalImage(
+            cl.Image(
                 content=generated_image,
                 name=generated_image_name,
                 display="inline",
@@ -64,4 +69,4 @@
         ]
         actions = [cl.Action(name="Create variation", value=generated_image_name)]
 
-    cl.Message(content=res, elements=elements, actions=actions).send()
+    await cl.Message(content=res, elements=elements, actions=actions).send()
diff --git a/langchain-aiplugins/app.py b/langchain-aiplugins/app.py
index f7535b29e..8f1248252 100644
--- a/langchain-aiplugins/app.py
+++ b/langchain-aiplugins/app.py
@@ -5,7 +5,7 @@
 from chainlit import langchain_factory
 
 
-@langchain_factory
+@langchain_factory(use_async=True)
 def load():
     tool = AIPluginTool.from_plugin_url(
         "https://www.klarna.com/.well-known/ai-plugin.json"
diff --git a/pdf-qa/app.py b/pdf-qa/app.py
index e60a1f9b9..66968d5da 100644
--- a/pdf-qa/app.py
+++ b/pdf-qa/app.py
@@ -1,4 +1,5 @@
 import os
+
 from langchain.document_loaders import PyPDFLoader, TextLoader
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.embeddings.openai import OpenAIEmbeddings
@@ -6,6 +7,7 @@
 from langchain.chains import RetrievalQAWithSourcesChain
 from langchain.chat_models import ChatOpenAI
 import pinecone
+
 import chainlit as cl
 from chainlit.types import AskFileResponse
 
@@ -44,16 +46,7 @@ def process_file(file: AskFileResponse):
     return docs
 
 
-@cl.langchain_factory
-def langchain_factory():
-    file = None
-    while file is None:
-        file = cl.AskFileMessage(
-            content=welcome_message,
-            accept=["text/plain", "application/pdf"],
-            timeout=180,
-        ).send()
-
+def get_docsearch(file: AskFileResponse):
    docs = process_file(file)
 
     # Save data in the user session
@@ -72,6 +65,28 @@
         )
         namespaces.add(namespace)
 
+    return docsearch
+
+
+@cl.langchain_factory(use_async=True)
+async def langchain_factory():
+    files = None
+    while files is None:
+        files = await cl.AskFileMessage(
+            content=welcome_message,
+            accept=["text/plain", "application/pdf"],
+            max_size_mb=20,
+            timeout=180,
+        ).send()
+
+    file = files[0]
+
+    msg = cl.Message(content=f"Processing `{file.name}`...")
+    await msg.send()
+
+    # No async implementation in the Pinecone client, fallback to sync
+    docsearch = await cl.make_async(get_docsearch)(file)
+
     chain = RetrievalQAWithSourcesChain.from_chain_type(
         ChatOpenAI(temperature=0, streaming=True),
         chain_type="stuff",
@@ -79,13 +94,13 @@
     )
 
     # Let the user know that the system is ready
-    cl.Message(content=f"`{file.name}` uploaded, you can now ask questions!").send()
+    await msg.update(content=f"`{file.name}` processed. You can now ask questions!")
 
     return chain
 
 
 @cl.langchain_postprocess
-def process_response(res):
+async def process_response(res):
     answer = res["answer"]
     sources = res["sources"].strip()
     source_elements = []
@@ -109,11 +124,11 @@
             text = docs[index].page_content
             found_sources.append(source_name)
             # Create the text element referenced in the message
-            source_elements.append(cl.Text(text=text, name=source_name))
+            source_elements.append(cl.Text(content=text, name=source_name))
 
     if found_sources:
         answer += f"\nSources: {', '.join(found_sources)}"
     else:
         answer += "\nNo sources found"
 
-    cl.Message(content=answer, elements=source_elements).send()
+    await cl.Message(content=answer, elements=source_elements).send()
diff --git a/pinecone/main.py b/pinecone/app.py
similarity index 88%
rename from pinecone/main.py
rename to pinecone/app.py
index 17dd79e9b..c52bdc803 100644
--- a/pinecone/main.py
+++ b/pinecone/app.py
@@ -22,9 +22,9 @@
 welcome_message = "Welcome to the Chainlit Pinecone demo! Ask anything about documents you vectorized and stored in your Pinecone DB."
 
 
-@cl.langchain_factory
-def langchain_factory():
-    cl.Message(content=welcome_message).send()
+@cl.langchain_factory(use_async=True)
+async def langchain_factory():
+    await cl.Message(content=welcome_message).send()
     docsearch = Pinecone.from_existing_index(
         index_name=index_name, embedding=embeddings, namespace=namespace
     )
@@ -39,7 +39,7 @@
 
 
 @cl.langchain_postprocess
-def process_response(res):
+async def process_response(res):
     answer = res["answer"]
     sources = res.get("sources", "").strip()  # Use the get method with a default value
     source_elements = []
@@ -67,7 +67,7 @@
 
             text = docs[found_index].page_content
             found_sources.append(clean_source_name)
-            source_elements.append(cl.Text(text=text, name=clean_source_name))
+            source_elements.append(cl.Text(content=text, name=clean_source_name))
 
     if found_sources:
         # Add the sources to the answer, referencing the text elements
@@ -76,4 +76,4 @@
         answer += "\nNo sources found"
 
     # Send the answer and the text elements to the UI
-    cl.Message(content=answer, elements=source_elements).send()
+    await cl.Message(content=answer, elements=source_elements).send()