12 changes: 6 additions & 6 deletions confirm-action/main.py
@@ -4,32 +4,32 @@


 @cl.action_callback("confirm_action")
-def on_action(action: cl.Action):
+async def on_action(action: cl.Action):
     if action.value == "ok":
         content = "Confirmed!"
     elif action.value == "not_ok":
         content = "Rejected!"
     else:
-        cl.ErrorMessage(content="Invalid action").send()
+        await cl.ErrorMessage(content="Invalid action").send()
         return

     actions = cl.user_session.get("actions")
     if actions:
         for action in actions:
-            action.remove()
+            await action.remove()
         cl.user_session.set("actions", None)

-    cl.Message(content=content).send()
+    await cl.Message(content=content).send()


 @cl.on_chat_start
-def start():
+async def start():
     approve_action = cl.Action(name="confirm_action", value="ok", label="Confirm")
     reject_action = cl.Action(name="confirm_action", value="not_ok", label="Reject")
     actions = [approve_action, reject_action]
     cl.user_session.set("actions", actions)

-    cl.Message(
+    await cl.Message(
         content="Test message",
         actions=actions,
     ).send()
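The pattern above generalizes across the whole PR: in Chainlit's async API, every UI call (`Message.send()`, `ErrorMessage.send()`, `Action.remove()`) returns a coroutine, so the enclosing handler must be declared `async` and each call awaited. A minimal sketch of the migrated callback shape (the message text here is illustrative):

import chainlit as cl

@cl.action_callback("confirm_action")
async def on_action(action: cl.Action):
    # Without `await`, the coroutine is created but never runs,
    # and the message silently never reaches the UI.
    await cl.Message(content=f"You chose {action.value}").send()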
21 changes: 13 additions & 8 deletions image-gen/app.py
@@ -1,5 +1,6 @@
 import chainlit as cl
 from chainlit.action import Action
+
 from tools import generate_image_tool, edit_image_tool
 from langchain.agents import initialize_agent, AgentType
 from langchain.chat_models import ChatOpenAI
@@ -8,11 +9,11 @@


 @cl.action_callback("Create variation")
-def create_variant(action: Action):
+async def create_variant(action: Action):
     agent = cl.user_session.get("agent")
     agent_input = f"Create a variation of {action.value}"
-    cl.Message(content=f"Creating a variation of `{action.value}`.").send()
-    run(agent, agent_input)
+    await cl.Message(content=f"Creating a variation of `{action.value}`.").send()
+    await run(agent, agent_input)


 @cl.langchain_rename
@@ -23,7 +24,7 @@ def rename(orig_author):
     return mapping.get(orig_author, orig_author)


-@cl.langchain_factory
+@cl.langchain_factory(use_async=False)
 def main():
     llm = ChatOpenAI(temperature=0, streaming=True)
     tools = [generate_image_tool, edit_image_tool]
@@ -45,9 +46,13 @@ def main():


 @cl.langchain_run
-def run(agent_executor, action_input):
+async def run(agent_executor, action_input):
     cl.user_session.set("generated_image", None)
-    res = agent_executor.run(input=action_input)
+
+    # No async implementation in the Stability AI client, fallback to sync
+    res = await cl.make_async(agent_executor.run)(
+        input=action_input, callbacks=[cl.ChainlitCallbackHandler()]
+    )

     elements = []
     actions = []
@@ -56,12 +61,12 @@ def run(agent_executor, action_input):
     generated_image = cl.user_session.get(generated_image_name)
     if generated_image:
         elements = [
-            cl.LocalImage(
+            cl.Image(
                 content=generated_image,
                 name=generated_image_name,
                 display="inline",
             )
         ]
         actions = [cl.Action(name="Create variation", value=generated_image_name)]

-    cl.Message(content=res, elements=elements, actions=actions).send()
+    await cl.Message(content=res, elements=elements, actions=actions).send()
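The `cl.make_async` call above is the escape hatch for sync-only SDKs: it wraps a blocking callable so it can be awaited without stalling the event loop. A minimal sketch, assuming a blocking `slow_generate` stand-in for the Stability AI client:

import time
import chainlit as cl

def slow_generate(prompt: str) -> str:
    # Stand-in for a blocking SDK call (e.g. image generation)
    time.sleep(5)
    return f"image for {prompt!r}"

async def handle(prompt: str):
    # make_async runs the sync function in a worker thread, so the
    # event loop keeps serving other sessions while it blocks
    result = await cl.make_async(slow_generate)(prompt)
    await cl.Message(content=result).send()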
2 changes: 1 addition & 1 deletion langchain-aiplugins/app.py
@@ -5,7 +5,7 @@
 from chainlit import langchain_factory


-@langchain_factory
+@langchain_factory(use_async=True)
 def load():
     tool = AIPluginTool.from_plugin_url(
         "https://www.klarna.com/.well-known/ai-plugin.json"
    )
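Note the asymmetry with image-gen above: as the two factories in this PR suggest, `use_async=True` has Chainlit drive the returned chain through LangChain's async interface, while `use_async=False` keeps the chain on the sync path (image-gen stays sync because its Stability AI calls are blocking). The factory body itself is synchronous either way. A sketch of the two variants, with hypothetical chain constructors:

@cl.langchain_factory(use_async=True)   # chain is driven through LangChain's async path
def load():
    return build_async_capable_chain()  # hypothetical constructor

@cl.langchain_factory(use_async=False)  # chain runs sync, off the event loop
def main():
    return build_sync_only_chain()      # hypothetical constructor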
43 changes: 29 additions & 14 deletions pdf-qa/app.py
@@ -1,11 +1,13 @@
+import os
+
 from langchain.document_loaders import PyPDFLoader, TextLoader
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.embeddings.openai import OpenAIEmbeddings
 from langchain.vectorstores import Pinecone
 from langchain.chains import RetrievalQAWithSourcesChain
 from langchain.chat_models import ChatOpenAI
 import pinecone

 import chainlit as cl
 from chainlit.types import AskFileResponse

@@ -44,16 +46,7 @@ def process_file(file: AskFileResponse):
     return docs


-@cl.langchain_factory
-def langchain_factory():
-    file = None
-    while file is None:
-        file = cl.AskFileMessage(
-            content=welcome_message,
-            accept=["text/plain", "application/pdf"],
-            timeout=180,
-        ).send()
-
+def get_docsearch(file: AskFileResponse):
     docs = process_file(file)

     # Save data in the user session
@@ -72,20 +65,42 @@ def langchain_factory():
     )
     namespaces.add(namespace)

+    return docsearch
+
+
+@cl.langchain_factory(use_async=True)
+async def langchain_factory():
+    files = None
+    while files is None:
+        files = await cl.AskFileMessage(
+            content=welcome_message,
+            accept=["text/plain", "application/pdf"],
+            max_size_mb=20,
+            timeout=180,
+        ).send()
+
+    file = files[0]
+
+    msg = cl.Message(content=f"Processing `{file.name}`...")
+    await msg.send()
+
+    # No async implementation in the Pinecone client, fallback to sync
+    docsearch = await cl.make_async(get_docsearch)(file)
+
     chain = RetrievalQAWithSourcesChain.from_chain_type(
         ChatOpenAI(temperature=0, streaming=True),
         chain_type="stuff",
         retriever=docsearch.as_retriever(max_tokens_limit=4097),
     )

     # Let the user know that the system is ready
-    cl.Message(content=f"`{file.name}` uploaded, you can now ask questions!").send()
+    await msg.update(content=f"`{file.name}` processed. You can now ask questions!")

     return chain


 @cl.langchain_postprocess
-def process_response(res):
+async def process_response(res):
     answer = res["answer"]
     sources = res["sources"].strip()
     source_elements = []
@@ -109,11 +124,11 @@ def process_response(res):
             text = docs[index].page_content
             found_sources.append(source_name)
             # Create the text element referenced in the message
-            source_elements.append(cl.Text(text=text, name=source_name))
+            source_elements.append(cl.Text(content=text, name=source_name))

     if found_sources:
         answer += f"\nSources: {', '.join(found_sources)}"
     else:
         answer += "\nNo sources found"

-    cl.Message(content=answer, elements=source_elements).send()
+    await cl.Message(content=answer, elements=source_elements).send()
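Two API changes are bundled into this file besides the async migration: `AskFileMessage.send()` now resolves to a list of files (hence `files[0]`), and a sent message can be updated in place, which the code uses as a progress indicator while the Pinecone indexing runs inside `cl.make_async`. Distilled from the diff above:

files = await cl.AskFileMessage(
    content="Please upload a PDF",
    accept=["application/pdf"],
    max_size_mb=20,
    timeout=180,
).send()          # resolves to a list, even for a single upload
file = files[0]

msg = cl.Message(content=f"Processing `{file.name}`...")
await msg.send()
# ... long-running indexing wrapped in cl.make_async ...
await msg.update(content=f"`{file.name}` processed. You can now ask questions!")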
12 changes: 6 additions & 6 deletions pinecone/main.py → pinecone/app.py
@@ -22,9 +22,9 @@
 welcome_message = "Welcome to the Chainlit Pinecone demo! Ask anything about documents you vectorized and stored in your Pinecone DB."


-@cl.langchain_factory
-def langchain_factory():
-    cl.Message(content=welcome_message).send()
+@cl.langchain_factory(use_async=True)
+async def langchain_factory():
+    await cl.Message(content=welcome_message).send()
     docsearch = Pinecone.from_existing_index(
         index_name=index_name, embedding=embeddings, namespace=namespace
     )
@@ -39,7 +39,7 @@ def langchain_factory():


 @cl.langchain_postprocess
-def process_response(res):
+async def process_response(res):
     answer = res["answer"]
     sources = res.get("sources", "").strip()  # Use the get method with a default value
     source_elements = []
@@ -67,7 +67,7 @@ def langchain_factory():
             text = docs[found_index].page_content

             found_sources.append(clean_source_name)
-            source_elements.append(cl.Text(text=text, name=clean_source_name))
+            source_elements.append(cl.Text(content=text, name=clean_source_name))

     if found_sources:
         # Add the sources to the answer, referencing the text elements
@@ -76,4 +76,4 @@
         answer += "\nNo sources found"

     # Send the answer and the text elements to the UI
-    cl.Message(content=answer, elements=source_elements).send()
+    await cl.Message(content=answer, elements=source_elements).send()
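The `cl.Text(text=...)` to `cl.Text(content=...)` rename appears in both QA apps: text elements now take the same `content=` keyword that messages use. Sketch of the updated usage (names are illustrative):

source = cl.Text(content="relevant passage...", name="page-3")
await cl.Message(content=answer, elements=[source]).send()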