diff --git a/CHANGELOG.md b/CHANGELOG.md
index b86c7f9e6..1a09d2d7f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -19,6 +19,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### New features
+* Added a new `shiny.ui.Chat` class for building conversational interfaces with fully customizable and performant response generation. (#1453)
+
+* Expose `shiny.playwright`, `shiny.run`, and `shiny.pytest` modules that allow users to test their Shiny apps. (#1448, #1456, #1481)
* `shiny.playwright` contains `controller` and `expect` submodules. `controller` will contain many classes to interact with (and verify!) your Shiny app using Playwright. `expect` contains expectation functions that enhance standard Playwright expectation methods.
* `shiny.run` contains the `run_shiny_app` command and the return type `ShinyAppProc`. `ShinyAppProc` can be used to type the Shiny app pytest fixtures.
diff --git a/docs/_quartodoc-core.yml b/docs/_quartodoc-core.yml
index 88991a5c3..4bbd133d7 100644
--- a/docs/_quartodoc-core.yml
+++ b/docs/_quartodoc-core.yml
@@ -96,6 +96,11 @@ quartodoc:
- ui.input_file
- ui.download_button
- ui.download_link
+ - title: Chat interface
+ desc: Build a chatbot interface
+ contents:
+ - ui.Chat
+ - ui.chat_ui
- title: Custom UI
desc: Lower-level UI functions for creating custom HTML/CSS/JS
contents:
diff --git a/docs/_quartodoc-express.yml b/docs/_quartodoc-express.yml
index 44df0c422..dbee0b740 100644
--- a/docs/_quartodoc-express.yml
+++ b/docs/_quartodoc-express.yml
@@ -76,6 +76,10 @@ quartodoc:
- express.ui.navset_underline
- express.ui.navset_pill_list
- express.ui.navset_hidden
+ - title: Chat interface
+ desc: Build a chatbot interface
+ contents:
+ - express.ui.Chat
- title: Reactive programming
desc: Create reactive functions and dependencies.
contents:
diff --git a/examples/chat/.gitignore b/examples/chat/.gitignore
new file mode 100644
index 000000000..4c49bd78f
--- /dev/null
+++ b/examples/chat/.gitignore
@@ -0,0 +1 @@
+.env
diff --git a/examples/chat/RAG/recipes/app.py b/examples/chat/RAG/recipes/app.py
new file mode 100644
index 000000000..ca24ad515
--- /dev/null
+++ b/examples/chat/RAG/recipes/app.py
@@ -0,0 +1,57 @@
+# ------------------------------------------------------------------------------------
+# A simple recipe extractor chatbot that extracts recipes from URLs using the OpenAI API.
+# To run it, you'll need an OpenAI API key.
+# To get one, follow the instructions at https://platform.openai.com/docs/quickstart
+# ------------------------------------------------------------------------------------
+import os
+
+from openai import AsyncOpenAI
+from utils import recipe_prompt, scrape_page_with_url
+
+from shiny.express import ui
+
+# Provide your API key here (or set the environment variable)
+llm = AsyncOpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
+
+# Set some Shiny page options
+ui.page_opts(
+ title="Recipe Extractor Chat",
+ fillable=True,
+ fillable_mobile=True,
+)
+
+# Initialize the chat (with a system prompt and starting message)
+chat = ui.Chat(
+ id="chat",
+ messages=[
+ {"role": "system", "content": recipe_prompt},
+ {
+ "role": "assistant",
+            "content": "Hello! I'm a recipe extractor. Please enter a URL to a recipe page.",
+ },
+ ],
+)
+
+chat.ui(placeholder="Enter a recipe URL...")
+
+
+# A function to transform user input
+# Note that, if an exception occurs, the function will return a message to the user
+# "short-circuiting" the conversation and asking the user to try again.
+@chat.transform_user_input
+async def try_scrape_page(input: str) -> str | None:
+ try:
+ return await scrape_page_with_url(input)
+ except Exception:
+ await chat.append_message(
+ "I'm sorry, I couldn't extract content from that URL. Please try again. "
+ )
+ return None
+
+
+@chat.on_user_submit
+async def _():
+ response = await llm.chat.completions.create(
+ model="gpt-4o", messages=chat.messages(), temperature=0, stream=True
+ )
+ await chat.append_message_stream(response)
diff --git a/examples/chat/RAG/recipes/utils.py b/examples/chat/RAG/recipes/utils.py
new file mode 100644
index 000000000..9c522cf32
--- /dev/null
+++ b/examples/chat/RAG/recipes/utils.py
@@ -0,0 +1,106 @@
+import aiohttp
+from bs4 import BeautifulSoup
+
+recipe_prompt = """
+You are RecipeExtractorGPT.
+Your goal is to extract recipe content from text and return a JSON representation of the useful information.
+
+The JSON should be structured like this:
+
+```
+{
+ "title": "Scrambled eggs",
+ "ingredients": {
+ "eggs": "2",
+ "butter": "1 tbsp",
+ "milk": "1 tbsp",
+ "salt": "1 pinch"
+ },
+ "directions": [
+ "Beat eggs, milk, and salt together in a bowl until thoroughly combined.",
+ "Heat butter in a large skillet over medium-high heat. Pour egg mixture into the hot skillet; cook and stir until eggs are set, 3 to 5 minutes."
+ ],
+ "servings": 2,
+ "prep_time": 5,
+ "cook_time": 5,
+ "total_time": 10,
+ "tags": [
+ "breakfast",
+ "eggs",
+ "scrambled"
+ ],
+ "source": "https://recipes.com/scrambled-eggs/",
+}
+```
+
+The user will provide text content from a web page.
+It is not very well structured, but the recipe is in there.
+Please look carefully for the useful information about the recipe.
+IMPORTANT: Return the result as JSON in a Markdown code block surrounded with three backticks!
+"""
+
+
+async def scrape_page_with_url(url: str, max_length: int = 14000) -> str:
+ """
+    Given a URL, scrapes the web page and returns the contents. This also adds the
+ URL to the beginning of the text.
+
+ Parameters
+ ----------
+ url:
+ The URL to scrape
+ max_length:
+ Max length of recipe text to process. This is to prevent the model from running
+ out of tokens. 14000 bytes translates to approximately 3200 tokens.
+ """
+ contents = await scrape_page(url)
+    # Trim the string so that the prompt and reply will fit in the token limit. It
+ # would be better to trim by tokens, but that requires using the tiktoken package,
+ # which can be very slow to load when running on containerized servers, because it
+ # needs to download the model from the internet each time the container starts.
+ contents = contents[:max_length]
+ return f"From: {url}\n\n" + contents
+
+
+async def scrape_page(url: str) -> str:
+ # Asynchronously send an HTTP request to the URL.
+ async with aiohttp.ClientSession() as session:
+ async with session.get(url) as response:
+ if response.status != 200:
+ raise aiohttp.ClientError(f"An error occurred: {response.status}")
+ html = await response.text()
+
+ # Parse the HTML content using BeautifulSoup
+ soup = BeautifulSoup(html, "html.parser")
+
+ # Remove script and style elements
+ for script in soup(["script", "style"]):
+ script.decompose()
+
+ # List of element IDs or class names to remove
+ elements_to_remove = [
+ "header",
+ "footer",
+ "sidebar",
+ "nav",
+ "menu",
+ "ad",
+ "advertisement",
+ "cookie-banner",
+ "popup",
+ "social",
+ "breadcrumb",
+ "pagination",
+ "comment",
+ "comments",
+ ]
+
+ # Remove unwanted elements by ID or class name
+ for element in elements_to_remove:
+ for e in soup.find_all(id=element) + soup.find_all(class_=element):
+ e.decompose()
+
+ # Extract text from the remaining HTML tags
+ text = " ".join(soup.stripped_strings)
+
+ return text
diff --git a/examples/chat/README.md b/examples/chat/README.md
new file mode 100644
index 000000000..c5a366394
--- /dev/null
+++ b/examples/chat/README.md
@@ -0,0 +1,5 @@
+# Shiny `Chat` examples
+
+This folder contains a collection of examples illustrating `shiny.ui.Chat` usage. Many of them require API keys from providers such as OpenAI, Anthropic, etc. In those cases, the example should have commentary explaining how to obtain keys as well as how to provide them to the app.
+
+To get started with an app that doesn't require an API key, see the `hello-world` example. This example has both a Shiny Core and Express app to illustrate how it's used in either mode.
diff --git a/examples/chat/enterprise/aws-bedrock-anthropic/app.py b/examples/chat/enterprise/aws-bedrock-anthropic/app.py
new file mode 100644
index 000000000..51443cbe4
--- /dev/null
+++ b/examples/chat/enterprise/aws-bedrock-anthropic/app.py
@@ -0,0 +1,47 @@
+# ------------------------------------------------------------------------------------
+# A basic Shiny Chat powered by Anthropic's Claude model with Bedrock.
+# To run it, you'll need an AWS Bedrock configuration.
+# To get started, follow the instructions at https://aws.amazon.com/bedrock/claude/
+# as well as https://github.com/anthropics/anthropic-sdk-python#aws-bedrock
+# ------------------------------------------------------------------------------------
+from anthropic import AnthropicBedrock
+
+from shiny.express import ui
+
+# Although you can set the AWS credentials here, it's recommended to put them in an .env
+# file and load them with `dotenv` so your keys aren't exposed with your code.
+# from dotenv import load_dotenv
+# _ = load_dotenv()
+llm = AnthropicBedrock(
+ # aws_secret_key="..."
+ # aws_access_key="..."
+ # aws_region="..."
+ # aws_account_id="..."
+)
+
+# Set some Shiny page options
+ui.page_opts(
+ title="Hello Anthropic Claude Chat",
+ fillable=True,
+ fillable_mobile=True,
+)
+
+# Create and display empty chat
+chat = ui.Chat(id="chat")
+chat.ui()
+
+
+# Define a callback to run when the user submits a message
+@chat.on_user_submit
+async def _():
+ # Get messages currently in the chat
+ messages = chat.messages()
+ # Create a response message stream
+ response = await llm.messages.create(
+ model="anthropic.claude-3-sonnet-20240229-v1:0",
+ messages=messages,
+ stream=True,
+ max_tokens=1000,
+ )
+ # Append the response stream into the chat
+ await chat.append_message_stream(response)
diff --git a/examples/chat/enterprise/azure-openai/app.py b/examples/chat/enterprise/azure-openai/app.py
new file mode 100644
index 000000000..ad9b25951
--- /dev/null
+++ b/examples/chat/enterprise/azure-openai/app.py
@@ -0,0 +1,55 @@
+# ------------------------------------------------------------------------------------
+# A basic Shiny Chat example powered by OpenAI running on Azure.
+# To run it, you'll need an OpenAI API key.
+# To get set up, follow the instructions at https://learn.microsoft.com/en-us/azure/ai-services/openai/quickstart?tabs=command-line%2Cpython-new&pivots=programming-language-python#create-a-new-python-application
+# ------------------------------------------------------------------------------------
+import os
+
+from openai import AzureOpenAI
+
+from shiny.express import ui
+
+# Although you can set API keys here, it's recommended to put it in an .env file
+# and load it with `dotenv` so your keys aren't exposed with your code.
+# from dotenv import load_dotenv
+# _ = load_dotenv()
+llm = AzureOpenAI(
+ api_key=os.getenv("AZURE_OPENAI_API_KEY"),
+ api_version="2024-02-01",
+ azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
+)
+
+deployment_name = "REPLACE_WITH_YOUR_DEPLOYMENT_NAME"
+
+# Set some Shiny page options
+ui.page_opts(
+ title="Hello OpenAI Chat",
+ fillable=True,
+ fillable_mobile=True,
+)
+
+# Create a chat instance, with an initial message
+chat = ui.Chat(
+ id="chat",
+ messages=[
+ {"content": "Hello! How can I help you today?", "role": "assistant"},
+ ],
+)
+
+# Display the chat
+chat.ui()
+
+
+# Define a callback to run when the user submits a message
+@chat.on_user_submit
+async def _():
+ # Get messages currently in the chat
+ messages = chat.messages()
+ # Create a response message stream
+ response = await llm.chat.completions.create(
+ model=deployment_name,
+ messages=messages,
+ stream=True,
+ )
+ # Append the response stream into the chat
+ await chat.append_message_stream(response)
diff --git a/examples/chat/hello-providers/anthropic/app.py b/examples/chat/hello-providers/anthropic/app.py
new file mode 100644
index 000000000..8307d5cf1
--- /dev/null
+++ b/examples/chat/hello-providers/anthropic/app.py
@@ -0,0 +1,43 @@
+# ------------------------------------------------------------------------------------
+# A basic Shiny Chat example powered by Anthropic's Claude model.
+# To run it, you'll need an Anthropic API key.
+# To get one, follow the instructions at https://docs.anthropic.com/en/api/getting-started
+# ------------------------------------------------------------------------------------
+import os
+
+from anthropic import AsyncAnthropic
+
+from shiny.express import ui
+
+# Although you can set the API key here, it's recommended to put it in an .env file
+# and load it with `dotenv` so your key isn't exposed with your code.
+# from dotenv import load_dotenv
+# _ = load_dotenv()
+llm = AsyncAnthropic(api_key=os.environ.get("ANTHROPIC_API_KEY"))
+
+# Set some Shiny page options
+ui.page_opts(
+ title="Hello Anthropic Claude Chat",
+ fillable=True,
+ fillable_mobile=True,
+)
+
+# Create and display empty chat
+chat = ui.Chat(id="chat")
+chat.ui()
+
+
+# Define a callback to run when the user submits a message
+@chat.on_user_submit
+async def _():
+ # Get messages currently in the chat
+ messages = chat.messages()
+ # Create a response message stream
+ response = await llm.messages.create(
+ model="claude-3-opus-20240229",
+ messages=messages,
+ stream=True,
+ max_tokens=1000,
+ )
+ # Append the response stream into the chat
+ await chat.append_message_stream(response)
diff --git a/examples/chat/hello-providers/gemini/app.py b/examples/chat/hello-providers/gemini/app.py
new file mode 100644
index 000000000..7fdaf6350
--- /dev/null
+++ b/examples/chat/hello-providers/gemini/app.py
@@ -0,0 +1,51 @@
+# ------------------------------------------------------------------------------------
+# A basic Shiny Chat example powered by Google's Gemini model.
+# To run it, you'll need a Google API key.
+# To get one, follow the instructions at https://ai.google.dev/gemini-api/docs/get-started/tutorial?lang=python
+# ------------------------------------------------------------------------------------
+
+from google.generativeai import GenerativeModel
+
+from shiny.express import ui
+
+# You'll need to set the GOOGLE_API_KEY environment variable to your Google API key.
+# We recommend putting it in a .env file and loading it with `dotenv`:
+# from dotenv import load_dotenv
+# _ = load_dotenv()
+llm = GenerativeModel()
+
+# Set some Shiny page options
+ui.page_opts(
+ title="Hello Google Gemini Chat",
+ fillable=True,
+ fillable_mobile=True,
+)
+
+# Create and display empty chat
+chat = ui.Chat(id="chat")
+chat.ui()
+
+
+# Define a callback to run when the user submits a message
+@chat.on_user_submit
+async def _():
+ # Get messages currently in the chat
+ messages = chat.messages()
+
+ # Convert messages to the format expected by Google's API
+ contents = [
+ {
+ "role": "model" if x["role"] == "assistant" else x["role"],
+ "parts": x["content"],
+ }
+ for x in messages
+ ]
+
+ # Generate a response message stream
+ response = llm.generate_content(
+ contents=contents,
+ stream=True,
+ )
+
+ # Append the response stream into the chat
+ await chat.append_message_stream(response)
diff --git a/examples/chat/hello-providers/langchain/app.py b/examples/chat/hello-providers/langchain/app.py
new file mode 100644
index 000000000..fb3a861de
--- /dev/null
+++ b/examples/chat/hello-providers/langchain/app.py
@@ -0,0 +1,39 @@
+# ------------------------------------------------------------------------------------
+# A basic Shiny Chat example powered by OpenAI via LangChain.
+# To run it, you'll need an OpenAI API key.
+# To get one, follow the instructions at https://platform.openai.com/docs/quickstart
+# To use other providers/models via LangChain, see https://python.langchain.com/v0.1/docs/modules/model_io/chat/quick_start/
+# ------------------------------------------------------------------------------------
+import os
+
+from langchain_openai import ChatOpenAI
+
+from shiny.express import ui
+
+# Although you can set the API key here, it's recommended to put it in an .env file
+# and load it with `dotenv` so your key isn't exposed with your code:
+# from dotenv import load_dotenv
+# _ = load_dotenv()
+llm = ChatOpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
+
+# Set some Shiny page options
+ui.page_opts(
+ title="Hello LangChain Chat Models",
+ fillable=True,
+ fillable_mobile=True,
+)
+
+# Create and display an empty chat UI
+chat = ui.Chat(id="chat")
+chat.ui()
+
+
+# Define a callback to run when the user submits a message
+@chat.on_user_submit
+async def _():
+ # Get messages currently in the chat
+ messages = chat.messages()
+ # Create a response message stream
+ response = llm.astream(messages)
+ # Append the response stream into the chat
+ await chat.append_message_stream(response)
diff --git a/examples/chat/hello-providers/ollama/app.py b/examples/chat/hello-providers/ollama/app.py
new file mode 100644
index 000000000..44253272e
--- /dev/null
+++ b/examples/chat/hello-providers/ollama/app.py
@@ -0,0 +1,37 @@
+# ------------------------------------------------------------------------------------
+# A basic Shiny Chat example powered by Ollama.
+# To run it, you'll need an Ollama server running locally.
+# To download and run the server, see https://github.com/ollama/ollama
+# To install the Ollama Python client, see https://github.com/ollama/ollama-python
+# ------------------------------------------------------------------------------------
+
+import ollama
+
+from shiny.express import ui
+
+# Set some Shiny page options
+ui.page_opts(
+ title="Hello Ollama Chat",
+ fillable=True,
+ fillable_mobile=True,
+)
+
+# Create and display empty chat
+chat = ui.Chat(id="chat")
+chat.ui()
+
+
+# Define a callback to run when the user submits a message
+@chat.on_user_submit
+async def _():
+ # Get messages currently in the chat
+ messages = chat.messages()
+ # Create a response message stream
+ # Assumes you've run `ollama run llama3` to start the server
+ response = ollama.chat(
+ model="llama3",
+ messages=messages,
+ stream=True,
+ )
+ # Append the response stream into the chat
+ await chat.append_message_stream(response)
diff --git a/examples/chat/hello-providers/openai/app.py b/examples/chat/hello-providers/openai/app.py
new file mode 100644
index 000000000..fc5a57c9d
--- /dev/null
+++ b/examples/chat/hello-providers/openai/app.py
@@ -0,0 +1,49 @@
+# ------------------------------------------------------------------------------------
+# A basic Shiny Chat example powered by OpenAI's GPT-4o model.
+# To run it, you'll need an OpenAI API key.
+# To get set up, follow the instructions at https://platform.openai.com/docs/quickstart
+# ------------------------------------------------------------------------------------
+import os
+
+from openai import AsyncOpenAI
+
+from shiny.express import ui
+
+# Although you can set the API key here, it's recommended to put it in an .env file
+# and load it with `dotenv` so your key isn't exposed with your code:
+# from dotenv import load_dotenv
+# _ = load_dotenv()
+llm = AsyncOpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
+
+# Set some Shiny page options
+ui.page_opts(
+ title="Hello OpenAI Chat",
+ fillable=True,
+ fillable_mobile=True,
+)
+
+# Create a chat instance, with an initial message
+chat = ui.Chat(
+ id="chat",
+ messages=[
+ {"content": "Hello! How can I help you today?", "role": "assistant"},
+ ],
+)
+
+# Display the chat
+chat.ui()
+
+
+# Define a callback to run when the user submits a message
+@chat.on_user_submit
+async def _():
+ # Get messages currently in the chat
+ messages = chat.messages()
+ # Create a response message stream
+ response = await llm.chat.completions.create(
+ model="gpt-4o",
+ messages=messages,
+ stream=True,
+ )
+ # Append the response stream into the chat
+ await chat.append_message_stream(response)
diff --git a/examples/chat/hello-world/app-core.py b/examples/chat/hello-world/app-core.py
new file mode 100644
index 000000000..17d8395fb
--- /dev/null
+++ b/examples/chat/hello-world/app-core.py
@@ -0,0 +1,31 @@
+from shiny import App, ui
+
+app_ui = ui.page_fillable(
+ ui.panel_title("Hello Shiny Chat"),
+ ui.chat_ui("chat"),
+ fillable_mobile=True,
+)
+
+# Create a welcome message
+welcome = ui.markdown(
+ """
+ Hi! This is a simple Shiny `Chat` UI. Enter a message below and I will
+ simply repeat it back to you. For more examples, see this
+ [folder of examples](https://github.com/posit-dev/py-shiny/tree/main/examples/chat).
+ """
+)
+
+
+def server(input, output, session):
+ chat = ui.Chat(id="chat", messages=[welcome])
+
+ # Define a callback to run when the user submits a message
+ @chat.on_user_submit
+ async def _():
+ # Get the user's input
+ user = chat.user_input()
+ # Append a response to the chat
+ await chat.append_message(f"You said: {user}")
+
+
+app = App(app_ui, server)
diff --git a/examples/chat/hello-world/app.py b/examples/chat/hello-world/app.py
new file mode 100644
index 000000000..3eabe7c00
--- /dev/null
+++ b/examples/chat/hello-world/app.py
@@ -0,0 +1,35 @@
+from shiny.express import ui
+
+# Set some Shiny page options
+ui.page_opts(
+ title="Hello Shiny Chat",
+ fillable=True,
+ fillable_mobile=True,
+)
+
+# Create a welcome message
+welcome = ui.markdown(
+ """
+ Hi! This is a simple Shiny `Chat` UI. Enter a message below and I will
+ simply repeat it back to you. For more examples, see this
+ [folder of examples](https://github.com/posit-dev/py-shiny/tree/main/examples/chat).
+ """
+)
+
+# Create a chat instance
+chat = ui.Chat(
+ id="chat",
+ messages=[welcome],
+)
+
+# Display it
+chat.ui()
+
+
+# Define a callback to run when the user submits a message
+@chat.on_user_submit
+async def _():
+ # Get the user's input
+ user = chat.user_input()
+ # Append a response to the chat
+ await chat.append_message(f"You said: {user}")
diff --git a/examples/chat/playground/app.py b/examples/chat/playground/app.py
new file mode 100644
index 000000000..e7c1503e6
--- /dev/null
+++ b/examples/chat/playground/app.py
@@ -0,0 +1,91 @@
+# ------------------------------------------------------------------------------------
+# A Shiny Chat example showing how to use different language models via LangChain.
+# To run it with all the different providers/models, you'll need API keys for each.
+# Namely, OPENAI_API_KEY, ANTHROPIC_API_KEY, and GOOGLE_API_KEY.
+# To see how to get these keys, see the relevant hello-providers examples.
+# (i.e., ../hello-providers/openai/app.py, ../hello-providers/anthropic/app.py, ../hello-providers/gemini/app.py)
+# ------------------------------------------------------------------------------------
+
+from langchain_anthropic import ChatAnthropic
+from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
+from langchain_google_vertexai import VertexAI
+from langchain_openai import ChatOpenAI
+
+from shiny.express import input, render, ui
+
+models = {
+ "openai": ["gpt-4o", "gpt-3.5-turbo"],
+ "claude": [
+ "claude-3-opus-20240229",
+ "claude-3-sonnet-20240229",
+ "claude-3-haiku-20240307",
+ ],
+ "google": ["gemini-1.5-pro-latest"],
+}
+
+model_choices = {}
+for key, value in models.items():
+ model_choices[key] = dict(zip(value, value))
+
+ui.page_opts(
+ title="Shiny Chat Playground",
+ fillable=True,
+ fillable_mobile=True,
+)
+
+with ui.sidebar(position="right"):
+ ui.input_select("model", "Model", choices=model_choices)
+ ui.input_select(
+ "system_actor",
+ "Response style",
+ choices=["Chuck Norris", "Darth Vader", "Yoda", "Gandalf", "Sherlock Holmes"],
+ )
+ ui.input_switch("stream", "Stream", value=False)
+ ui.input_slider("temperature", "Temperature", min=0, max=2, step=0.1, value=1)
+ ui.input_slider("max_tokens", "Max Tokens", min=1, max=4096, step=1, value=100)
+
+
+@render.express(fill=True, fillable=True)
+def chat_ui():
+ chat = ui.Chat(id="chat")
+
+ model_params = {
+ "model": input.model(),
+ "temperature": input.temperature(),
+ "max_tokens": input.max_tokens(),
+ }
+
+ if input.model() in models["openai"]:
+ llm = ChatOpenAI(**model_params)
+ elif input.model() in models["claude"]:
+ llm = ChatAnthropic(**model_params)
+ elif input.model() in models["google"]:
+ llm = VertexAI(**model_params)
+ else:
+ raise ValueError(f"Invalid model: {input.model()}")
+
+ system_message = SystemMessage(
+ f"You are a helpful AI assistant. Provide answers in the style of {input.system_actor()}."
+ )
+
+ @chat.on_user_submit
+ async def _():
+
+ # Transform ChatMessage(s) into langchain's message types
+ messages = [system_message]
+ for message in chat.messages():
+ role = message["role"]
+ content = message["content"]
+ if role == "user":
+ messages.append(HumanMessage(content))
+ elif role == "assistant":
+ messages.append(AIMessage(content))
+
+ if input.stream():
+ response = llm.astream(messages)
+ await chat.append_message_stream(response)
+ else:
+ response = await llm.ainvoke(messages)
+ await chat.append_message(response)
+
+ chat.ui()
diff --git a/examples/chat/ui/clear/app.py b/examples/chat/ui/clear/app.py
new file mode 100644
index 000000000..3582027b3
--- /dev/null
+++ b/examples/chat/ui/clear/app.py
@@ -0,0 +1,49 @@
+# --------------------------------------------------------------------------------
+# This example demonstrates how to clear the chat when the model changes.
+# To run it, you'll need an OpenAI API key.
+# To get one, follow the instructions at https://platform.openai.com/docs/quickstart
+# --------------------------------------------------------------------------------
+import os
+
+from langchain_openai import ChatOpenAI
+
+from shiny import reactive
+from shiny.express import input, ui
+
+# Provide your API key here (or set the environment variable)
+llm = ChatOpenAI(
+ api_key=os.environ.get("OPENAI_API_KEY"),
+)
+
+# Set some Shiny page options
+ui.page_opts(
+ title="Hello OpenAI Chat",
+ fillable=True,
+ fillable_mobile=True,
+)
+
+# Create a sidebar to select the model
+with ui.sidebar():
+ ui.input_select("model", "Model", ["gpt-4o", "gpt-3.5-turbo"])
+
+# Create and display an empty chat UI
+chat = ui.Chat(id="chat")
+chat.ui()
+
+
+# Define a callback to run when the user submits a message
+@chat.on_user_submit
+async def _():
+ # Get messages currently in the chat
+ messages = chat.messages()
+ # Create a response message stream
+ response = await llm.astream(messages)
+ # Append the response stream into the chat
+ await chat.append_message_stream(response)
+
+
+# Clear the chat when the model changes
+@reactive.effect
+@reactive.event(input.model)
+async def _():
+ await chat.clear_messages()
diff --git a/examples/chat/ui/dark/app.py b/examples/chat/ui/dark/app.py
new file mode 100644
index 000000000..1548203b4
--- /dev/null
+++ b/examples/chat/ui/dark/app.py
@@ -0,0 +1,41 @@
+# --------------------------------------------------------------------------------
+# This example demonstrates Shiny Chat's dark mode capability.
+# To run it, you'll need an OpenAI API key.
+# To get one, follow the instructions at https://platform.openai.com/docs/quickstart
+# --------------------------------------------------------------------------------
+import os
+
+from langchain_openai import ChatOpenAI
+
+from shiny.express import ui
+
+# Provide your API key here (or set the environment variable)
+llm = ChatOpenAI(
+ api_key=os.environ.get("OPENAI_API_KEY"),
+)
+
+# Set some Shiny page options
+ui.page_opts(
+ title="Hello dark mode!",
+ fillable=True,
+ fillable_mobile=True,
+)
+
+# Create a sidebar to select the dark mode
+with ui.sidebar(open="closed", position="right", width="100px"):
+ ui.tags.label("Dark mode", ui.input_dark_mode(mode="dark"))
+
+# Create and display an empty chat UI
+chat = ui.Chat(id="chat")
+chat.ui()
+
+
+# Define a callback to run when the user submits a message
+@chat.on_user_submit
+async def _():
+ # Get messages currently in the chat
+ messages = chat.messages()
+ # Create a response message stream
+ stream = llm.astream(messages)
+ # Append the response stream into the chat
+ await chat.append_message_stream(stream)
diff --git a/examples/chat/ui/dynamic/app.py b/examples/chat/ui/dynamic/app.py
new file mode 100644
index 000000000..96b298883
--- /dev/null
+++ b/examples/chat/ui/dynamic/app.py
@@ -0,0 +1,39 @@
+# -----------------------------------------------------------------------------
+# A basic example of dynamically re-rendering a Shiny Chat instance with different models.
+# To run it, you'll need an OpenAI API key.
+# To get one, follow the instructions at https://platform.openai.com/docs/quickstart
+# -----------------------------------------------------------------------------
+import os
+
+from langchain_openai import ChatOpenAI
+
+from shiny.express import input, render, ui
+
+ui.input_select("model", "Model", choices=["gpt-4o", "gpt-3.5-turbo"])
+
+
+@render.express
+def chat_ui():
+
+ chat = ui.Chat(
+ id="chat",
+ messages=[
+ {
+ "content": f"Hi! I'm a {input.model()} model. How can I help you today?",
+ "role": "assistant",
+ }
+ ],
+ )
+
+ chat.ui()
+
+ llm = ChatOpenAI(
+ model=input.model(),
+ # Provide your API key here (or set the environment variable)
+ api_key=os.environ.get("OPENAI_API_KEY"),
+ )
+
+ @chat.on_user_submit
+ async def _():
+ response = llm.astream(chat.messages())
+ await chat.append_message_stream(response)
diff --git a/examples/chat/ui/sidebar/app.py b/examples/chat/ui/sidebar/app.py
new file mode 100644
index 000000000..7542b1c19
--- /dev/null
+++ b/examples/chat/ui/sidebar/app.py
@@ -0,0 +1,48 @@
+# -----------------------------------------------------------------------------
+# An example of placing a Shiny Chat instance in a sidebar (and having it fill the sidebar).
+# To run it, you'll need an OpenAI API key.
+# To get one, follow the instructions at https://platform.openai.com/docs/quickstart
+# -----------------------------------------------------------------------------
+import os
+
+from langchain_openai import ChatOpenAI
+
+from shiny.express import ui
+
+# Provide your API key here (or set the environment variable)
+llm = ChatOpenAI(
+ api_key=os.environ.get("OPENAI_API_KEY"),
+)
+
+# Set some Shiny page options
+ui.page_opts(
+ title="Hello Sidebar Chat",
+ fillable=True,
+ fillable_mobile=True,
+)
+
+# Create a chat instance, with an initial message
+chat = ui.Chat(
+ id="chat",
+ messages=[
+ {"content": "Hello! How can I help you today?", "role": "assistant"},
+ ],
+)
+
+# Display the chat in a sidebar
+with ui.sidebar(width=300, style="height:100%", position="right"):
+ chat.ui(height="100%")
+
+
+# Define a callback to run when the user submits a message
+@chat.on_user_submit
+async def _():
+ # Get messages currently in the chat
+ messages = chat.messages()
+ # Create a response message stream
+ response = llm.astream(messages)
+ # Append the response stream into the chat
+ await chat.append_message_stream(response)
+
+
+"Lorem ipsum dolor sit amet, consectetur adipiscing elit"
diff --git a/js/build.ts b/js/build.ts
index e08911790..fc57144af 100644
--- a/js/build.ts
+++ b/js/build.ts
@@ -74,6 +74,18 @@ const opts: Array = [
plugins: [sassPlugin({ type: "css", sourceMap: false })],
metafile: true,
},
+ {
+ entryPoints: {
+ "chat/chat": "chat/chat.ts",
+ },
+ minify: true,
+ sourcemap: true,
+ },
+ {
+ entryPoints: { "chat/chat": "chat/chat.scss" },
+ plugins: [sassPlugin({ type: "css", sourceMap: false })],
+ metafile: true,
+ },
];
// Run function to avoid top level await
diff --git a/js/chat/_utils.ts b/js/chat/_utils.ts
new file mode 100644
index 000000000..2c318fe3e
--- /dev/null
+++ b/js/chat/_utils.ts
@@ -0,0 +1,10 @@
+export function createElement(
+ tag_name: string,
+ attrs: { [key: string]: string | null }
+): HTMLElement {
+ const el = document.createElement(tag_name);
+ for (const [key, value] of Object.entries(attrs)) {
+ if (value !== null) el.setAttribute(key, value);
+ }
+ return el;
+}
diff --git a/js/chat/chat.scss b/js/chat/chat.scss
new file mode 100644
index 000000000..fcfa5a3a1
--- /dev/null
+++ b/js/chat/chat.scss
@@ -0,0 +1,142 @@
+@use "highlight_styles" as highlight_styles;
+
+shiny-chat-container {
+ --shiny-chat-border: var(--bs-border-width, 1px) solid var(--bs-border-color, #e9ecef);
+ --shiny-chat-border-radius: 26px;
+ --shiny-chat-user-message-bg: RGBA(var(--bs-primary-rgb, 0, 123, 194), 0.06);
+
+ display: flex;
+ flex-direction: column;
+ margin: 0 auto;
+ gap: 1rem;
+ overflow: auto;
+
+ p:last-child {
+ margin-bottom: 0;
+ }
+
+ shiny-chat-messages {
+ display: flex;
+ flex-direction: column;
+ gap: 1rem;
+
+ shiny-chat-message {
+ display: grid;
+ grid-template-columns: auto minmax(0, 1fr);
+ gap: 1rem;
+ > * {
+ height: fit-content;
+ }
+ .message-icon {
+ border-radius: 50%;
+ border: var(--shiny-chat-border);
+ > * {
+ margin: 0.5rem;
+ height: 20px;
+ width: 20px;
+ }
+ }
+ /* Vertically center the 2nd column (message content) */
+ .message-content {
+ align-self: center;
+ }
+ }
+
+ /* Align the user message to the right */
+ shiny-user-message {
+ align-self: flex-end;
+ padding: 0.5rem 0.75rem;
+ border-radius: var(--shiny-chat-border-radius);
+ background-color: var(--shiny-chat-user-message-bg);
+ border: var(--shiny-chat-border);
+ }
+ }
+
+ shiny-chat-input {
+ margin-top: auto;
+ position: sticky;
+ bottom: 0;
+ padding: 0.25rem;
+ textarea {
+ --bs-border-radius: var(--shiny-chat-border-radius);
+ resize: none;
+ padding-right: 36px !important;
+ max-height: 175px;
+ }
+ button {
+ position: absolute;
+ bottom: 11px;
+ right: 12px;
+ background-color: transparent;
+ color: var(--bs-primary, #007bc2);
+ border: none;
+ padding: 0;
+ cursor: pointer;
+ line-height: 16px;
+ border-radius: 50%;
+ &:disabled {
+ cursor: not-allowed;
+ color: var(--bs-gray-500, #8d959e);
+ }
+ }
+ }
+}
+
+/*
+ Disable the page-level pulse when the chat input is disabled
+ (i.e., when a response is being generated and brought into the chat)
+*/
+.shiny-busy:has(shiny-chat-input[disabled])::after {
+ display: none;
+}
+
+/* Code highlighting (for both light and dark mode) */
+@include highlight_styles.atom_one_light;
+[data-bs-theme="dark"] {
+ @include highlight_styles.atom_one_dark;
+}
+
+/*
+ Styling for the code-copy button (inspired by Quarto's code-copy feature)
+*/
+pre:has(.code-copy-button) {
+ position: relative;
+}
+
+.code-copy-button {
+ position: absolute;
+ top: 0;
+ right: 0;
+ border: 0;
+ margin-top: 5px;
+ margin-right: 5px;
+ z-index: 3;
+ background-color: transparent;
+
+ > .bi {
+ display: flex;
+ gap: 0.25em;
+
+ &::after {
+ content: "";
+ display: block;
+ height: 1rem;
+ width: 1rem;
+ mask-image: url('data:image/svg+xml,');
+ background-color: var(--bs-body-color, #222);
+ }
+ }
+}
+
+.code-copy-button-checked {
+ > .bi::before {
+ content: "Copied!";
+ font-size: 0.75em;
+ vertical-align: 0.25em;
+ }
+
+ > .bi::after {
+ mask-image: url('data:image/svg+xml,');
+ background-color: var(--bs-success, #198754);
+ }
+}
diff --git a/js/chat/chat.ts b/js/chat/chat.ts
new file mode 100644
index 000000000..206e5dfae
--- /dev/null
+++ b/js/chat/chat.ts
@@ -0,0 +1,391 @@
+import { LitElement, html } from "lit";
+import { unsafeHTML } from "lit-html/directives/unsafe-html.js";
+import { property } from "lit/decorators.js";
+
+import ClipboardJS from "clipboard";
+import { sanitize } from "dompurify";
+import hljs from "highlight.js/lib/common";
+import { parse } from "marked";
+
+import { createElement } from "./_utils";
+
+type ContentType = "markdown" | "html" | "text";
+
+type Message = {
+ content: string;
+ role: "user" | "assistant";
+ chunk_type: "message_start" | "message_end" | null;
+ content_type: ContentType;
+};
+type ShinyChatMessage = {
+ id: string;
+ handler: string;
+ obj: Message;
+};
+
+// https://github.com/microsoft/TypeScript/issues/28357#issuecomment-748550734
+declare global {
+ interface GlobalEventHandlersEventMap {
+ "shiny-chat-input-sent": CustomEvent;
+ "shiny-chat-append-message": CustomEvent;
+ "shiny-chat-append-message-chunk": CustomEvent;
+ "shiny-chat-clear-messages": CustomEvent;
+ "shiny-chat-set-user-input": CustomEvent;
+ "shiny-chat-remove-loading-message": CustomEvent;
+ }
+}
+
+const CHAT_MESSAGE_TAG = "shiny-chat-message";
+const CHAT_USER_MESSAGE_TAG = "shiny-user-message";
+const CHAT_MESSAGES_TAG = "shiny-chat-messages";
+const CHAT_INPUT_TAG = "shiny-chat-input";
+const CHAT_CONTAINER_TAG = "shiny-chat-container";
+
+// https://lit.dev/docs/components/shadow-dom/#implementing-createrenderroot
+class LightElement extends LitElement {
+ createRenderRoot() {
+ return this;
+ }
+}
+
+class ChatMessage extends LightElement {
+ @property() content = "...";
+ @property() content_type: ContentType = "markdown";
+
+  render(): ReturnType<LitElement["render"]> {
+ let content;
+ if (this.content_type === "markdown") {
+ content = unsafeHTML(sanitize(parse(this.content) as string));
+ } else if (this.content_type === "html") {
+ content = unsafeHTML(sanitize(this.content));
+ } else if (this.content_type === "text") {
+ content = this.content;
+ } else {
+ throw new Error(`Unknown content type: ${this.content_type}`);
+ }
+
+ // TODO: support custom icons
+ const icon =
+ '';
+
+ return html`
+