Convert Docs Demos to Lite (#7661)
* edit demos

* edit demos

* remove and replace demos

* convert docs demos to lite

* add changeset

* working

* styling

* notebook

* notebook

* formatting

* /docs embedding from spaces

* shared worker mode

---------

Co-authored-by: gradio-pr-bot <gradio-pr-bot@users.noreply.github.com>
aliabd and gradio-pr-bot committed Mar 23, 2024
1 parent b0a3ea9 commit c62a57e
Showing 33 changed files with 112 additions and 70 deletions.
6 changes: 6 additions & 0 deletions .changeset/upset-pens-tie.md
@@ -0,0 +1,6 @@
---
"gradio": minor
"website": minor
---

feat:Convert Docs Demos to Lite
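
Context for the demo edits that follow: Gradio Lite runs a demo entirely in the browser via Pyodide, so the demo code can't rely on example files downloaded next to the script or on example outputs cached server-side. That is presumably why these edits strip `examples=` entries built with `os.path.dirname(__file__)` and `cache_examples=True`. A minimal sketch of a Lite-friendly app (my own illustration, not code from this commit):

```python
import gradio as gr

def greet(name: str) -> str:
    # Pure-Python logic with no filesystem access behaves the same on a
    # normal Gradio server and inside Gradio Lite (Pyodide in the browser).
    return f"Hello, {name}!"

# Lite-friendly construction: no `examples=` built from local paths and no
# `cache_examples=True`, so nothing has to exist on disk before launch.
demo = gr.Interface(fn=greet, inputs="textbox", outputs="textbox")

if __name__ == "__main__":
    demo.launch()
```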
2 changes: 1 addition & 1 deletion demo/chatbot_multimodal/run.ipynb
@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_multimodal"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/avatar.png https://github.com/gradio-app/gradio/raw/main/demo/chatbot_multimodal/files/avatar.png\n", "!wget -q -O files/lion.jpg https://github.com/gradio-app/gradio/raw/main/demo/chatbot_multimodal/files/lion.jpg"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "import time\n", "\n", "# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.\n", "\n", "\n", "def print_like_dislike(x: gr.LikeData):\n", " print(x.index, x.value, x.liked)\n", "\n", "def add_message(history, message):\n", " for x in message[\"files\"]:\n", " history.append(((x,), None))\n", " if message[\"text\"] is not None:\n", " history.append((message[\"text\"], None))\n", " return history, gr.MultimodalTextbox(value=None, interactive=False)\n", "\n", "def bot(history):\n", " response = \"**That's cool!**\"\n", " history[-1][1] = \"\"\n", " for character in response:\n", " history[-1][1] += character\n", " time.sleep(0.05)\n", " yield history\n", "\n", "\n", "with gr.Blocks() as demo:\n", " chatbot = gr.Chatbot(\n", " [],\n", " elem_id=\"chatbot\",\n", " bubble_full_width=False,\n", " avatar_images=(None, (os.path.join(os.path.abspath(''), \"files/avatar.png\"))),\n", " )\n", "\n", " chat_input = gr.MultimodalTextbox(interactive=True, file_types=[\"image\"], placeholder=\"Enter message or upload file...\", show_label=False)\n", "\n", " chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])\n", " bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name=\"bot_response\")\n", " bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])\n", "\n", " chatbot.like(print_like_dislike, None, None)\n", "\n", "demo.queue()\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_multimodal"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/avatar.png https://github.com/gradio-app/gradio/raw/main/demo/chatbot_multimodal/files/avatar.png\n", "!wget -q -O files/lion.jpg https://github.com/gradio-app/gradio/raw/main/demo/chatbot_multimodal/files/lion.jpg"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "import time\n", "\n", "# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.\n", "\n", "\n", "def print_like_dislike(x: gr.LikeData):\n", " print(x.index, x.value, x.liked)\n", "\n", "def add_message(history, message):\n", " for x in message[\"files\"]:\n", " history.append(((x,), None))\n", " if message[\"text\"] is not None:\n", " history.append((message[\"text\"], None))\n", " return history, gr.MultimodalTextbox(value=None, interactive=False)\n", "\n", "def bot(history):\n", " response = \"**That's cool!**\"\n", " history[-1][1] = \"\"\n", " for character in response:\n", " history[-1][1] += character\n", " time.sleep(0.05)\n", " yield history\n", "\n", "\n", "with gr.Blocks() as demo:\n", " chatbot = gr.Chatbot(\n", " [],\n", " elem_id=\"chatbot\",\n", " bubble_full_width=False,\n", " )\n", "\n", " chat_input = gr.MultimodalTextbox(interactive=True, file_types=[\"image\"], placeholder=\"Enter message or upload file...\", show_label=False)\n", "\n", " chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])\n", " bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name=\"bot_response\")\n", " bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])\n", "\n", " chatbot.like(print_like_dislike, None, None)\n", "\n", "demo.queue()\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
1 change: 0 additions & 1 deletion demo/chatbot_multimodal/run.py
@@ -29,7 +29,6 @@ def bot(history):
[],
elem_id="chatbot",
bubble_full_width=False,
avatar_images=(None, (os.path.join(os.path.dirname(__file__), "files/avatar.png"))),
)

chat_input = gr.MultimodalTextbox(interactive=True, file_types=["image"], placeholder="Enter message or upload file...", show_label=False)
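For readability (the notebook diff above is a single JSON line), here is a hedged sketch of `demo/chatbot_multimodal/run.py` after this commit, reconstructed from the diff with the like/dislike handler trimmed. The functional change is that `avatar_images` is gone: it resolved `files/avatar.png` relative to the script's directory, a local file the Lite runtime doesn't have.

```python
import time
import gradio as gr

def add_message(history, message):
    for f in message["files"]:
        history.append(((f,), None))
    if message["text"] is not None:
        history.append((message["text"], None))
    return history, gr.MultimodalTextbox(value=None, interactive=False)

def bot(history):
    # Stream the canned response one character at a time.
    response = "**That's cool!**"
    history[-1][1] = ""
    for character in response:
        history[-1][1] += character
        time.sleep(0.05)
        yield history

with gr.Blocks() as demo:
    # avatar_images was removed here; everything else is unchanged.
    chatbot = gr.Chatbot([], elem_id="chatbot", bubble_full_width=False)
    chat_input = gr.MultimodalTextbox(
        interactive=True,
        file_types=["image"],
        placeholder="Enter message or upload file...",
        show_label=False,
    )
    chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])
    bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name="bot_response")
    bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])

demo.queue()
if __name__ == "__main__":
    demo.launch()
```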
2 changes: 1 addition & 1 deletion demo/color_picker/run.ipynb
@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: color_picker"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio Pillow"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/color_picker/rabbit.png"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "import os\n", "from PIL import Image, ImageColor\n", "\n", "\n", "def change_color(icon, color):\n", "\n", " \"\"\"\n", " Function that given an icon in .png format changes its color\n", " Args:\n", " icon: Icon whose color needs to be changed.\n", " color: Chosen color with which to edit the input icon.\n", " Returns:\n", " edited_image: Edited icon.\n", " \"\"\"\n", " img = icon.convert(\"LA\")\n", " img = img.convert(\"RGBA\")\n", " image_np = np.array(icon)\n", " _, _, _, alpha = image_np.T\n", " mask = alpha > 0\n", " image_np[..., :-1][mask.T] = ImageColor.getcolor(color, \"RGB\")\n", " edited_image = Image.fromarray(image_np)\n", " return edited_image\n", "\n", "\n", "inputs = [\n", " gr.Image(label=\"icon\", type=\"pil\", image_mode=\"RGBA\"),\n", " gr.ColorPicker(label=\"color\"),\n", "]\n", "outputs = gr.Image(label=\"colored icon\")\n", "\n", "demo = gr.Interface(\n", " fn=change_color,\n", " inputs=inputs,\n", " outputs=outputs,\n", " examples=[\n", " [os.path.join(os.path.abspath(''), \"rabbit.png\"), \"#ff0000\"],\n", " [os.path.join(os.path.abspath(''), \"rabbit.png\"), \"#0000FF\"],\n", " ],\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: color_picker"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio Pillow"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/color_picker/rabbit.png"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "import os\n", "from PIL import Image, ImageColor\n", "\n", "\n", "def change_color(icon, color):\n", "\n", " \"\"\"\n", " Function that given an icon in .png format changes its color\n", " Args:\n", " icon: Icon whose color needs to be changed.\n", " color: Chosen color with which to edit the input icon.\n", " Returns:\n", " edited_image: Edited icon.\n", " \"\"\"\n", " img = icon.convert(\"LA\")\n", " img = img.convert(\"RGBA\")\n", " image_np = np.array(icon)\n", " _, _, _, alpha = image_np.T\n", " mask = alpha > 0\n", " image_np[..., :-1][mask.T] = ImageColor.getcolor(color, \"RGB\")\n", " edited_image = Image.fromarray(image_np)\n", " return edited_image\n", "\n", "\n", "inputs = [\n", " gr.Image(label=\"icon\", type=\"pil\", image_mode=\"RGBA\"),\n", " gr.ColorPicker(label=\"color\"),\n", "]\n", "outputs = gr.Image(label=\"colored icon\")\n", "\n", "demo = gr.Interface(\n", " fn=change_color,\n", " inputs=inputs,\n", " outputs=outputs\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
6 changes: 1 addition & 5 deletions demo/color_picker/run.py
@@ -33,11 +33,7 @@ def change_color(icon, color):
demo = gr.Interface(
fn=change_color,
inputs=inputs,
outputs=outputs,
examples=[
[os.path.join(os.path.dirname(__file__), "rabbit.png"), "#ff0000"],
[os.path.join(os.path.dirname(__file__), "rabbit.png"), "#0000FF"],
],
outputs=outputs
)

if __name__ == "__main__":
2 changes: 1 addition & 1 deletion demo/reverse_audio/run.ipynb
@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: reverse_audio"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('audio')\n", "!wget -q -O audio/cantina.wav https://github.com/gradio-app/gradio/raw/main/demo/reverse_audio/audio/cantina.wav\n", "!wget -q -O audio/recording1.wav https://github.com/gradio-app/gradio/raw/main/demo/reverse_audio/audio/recording1.wav"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import os\n", "\n", "import numpy as np\n", "\n", "import gradio as gr\n", "\n", "\n", "def reverse_audio(audio):\n", " sr, data = audio\n", " return (sr, np.flipud(data))\n", "\n", "\n", "input_audio = gr.Audio(\n", " sources=[\"microphone\"],\n", " waveform_options=gr.WaveformOptions(\n", " waveform_color=\"#01C6FF\",\n", " waveform_progress_color=\"#0066B4\",\n", " skip_length=2,\n", " show_controls=False,\n", " ),\n", ")\n", "demo = gr.Interface(\n", " fn=reverse_audio,\n", " inputs=input_audio,\n", " outputs=\"audio\",\n", " examples=[\n", " \"https://samplelib.com/lib/preview/mp3/sample-3s.mp3\",\n", " os.path.join(os.path.abspath(''), \"audio/recording1.wav\"),\n", " ],\n", " cache_examples=True,\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: reverse_audio"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('audio')\n", "!wget -q -O audio/cantina.wav https://github.com/gradio-app/gradio/raw/main/demo/reverse_audio/audio/cantina.wav\n", "!wget -q -O audio/recording1.wav https://github.com/gradio-app/gradio/raw/main/demo/reverse_audio/audio/recording1.wav"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import os\n", "\n", "import numpy as np\n", "\n", "import gradio as gr\n", "\n", "\n", "def reverse_audio(audio):\n", " sr, data = audio\n", " return (sr, np.flipud(data))\n", "\n", "\n", "input_audio = gr.Audio(\n", " sources=[\"microphone\"],\n", " waveform_options=gr.WaveformOptions(\n", " waveform_color=\"#01C6FF\",\n", " waveform_progress_color=\"#0066B4\",\n", " skip_length=2,\n", " show_controls=False,\n", " ),\n", ")\n", "demo = gr.Interface(\n", " fn=reverse_audio,\n", " inputs=input_audio,\n", " outputs=\"audio\"\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
7 changes: 1 addition & 6 deletions demo/reverse_audio/run.py
@@ -22,12 +22,7 @@ def reverse_audio(audio):
demo = gr.Interface(
fn=reverse_audio,
inputs=input_audio,
outputs="audio",
examples=[
"https://samplelib.com/lib/preview/mp3/sample-3s.mp3",
os.path.join(os.path.dirname(__file__), "audio/recording1.wav"),
],
cache_examples=True,
outputs="audio"
)

if __name__ == "__main__":
2 changes: 1 addition & 1 deletion demo/sort_records/run.ipynb
@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: sort_records"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/sort_records/polars_sort.csv"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "\n", "def sort_records(records):\n", " return records.sort(\"Quantity\")\n", "\n", "demo = gr.Interface(\n", " sort_records,\n", " gr.Dataframe(\n", " headers=[\"Item\", \"Quantity\"],\n", " datatype=[\"str\", \"number\"],\n", " row_count=3,\n", " col_count=(2, \"fixed\"),\n", " type=\"polars\"\n", " ),\n", " \"dataframe\",\n", " description=\"Sort by Quantity\",\n", " examples=[\n", " [os.path.join(os.path.abspath(''), \"polars_sort.csv\")],\n", " ],\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: sort_records"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/sort_records/polars_sort.csv"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "\n", "def sort_records(records):\n", " return records.sort(\"Quantity\")\n", "\n", "demo = gr.Interface(\n", " sort_records,\n", " gr.Dataframe(\n", " headers=[\"Item\", \"Quantity\"],\n", " datatype=[\"str\", \"number\"],\n", " row_count=3,\n", " col_count=(2, \"fixed\"),\n", " type=\"polars\"\n", " ),\n", " \"dataframe\",\n", " description=\"Sort by Quantity\"\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
5 changes: 1 addition & 4 deletions demo/sort_records/run.py
@@ -14,10 +14,7 @@ def sort_records(records):
type="polars"
),
"dataframe",
description="Sort by Quantity",
examples=[
[os.path.join(os.path.dirname(__file__), "polars_sort.csv")],
],
description="Sort by Quantity"
)

if __name__ == "__main__":
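The color_picker, reverse_audio, and sort_records demos get the same treatment: the `examples=` lists built from `os.path.dirname(__file__)` and, where present, `cache_examples=True` are removed so the `gr.Interface` no longer depends on files sitting next to the script. A hedged before/after sketch using reverse_audio's names (simplified: the real demo passes a `gr.Audio` configured with `waveform_options` as the input):

```python
import numpy as np
import gradio as gr

def reverse_audio(audio):
    sr, data = audio
    return (sr, np.flipud(data))

# Before this commit (paraphrased): examples were resolved relative to the
# script and pre-rendered, which assumes a real filesystem:
#   examples=[os.path.join(os.path.dirname(__file__), "audio/recording1.wav")],
#   cache_examples=True,

# After: only arguments that need no local files are kept.
demo = gr.Interface(
    fn=reverse_audio,
    inputs="audio",
    outputs="audio",
)

if __name__ == "__main__":
    demo.launch()
```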
2 changes: 1 addition & 1 deletion gradio/blocks.py
@@ -538,7 +538,7 @@ def update(name):
btn.click(fn=update, inputs=inp, outputs=out)
demo.launch()
Demos: blocks_hello, blocks_flipper, blocks_speech_text_sentiment, generate_english_german
Demos: blocks_hello, blocks_flipper, blocks_kinematics
Guides: blocks-and-event-listeners, controlling-layout, state-in-blocks, custom-CSS-and-JS, using-blocks-like-functions
"""

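The remaining files change only the `Demos:` line in each component docstring. My reading, an assumption rather than something stated in this diff, is that the docs site parses these comma-separated names to decide which demo apps to embed on a component's page, so each list now points only at demos that run under Lite. A small illustrative sketch of the convention (hypothetical helper, not the website's actual build code):

```python
import re
from gradio import Checkbox

def listed_demos(component_cls) -> list[str]:
    # Pull the demo names out of the "Demos:" line of a component docstring.
    match = re.search(r"Demos:\s*(.+)", component_cls.__doc__ or "")
    return [name.strip() for name in match.group(1).split(",")] if match else []

print(listed_demos(Checkbox))  # after this commit: ['sentence_builder', 'hello_world_3']
```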
2 changes: 1 addition & 1 deletion gradio/components/audio.py
@@ -50,7 +50,7 @@ class Audio(
):
"""
Creates an audio component that can be used to upload/record audio (as an input) or display audio (as an output).
Demos: main_note, generate_tone, reverse_audio
Demos: generate_tone, reverse_audio
Guides: real-time-speech-recognition
"""

2 changes: 1 addition & 1 deletion gradio/components/bar_plot.py
@@ -17,7 +17,7 @@ class BarPlot(Plot):
Creates a bar plot component to display data from a pandas DataFrame (as output). As this component does
not accept user input, it is rarely used as an input component.
Demos: bar_plot, chicago-bikeshare-dashboard
Demos: bar_plot
"""

data_model = AltairPlotData
2 changes: 1 addition & 1 deletion gradio/components/checkbox.py
@@ -16,7 +16,7 @@ class Checkbox(FormComponent):
Creates a checkbox that can be set to `True` or `False`. Can be used as an input to pass a boolean value to a function or as an output
to display a boolean value.
Demos: sentence_builder, titanic_survival
Demos: sentence_builder, hello_world_3
"""

EVENTS = [Events.change, Events.input, Events.select]
2 changes: 1 addition & 1 deletion gradio/components/checkboxgroup.py
@@ -14,7 +14,7 @@
class CheckboxGroup(FormComponent):
"""
Creates a set of checkboxes. Can be used as an input to pass a set of values to a function or as an output to display values, a subset of which are selected.
Demos: sentence_builder, titanic_survival
Demos: sentence_builder
"""

EVENTS = [Events.change, Events.input, Events.select]
2 changes: 1 addition & 1 deletion gradio/components/color_picker.py
@@ -14,7 +14,7 @@
class ColorPicker(Component):
"""
Creates a color picker for user to select a color as string input. Can be used as an input to pass a color value to a function or as an output to display a color value.
Demos: color_picker, color_generator
Demos: color_picker
"""

EVENTS = [Events.change, Events.input, Events.submit, Events.focus, Events.blur]
2 changes: 1 addition & 1 deletion gradio/components/dropdown.py
@@ -16,7 +16,7 @@ class Dropdown(FormComponent):
"""
Creates a dropdown of choices from which a single entry or multiple entries can be selected (as an input component) or displayed (as an output component).
Demos: sentence_builder, titanic_survival
Demos: sentence_builder
"""

EVENTS = [
2 changes: 1 addition & 1 deletion gradio/components/highlighted_text.py
@@ -25,7 +25,7 @@ class HighlightedText(Component):
"""
Displays text that contains spans that are highlighted by category or numerical value.
Demos: diff_texts, text_analysis
Demos: diff_texts
Guides: named-entity-recognition
"""

2 changes: 1 addition & 1 deletion gradio/components/html.py
@@ -15,7 +15,7 @@ class HTML(Component):
"""
Creates a component to display arbitrary HTML output. As this component does not accept user input, it is rarely used as an input component.
Demos: text_analysis
Demos: blocks_scroll
Guides: key-features
"""

