Fix various issues with demos on website (#6268)
* fix demos

* demos on landing page

* make code interactive on playground

* add changeset

* try new secret

* formatting

* fix fake_gan

* demo notebooks

---------

Co-authored-by: gradio-pr-bot <gradio-pr-bot@users.noreply.github.com>
Co-authored-by: Abubakar Abid <abubakar@huggingface.co>
3 people committed Nov 3, 2023
1 parent e32bac8 commit de36820
Showing 61 changed files with 81 additions and 1,649 deletions.
6 changes: 6 additions & 0 deletions .changeset/quick-cases-stay.md
@@ -0,0 +1,6 @@
---
"gradio": minor
"website": minor
---

feat:Fix various issues with demos on website
2 changes: 1 addition & 1 deletion .github/workflows/deploy-pr-to-spaces.yml
@@ -62,7 +62,7 @@ jobs:
github.event.workflow_run.conclusion == 'success'
id: upload-website-demos
run: |
- python scripts/upload_website_demos.py --AUTH_TOKEN ${{ secrets.SPACES_DEPLOY_TOKEN }} \
+ python scripts/upload_website_demos.py --AUTH_TOKEN ${{ secrets.WEBSITE_SPACES_DEPLOY_TOKEN }} \
--WHEEL_URL https://gradio-builds.s3.amazonaws.com/${{ steps.set-outputs.outputs.gh_sha }}/ \
--GRADIO_VERSION ${{ steps.set-outputs.outputs.gradio_version }}
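The flags passed above (`--AUTH_TOKEN`, `--WHEEL_URL`, `--GRADIO_VERSION`) suggest an argparse-style command-line interface. The sketch below is only an assumption about how such a script could parse them; the actual contents of `scripts/upload_website_demos.py` may differ.

```python
# Hypothetical sketch of the CLI surface used in the workflow step above;
# the real scripts/upload_website_demos.py in the Gradio repo may differ.
import argparse


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="Upload website demos to Spaces")
    parser.add_argument("--AUTH_TOKEN", required=True, help="Spaces deploy token")
    parser.add_argument("--WHEEL_URL", required=True, help="URL of the gradio wheel build to install in the demos")
    parser.add_argument("--GRADIO_VERSION", required=True, help="Gradio version to pin in the demos")
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    # Placeholder action: a real script would upload each demo Space here.
    print(f"Would upload demos built against gradio {args.GRADIO_VERSION} from {args.WHEEL_URL}")
```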
2 changes: 1 addition & 1 deletion demo/Echocardiogram-Segmentation/run.ipynb
@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: Echocardiogram-Segmentation"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio -f https://download.pytorch.org/whl/torch_stable.html numpy matplotlib wget torch torchvision "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/Echocardiogram-Segmentation/img1.jpg\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/Echocardiogram-Segmentation/img2.jpg"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import os\n", "import numpy as np\n", "import torch\n", "import torchvision\n", "import wget \n", "\n", "\n", "destination_folder = \"output\"\n", "destination_for_weights = \"weights\"\n", "\n", "if os.path.exists(destination_for_weights):\n", " print(\"The weights are at\", destination_for_weights)\n", "else:\n", " print(\"Creating folder at \", destination_for_weights, \" to store weights\")\n", " os.mkdir(destination_for_weights)\n", " \n", "segmentationWeightsURL = 'https://github.com/douyang/EchoNetDynamic/releases/download/v1.0.0/deeplabv3_resnet50_random.pt'\n", "\n", "if not os.path.exists(os.path.join(destination_for_weights, os.path.basename(segmentationWeightsURL))):\n", " print(\"Downloading Segmentation Weights, \", segmentationWeightsURL,\" to \",os.path.join(destination_for_weights, os.path.basename(segmentationWeightsURL)))\n", " filename = wget.download(segmentationWeightsURL, out = destination_for_weights)\n", "else:\n", " print(\"Segmentation Weights already present\")\n", "\n", "torch.cuda.empty_cache()\n", "\n", "def collate_fn(x):\n", " x, f = zip(*x)\n", " i = list(map(lambda t: t.shape[1], x))\n", " x = torch.as_tensor(np.swapaxes(np.concatenate(x, 1), 0, 1))\n", " return x, f, i\n", "\n", "model = torchvision.models.segmentation.deeplabv3_resnet50(pretrained=False, aux_loss=False)\n", "model.classifier[-1] = torch.nn.Conv2d(model.classifier[-1].in_channels, 1, kernel_size=model.classifier[-1].kernel_size)\n", "\n", "print(\"loading weights from \", os.path.join(destination_for_weights, \"deeplabv3_resnet50_random\"))\n", "\n", "if torch.cuda.is_available():\n", " print(\"cuda is available, original weights\")\n", " device = torch.device(\"cuda\")\n", " model = torch.nn.DataParallel(model)\n", " model.to(device)\n", " checkpoint = torch.load(os.path.join(destination_for_weights, os.path.basename(segmentationWeightsURL)))\n", " model.load_state_dict(checkpoint['state_dict'])\n", "else:\n", " print(\"cuda is not available, cpu weights\")\n", " device = torch.device(\"cpu\")\n", " checkpoint = torch.load(os.path.join(destination_for_weights, os.path.basename(segmentationWeightsURL)), map_location = \"cpu\")\n", " state_dict_cpu = {k[7:]: v for (k, v) in checkpoint['state_dict'].items()}\n", " model.load_state_dict(state_dict_cpu)\n", "\n", "model.eval()\n", "\n", "def segment(input):\n", " inp = input\n", " x = inp.transpose([2, 0, 1]) # channels-first\n", " x = np.expand_dims(x, axis=0) # adding a batch dimension \n", " \n", " mean = x.mean(axis=(0, 2, 3))\n", " std = x.std(axis=(0, 2, 3))\n", " x = x - mean.reshape(1, 3, 1, 
1)\n", " x = x / std.reshape(1, 3, 1, 1)\n", " \n", " with torch.no_grad():\n", " x = torch.from_numpy(x).type('torch.FloatTensor').to(device)\n", " output = model(x) \n", " \n", " y = output['out'].numpy()\n", " y = y.squeeze()\n", " \n", " out = y>0 \n", " \n", " mask = inp.copy()\n", " mask[out] = np.array([0, 0, 255])\n", " \n", " return mask\n", "\n", "import gradio as gr\n", "\n", "i = gr.Image(shape=(112, 112), label=\"Echocardiogram\")\n", "o = gr.Image(label=\"Segmentation Mask\")\n", "\n", "examples = [[\"img1.jpg\"], [\"img2.jpg\"]]\n", "title = None #\"Left Ventricle Segmentation\"\n", "description = \"This semantic segmentation model identifies the left ventricle in echocardiogram images.\"\n", "# videos. Accurate evaluation of the motion and size of the left ventricle is crucial for the assessment of cardiac function and ejection fraction. In this interface, the user inputs apical-4-chamber images from echocardiography videos and the model will output a prediction of the localization of the left ventricle in blue. This model was trained on the publicly released EchoNet-Dynamic dataset of 10k echocardiogram videos with 20k expert annotations of the left ventricle and published as part of \u2018Video-based AI for beat-to-beat assessment of cardiac function\u2019 by Ouyang et al. in Nature, 2020.\"\n", "thumbnail = \"https://raw.githubusercontent.com/gradio-app/hub-echonet/master/thumbnail.png\"\n", "gr.Interface(segment, i, o, examples=examples, allow_flagging=False, analytics_enabled=False, thumbnail=thumbnail, cache_examples=False).launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: Echocardiogram-Segmentation"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio -f https://download.pytorch.org/whl/torch_stable.html numpy matplotlib wget torch torchvision "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/Echocardiogram-Segmentation/img1.jpg\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/Echocardiogram-Segmentation/img2.jpg"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import os\n", "import numpy as np\n", "import torch\n", "import torchvision\n", "import wget \n", "\n", "\n", "destination_folder = \"output\"\n", "destination_for_weights = \"weights\"\n", "\n", "if os.path.exists(destination_for_weights):\n", " print(\"The weights are at\", destination_for_weights)\n", "else:\n", " print(\"Creating folder at \", destination_for_weights, \" to store weights\")\n", " os.mkdir(destination_for_weights)\n", " \n", "segmentationWeightsURL = 'https://github.com/douyang/EchoNetDynamic/releases/download/v1.0.0/deeplabv3_resnet50_random.pt'\n", "\n", "if not os.path.exists(os.path.join(destination_for_weights, os.path.basename(segmentationWeightsURL))):\n", " print(\"Downloading Segmentation Weights, \", segmentationWeightsURL,\" to \",os.path.join(destination_for_weights, os.path.basename(segmentationWeightsURL)))\n", " filename = wget.download(segmentationWeightsURL, out = destination_for_weights)\n", "else:\n", " print(\"Segmentation Weights already present\")\n", "\n", "torch.cuda.empty_cache()\n", "\n", "def collate_fn(x):\n", " x, f = zip(*x)\n", " i = list(map(lambda t: t.shape[1], x))\n", " x = torch.as_tensor(np.swapaxes(np.concatenate(x, 1), 0, 1))\n", " return x, f, i\n", "\n", "model = torchvision.models.segmentation.deeplabv3_resnet50(pretrained=False, aux_loss=False)\n", "model.classifier[-1] = torch.nn.Conv2d(model.classifier[-1].in_channels, 1, kernel_size=model.classifier[-1].kernel_size)\n", "\n", "print(\"loading weights from \", os.path.join(destination_for_weights, \"deeplabv3_resnet50_random\"))\n", "\n", "if torch.cuda.is_available():\n", " print(\"cuda is available, original weights\")\n", " device = torch.device(\"cuda\")\n", " model = torch.nn.DataParallel(model)\n", " model.to(device)\n", " checkpoint = torch.load(os.path.join(destination_for_weights, os.path.basename(segmentationWeightsURL)))\n", " model.load_state_dict(checkpoint['state_dict'])\n", "else:\n", " print(\"cuda is not available, cpu weights\")\n", " device = torch.device(\"cpu\")\n", " checkpoint = torch.load(os.path.join(destination_for_weights, os.path.basename(segmentationWeightsURL)), map_location = \"cpu\")\n", " state_dict_cpu = {k[7:]: v for (k, v) in checkpoint['state_dict'].items()}\n", " model.load_state_dict(state_dict_cpu)\n", "\n", "model.eval()\n", "\n", "def segment(input):\n", " inp = input\n", " x = inp.transpose([2, 0, 1]) # channels-first\n", " x = np.expand_dims(x, axis=0) # adding a batch dimension \n", " \n", " mean = x.mean(axis=(0, 2, 3))\n", " std = x.std(axis=(0, 2, 3))\n", " x = x - mean.reshape(1, 3, 1, 
1)\n", " x = x / std.reshape(1, 3, 1, 1)\n", " \n", " with torch.no_grad():\n", " x = torch.from_numpy(x).type('torch.FloatTensor').to(device)\n", " output = model(x) \n", " \n", " y = output['out'].numpy()\n", " y = y.squeeze()\n", " \n", " out = y>0 \n", " \n", " mask = inp.copy()\n", " mask[out] = np.array([0, 0, 255])\n", " \n", " return mask\n", "\n", "import gradio as gr\n", "\n", "i = gr.Image(label=\"Echocardiogram\")\n", "o = gr.Image(label=\"Segmentation Mask\")\n", "\n", "examples = [[\"img1.jpg\"], [\"img2.jpg\"]]\n", "title = None #\"Left Ventricle Segmentation\"\n", "description = \"This semantic segmentation model identifies the left ventricle in echocardiogram images.\"\n", "# videos. Accurate evaluation of the motion and size of the left ventricle is crucial for the assessment of cardiac function and ejection fraction. In this interface, the user inputs apical-4-chamber images from echocardiography videos and the model will output a prediction of the localization of the left ventricle in blue. This model was trained on the publicly released EchoNet-Dynamic dataset of 10k echocardiogram videos with 20k expert annotations of the left ventricle and published as part of \u2018Video-based AI for beat-to-beat assessment of cardiac function\u2019 by Ouyang et al. in Nature, 2020.\"\n", "thumbnail = \"https://raw.githubusercontent.com/gradio-app/hub-echonet/master/thumbnail.png\"\n", "gr.Interface(segment, i, o, examples=examples, analytics_enabled=False, thumbnail=thumbnail, cache_examples=False).launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
4 changes: 2 additions & 2 deletions demo/Echocardiogram-Segmentation/run.py
@@ -77,12 +77,12 @@ def segment(input):

import gradio as gr

- i = gr.Image(shape=(112, 112), label="Echocardiogram")
+ i = gr.Image(label="Echocardiogram")
o = gr.Image(label="Segmentation Mask")

examples = [["img1.jpg"], ["img2.jpg"]]
title = None #"Left Ventricle Segmentation"
description = "This semantic segmentation model identifies the left ventricle in echocardiogram images."
# videos. Accurate evaluation of the motion and size of the left ventricle is crucial for the assessment of cardiac function and ejection fraction. In this interface, the user inputs apical-4-chamber images from echocardiography videos and the model will output a prediction of the localization of the left ventricle in blue. This model was trained on the publicly released EchoNet-Dynamic dataset of 10k echocardiogram videos with 20k expert annotations of the left ventricle and published as part of ‘Video-based AI for beat-to-beat assessment of cardiac function’ by Ouyang et al. in Nature, 2020."
thumbnail = "https://raw.githubusercontent.com/gradio-app/hub-echonet/master/thumbnail.png"
- gr.Interface(segment, i, o, examples=examples, allow_flagging=False, analytics_enabled=False, thumbnail=thumbnail, cache_examples=False).launch()
+ gr.Interface(segment, i, o, examples=examples, analytics_enabled=False, thumbnail=thumbnail, cache_examples=False).launch()
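The removed `shape=(112, 112)` argument used to resize inputs in older Gradio versions. If that fixed size still matters for the segmentation model, one option is to resize inside the prediction code instead. A minimal sketch under that assumption (not part of this commit):

```python
# Sketch only: resize the incoming frame to 112x112 before segmentation,
# since gr.Image no longer accepts a shape= argument in Gradio 4.
import numpy as np
from PIL import Image


def preprocess(input_img: np.ndarray) -> np.ndarray:
    # PIL's resize takes (width, height); returns a new 112x112 image.
    resized = Image.fromarray(input_img).resize((112, 112))
    return np.asarray(resized)
```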
2 changes: 1 addition & 1 deletion demo/animeganv2/run.ipynb
@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: animeganv2\n", "### Recreate the viral AnimeGAN image transformation demo.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch torchvision Pillow gdown numpy scipy cmake onnxruntime-gpu opencv-python-headless"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/animeganv2/gongyoo.jpeg\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/animeganv2/groot.jpeg"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import torch\n", "\n", "model2 = torch.hub.load(\n", " \"AK391/animegan2-pytorch:main\",\n", " \"generator\",\n", " pretrained=True,\n", " progress=False\n", ")\n", "model1 = torch.hub.load(\"AK391/animegan2-pytorch:main\", \"generator\", pretrained=\"face_paint_512_v1\")\n", "face2paint = torch.hub.load(\n", " 'AK391/animegan2-pytorch:main', 'face2paint', \n", " size=512,side_by_side=False\n", ")\n", "\n", "def inference(img, ver):\n", " if ver == 'version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)':\n", " out = face2paint(model2, img)\n", " else:\n", " out = face2paint(model1, img)\n", " return out\n", "\n", "title = \"AnimeGANv2\"\n", "description = \"Gradio Demo for AnimeGanv2 Face Portrait. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Please use a cropped portrait picture for best results similar to the examples below.\"\n", "article = \"<p style='text-align: center'><a href='https://github.com/bryandlee/animegan2-pytorch' target='_blank'>Github Repo Pytorch</a></p> <center><img src='https://visitor-badge.glitch.me/badge?page_id=akhaliq_animegan' alt='visitor badge'></center></p>\"\n", "examples=[['groot.jpeg','version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)'],['gongyoo.jpeg','version 1 (\ud83d\udd3a stylization, \ud83d\udd3b robustness)']]\n", "\n", "demo = gr.Interface(\n", " fn=inference, \n", " inputs=[gr.inputs.Image(type=\"pil\"),gr.inputs.Radio(['version 1 (\ud83d\udd3a stylization, \ud83d\udd3b robustness)','version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)'], type=\"value\", default='version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)', label='version')], \n", " outputs=gr.outputs.Image(type=\"pil\"),\n", " title=title,\n", " description=description,\n", " article=article,\n", " examples=examples)\n", "\n", "demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: animeganv2\n", "### Recreate the viral AnimeGAN image transformation demo.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch torchvision Pillow gdown numpy scipy cmake onnxruntime-gpu opencv-python-headless"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/animeganv2/gongyoo.jpeg\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/animeganv2/groot.jpeg"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import torch\n", "\n", "model2 = torch.hub.load(\n", " \"AK391/animegan2-pytorch:main\",\n", " \"generator\",\n", " pretrained=True,\n", " progress=False\n", ")\n", "model1 = torch.hub.load(\"AK391/animegan2-pytorch:main\", \"generator\", pretrained=\"face_paint_512_v1\")\n", "face2paint = torch.hub.load(\n", " 'AK391/animegan2-pytorch:main', 'face2paint', \n", " size=512,side_by_side=False\n", ")\n", "\n", "def inference(img, ver):\n", " if ver == 'version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)':\n", " out = face2paint(model2, img)\n", " else:\n", " out = face2paint(model1, img)\n", " return out\n", "\n", "title = \"AnimeGANv2\"\n", "description = \"Gradio Demo for AnimeGanv2 Face Portrait. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Please use a cropped portrait picture for best results similar to the examples below.\"\n", "article = \"<p style='text-align: center'><a href='https://github.com/bryandlee/animegan2-pytorch' target='_blank'>Github Repo Pytorch</a></p> <center><img src='https://visitor-badge.glitch.me/badge?page_id=akhaliq_animegan' alt='visitor badge'></center></p>\"\n", "examples=[['groot.jpeg','version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)'],['gongyoo.jpeg','version 1 (\ud83d\udd3a stylization, \ud83d\udd3b robustness)']]\n", "\n", "demo = gr.Interface(\n", " fn=inference, \n", " inputs=[gr.Image(type=\"pil\"),gr.Radio(['version 1 (\ud83d\udd3a stylization, \ud83d\udd3b robustness)','version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)'], type=\"value\", value='version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)', label='version')], \n", " outputs=gr.Image(type=\"pil\"),\n", " title=title,\n", " description=description,\n", " article=article,\n", " examples=examples)\n", "\n", "demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
4 changes: 2 additions & 2 deletions demo/animeganv2/run.py
@@ -27,8 +27,8 @@ def inference(img, ver):

demo = gr.Interface(
fn=inference,
- inputs=[gr.inputs.Image(type="pil"),gr.inputs.Radio(['version 1 (🔺 stylization, 🔻 robustness)','version 2 (🔺 robustness,🔻 stylization)'], type="value", default='version 2 (🔺 robustness,🔻 stylization)', label='version')],
- outputs=gr.outputs.Image(type="pil"),
+ inputs=[gr.Image(type="pil"),gr.Radio(['version 1 (🔺 stylization, 🔻 robustness)','version 2 (🔺 robustness,🔻 stylization)'], type="value", value='version 2 (🔺 robustness,🔻 stylization)', label='version')],
+ outputs=gr.Image(type="pil"),
title=title,
description=description,
article=article,
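The animeganv2 change follows the same migration pattern as the rest of this commit: components move from `gr.inputs`/`gr.outputs` to the top-level `gr` namespace, and `default=` becomes `value=`. A minimal, self-contained sketch of that Gradio 4 style, with a placeholder function standing in for the model call:

```python
# Illustrative Gradio 4 style sketch: top-level components and value= instead of default=.
import gradio as gr

demo = gr.Interface(
    fn=lambda img, ver: img,  # placeholder: echoes the image instead of running AnimeGAN
    inputs=[
        gr.Image(type="pil"),
        gr.Radio(["version 1", "version 2"], value="version 2", label="version"),
    ],
    outputs=gr.Image(type="pil"),
)

if __name__ == "__main__":
    demo.launch()
```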
Binary file removed demo/blocks_mask/image.png
Binary file removed demo/blocks_mask/lion.jpg
Binary file removed demo/blocks_mask/lion.webp
