[{"category": "\ud83d\udd8a\ufe0f Text & Natural Language Processing", "demos": [{"name": "Hello World", "dir": "hello_world", "code": "import gradio as gr\n\ndef greet(name):\n return \"Hello \" + name + \"!\"\n\ndemo = gr.Interface(fn=greet, inputs=\"text\", outputs=\"text\")\n \nif __name__ == \"__main__\":\n demo.launch() ", "text": "The simplest possible Gradio demo. It wraps a 'Hello {name}!' function in an Interface that accepts and returns text."}, {"name": "Text Generation", "dir": "text_generation", "code": "import gradio as gr\nfrom transformers import pipeline\n\ngenerator = pipeline('text-generation', model='gpt2')\n\ndef generate(text):\n result = generator(text, max_length=30, num_return_sequences=1)\n return result[0][\"generated_text\"]\n\nexamples = [\n [\"The Moon's orbit around Earth has\"],\n [\"The smooth Borealis basin in the Northern Hemisphere covers 40%\"],\n]\n\ndemo = gr.Interface(\n fn=generate,\n inputs=gr.inputs.Textbox(lines=5, label=\"Input Text\"),\n outputs=gr.outputs.Textbox(label=\"Generated Text\"),\n examples=examples\n)\n\ndemo.launch()\n", "text": "This text generation demo takes in input text and returns generated text. It uses the Transformers library to set up the model and has two examples."}, {"name": "Autocomplete", "dir": "autocomplete", "code": "import gradio as gr\nimport os\n\n# save your HF API token from https://hf.co/settings/tokens as an env variable to avoid rate limiting\nauth_token = os.getenv(\"auth_token\")\n\n# load a model from https://hf.co/models as an interface, then use it as an api \n# you can remove the api_key parameter if you don't care about rate limiting. 
\napi = gr.load(\"huggingface/gpt2-xl\", hf_token=auth_token)\n\ndef complete_with_gpt(text):\n return text[:-50] + api(text[-50:])\n\nwith gr.Blocks() as demo:\n textbox = gr.Textbox(placeholder=\"Type here...\", lines=4)\n btn = gr.Button(\"Autocomplete\")\n \n # define what will run when the button is clicked, here the textbox is used as both an input and an output\n btn.click(fn=complete_with_gpt, inputs=textbox, outputs=textbox, queue=False)\n\ndemo.launch()", "text": "This text generation demo works like autocomplete. There's only one textbox and it's used for both the input and the output. The demo loads the model as an interface, and uses that interface as an API. It then uses blocks to create the UI. All of this is done in less than 10 lines of code."}, {"name": "Sentiment Analysis", "dir": "sentiment_analysis", "code": "import gradio as gr\nimport nltk\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\n\nnltk.download(\"vader_lexicon\")\nsid = SentimentIntensityAnalyzer()\n\ndef sentiment_analysis(text):\n scores = sid.polarity_scores(text)\n del scores[\"compound\"]\n return scores\n\ndemo = gr.Interface(\n fn=sentiment_analysis, \n inputs=gr.Textbox(placeholder=\"Enter a positive or negative sentence here...\"), \n outputs=\"label\", \n interpretation=\"default\",\n examples=[[\"This is wonderful!\"]])\n\ndemo.launch()", "text": "This sentiment analysis demo takes in input text and returns its classification for either positive, negative or neutral using Gradio's Label output. 
It also uses the default interpretation method so users can click the Interpret button after a submission and see which words had the biggest effect on the output."}, {"name": "Named Entity Recognition", "dir": "text_analysis", "code": "import gradio as gr\nimport os\nos.system('python -m spacy download en_core_web_sm')\nimport spacy\nfrom spacy import displacy\n\nnlp = spacy.load(\"en_core_web_sm\")\n\ndef text_analysis(text):\n doc = nlp(text)\n html = displacy.render(doc, style=\"dep\", page=True)\n html = (\n \"<div style='max-width:100%; max-height:360px; overflow:auto'>\"\n + html\n + \"</div>\"\n )\n pos_count = {\n \"char_count\": len(text),\n \"token_count\": 0,\n }\n pos_tokens = []\n\n for token in doc:\n pos_tokens.extend([(token.text, token.pos_), (\" \", None)])\n\n return pos_tokens, pos_count, html\n\ndemo = gr.Interface(\n text_analysis,\n gr.Textbox(placeholder=\"Enter sentence here...\"),\n [\"highlight\", \"json\", \"html\"],\n examples=[\n [\"What a beautiful morning for a walk!\"],\n [\"It was the best of times, it was the worst of times.\"],\n ],\n)\n\ndemo.launch()\n", "text": "This simple demo takes advantage of Gradio's HighlightedText, JSON and HTML outputs to create a clear NER segmentation."}, {"name": "Multilingual Translation", "dir": "translation", "code": "import gradio as gr\nfrom transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline\nimport torch\n\n# this model was loaded from https://hf.co/models\nmodel = AutoModelForSeq2SeqLM.from_pretrained(\"facebook/nllb-200-distilled-600M\")\ntokenizer = AutoTokenizer.from_pretrained(\"facebook/nllb-200-distilled-600M\")\ndevice = 0 if torch.cuda.is_available() else -1\nLANGS = [\"ace_Arab\", \"eng_Latn\", \"fra_Latn\", \"spa_Latn\"]\n\ndef translate(text, src_lang, tgt_lang):\n \"\"\"\n Translate the text from source lang to target lang\n \"\"\"\n translation_pipeline = pipeline(\"translation\", model=model, tokenizer=tokenizer, src_lang=src_lang, tgt_lang=tgt_lang, 
max_length=400, device=device)\n result = translation_pipeline(text)\n return result[0]['translation_text']\n\ndemo = gr.Interface(\n fn=translate,\n inputs=[\n gr.components.Textbox(label=\"Text\"),\n gr.components.Dropdown(label=\"Source Language\", choices=LANGS),\n gr.components.Dropdown(label=\"Target Language\", choices=LANGS),\n ],\n outputs=[\"text\"],\n examples=[[\"Building a translation demo with Gradio is so easy!\", \"eng_Latn\", \"spa_Latn\"]],\n cache_examples=False,\n title=\"Translation Demo\",\n description=\"This demo is a simplified version of the original [NLLB-Translator](https://huggingface.co/spaces/Narrativaai/NLLB-Translator) space\"\n)\n\ndemo.launch()", "text": "This translation demo takes in the text, source and target languages, and returns the translation. It uses the Transformers library to set up the model and has a title, description, and example."}]}, {"category": "\ud83d\uddbc\ufe0f Images & Computer Vision", "demos": [{"name": "Image Classification", "dir": "image_classification", "code": "import gradio as gr\nimport torch\nimport requests\nfrom torchvision import transforms\n\nmodel = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()\nresponse = requests.get(\"https://git.io/JJkYN\")\nlabels = response.text.split(\"\\n\")\n\ndef predict(inp):\n inp = transforms.ToTensor()(inp).unsqueeze(0)\n with torch.no_grad():\n prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)\n confidences = {labels[i]: float(prediction[i]) for i in range(1000)} \n return confidences\n\ndemo = gr.Interface(fn=predict, \n inputs=gr.inputs.Image(type=\"pil\"),\n outputs=gr.outputs.Label(num_top_classes=3),\n examples=[[\"cheetah.jpg\"]],\n )\n \ndemo.launch()", "text": "Simple image classification in Pytorch with Gradio's Image input and Label output."}, {"name": "Image Segmentation", "dir": "image_segmentation", "code": "import gradio as gr\nimport numpy as np\nimport random\n\nwith gr.Blocks() as demo:\n 
section_labels = [\n \"apple\",\n \"banana\",\n \"carrot\",\n \"donut\",\n \"eggplant\",\n \"fish\",\n \"grapes\",\n \"hamburger\",\n \"ice cream\",\n \"juice\",\n ]\n\n with gr.Row():\n num_boxes = gr.Slider(0, 5, 2, step=1, label=\"Number of boxes\")\n num_segments = gr.Slider(0, 5, 1, step=1, label=\"Number of segments\")\n\n with gr.Row():\n img_input = gr.Image()\n img_output = gr.AnnotatedImage().style(\n color_map={\"banana\": \"#a89a00\", \"carrot\": \"#ffae00\"}\n )\n\n section_btn = gr.Button(\"Identify Sections\")\n selected_section = gr.Textbox(label=\"Selected Section\")\n\n def section(img, num_boxes, num_segments):\n sections = []\n for a in range(num_boxes):\n x = random.randint(0, img.shape[1])\n y = random.randint(0, img.shape[0])\n w = random.randint(0, img.shape[1] - x)\n h = random.randint(0, img.shape[0] - y)\n sections.append(((x, y, x + w, y + h), section_labels[a]))\n for b in range(num_segments):\n x = random.randint(0, img.shape[1])\n y = random.randint(0, img.shape[0])\n r = random.randint(0, min(x, y, img.shape[1] - x, img.shape[0] - y))\n mask = np.zeros(img.shape[:2])\n for i in range(img.shape[0]):\n for j in range(img.shape[1]):\n dist_square = (i - y) ** 2 + (j - x) ** 2\n if dist_square < r**2:\n mask[i, j] = round((r**2 - dist_square) / r**2 * 4) / 4\n sections.append((mask, section_labels[b + num_boxes]))\n return (img, sections)\n\n section_btn.click(section, [img_input, num_boxes, num_segments], img_output)\n\n def select_section(evt: gr.SelectData):\n return section_labels[evt.index]\n\n img_output.select(select_section, None, selected_section)\n\nif __name__ == \"__main__\":\n demo.launch()\n", "text": "Simple image segmentation using gradio's AnnotatedImage component."}, {"name": "Image Transformation with AnimeGAN", "dir": "animeganv2", "code": "import gradio as gr\nimport torch\n\nmodel2 = torch.hub.load(\n \"AK391/animegan2-pytorch:main\",\n \"generator\",\n pretrained=True,\n progress=False\n)\nmodel1 = 
torch.hub.load(\"AK391/animegan2-pytorch:main\", \"generator\", pretrained=\"face_paint_512_v1\")\nface2paint = torch.hub.load(\n 'AK391/animegan2-pytorch:main', 'face2paint', \n size=512,side_by_side=False\n)\n\ndef inference(img, ver):\n if ver == 'version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)':\n out = face2paint(model2, img)\n else:\n out = face2paint(model1, img)\n return out\n\ntitle = \"AnimeGANv2\"\ndescription = \"Gradio Demo for AnimeGanv2 Face Portrait. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Please use a cropped portrait picture for best results similar to the examples below.\"\narticle = \"<p style='text-align: center'><a href='https://github.com/bryandlee/animegan2-pytorch' target='_blank'>Github Repo Pytorch</a></p> <center><img src='https://visitor-badge.glitch.me/badge?page_id=akhaliq_animegan' alt='visitor badge'></center></p>\"\nexamples=[['groot.jpeg','version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)'],['gongyoo.jpeg','version 1 (\ud83d\udd3a stylization, \ud83d\udd3b robustness)']]\n\ndemo = gr.Interface(\n fn=inference, \n inputs=[gr.inputs.Image(type=\"pil\"),gr.inputs.Radio(['version 1 (\ud83d\udd3a stylization, \ud83d\udd3b robustness)','version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)'], type=\"value\", default='version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)', label='version')], \n outputs=gr.outputs.Image(type=\"pil\"),\n title=title,\n description=description,\n article=article,\n examples=examples)\n\ndemo.launch()", "text": "Recreate the viral AnimeGAN image transformation demo."}, {"name": "Image Generation (Fake GAN)", "dir": "fake_gan", "code": "# This demo needs to be run from the repo folder.\n# python demo/fake_gan/run.py\nimport random\n\nimport gradio as gr\n\n\ndef fake_gan():\n images = [\n (random.choice(\n [\n 
\"https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n \"https://images.unsplash.com/photo-1554151228-14d9def656e4?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=386&q=80\",\n \"https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80\",\n \"https://images.unsplash.com/photo-1546456073-92b9f0a8d413?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n \"https://images.unsplash.com/photo-1601412436009-d964bd02edbc?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=464&q=80\",\n ]\n ), f\"label {i}\" if i != 0 else \"label\" * 50)\n for i in range(3)\n ]\n return images\n\n\nwith gr.Blocks() as demo:\n with gr.Column(variant=\"panel\"):\n with gr.Row(variant=\"compact\"):\n text = gr.Textbox(\n label=\"Enter your prompt\",\n show_label=False,\n max_lines=1,\n placeholder=\"Enter your prompt\",\n ).style(\n container=False,\n )\n btn = gr.Button(\"Generate image\").style(full_width=False)\n\n gallery = gr.Gallery(\n label=\"Generated images\", show_label=False, elem_id=\"gallery\"\n ).style(columns=[2], rows=[2], object_fit=\"contain\", height=\"auto\")\n\n btn.click(fake_gan, None, gallery)\n\nif __name__ == \"__main__\":\n demo.launch()\n", "text": "This is a fake GAN that shows how to create a text-to-image interface for image generation. 
Check out the Stable Diffusion demo for more: https://hf.co/spaces/stabilityai/stable-diffusion/"}, {"name": "Iterative Output", "dir": "fake_diffusion", "code": "import gradio as gr\nimport numpy as np\nimport time\n\n# define core fn, which returns a generator {steps} times before returning the image\ndef fake_diffusion(steps):\n for _ in range(steps):\n time.sleep(1)\n image = np.random.random((600, 600, 3))\n yield image\n image = \"https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg\"\n yield image\n\n\ndemo = gr.Interface(fake_diffusion, inputs=gr.Slider(1, 10, 3), outputs=\"image\")\n\n# define queue - required for generators\ndemo.queue()\n\ndemo.launch()\n", "text": "This demo uses a fake model to showcase iterative output. The Image output will update every time a generator is returned until the final image."}, {"name": "3D Models", "dir": "depth_estimation", "code": "import gradio as gr\nfrom transformers import DPTFeatureExtractor, DPTForDepthEstimation\nimport torch\nimport numpy as np\nfrom PIL import Image\nimport open3d as o3d\nfrom pathlib import Path\n\nfeature_extractor = DPTFeatureExtractor.from_pretrained(\"Intel/dpt-large\")\nmodel = DPTForDepthEstimation.from_pretrained(\"Intel/dpt-large\")\n\ndef process_image(image_path):\n image_path = Path(image_path)\n image_raw = Image.open(image_path)\n image = image_raw.resize(\n (800, int(800 * image_raw.size[1] / image_raw.size[0])),\n Image.Resampling.LANCZOS)\n\n # prepare image for the model\n encoding = feature_extractor(image, return_tensors=\"pt\")\n\n # forward pass\n with torch.no_grad():\n outputs = model(**encoding)\n predicted_depth = outputs.predicted_depth\n\n # interpolate to original size\n prediction = torch.nn.functional.interpolate(\n predicted_depth.unsqueeze(1),\n size=image.size[::-1],\n mode=\"bicubic\",\n align_corners=False,\n ).squeeze()\n output = prediction.cpu().numpy()\n depth_image = (output * 255 / np.max(output)).astype('uint8')\n try:\n gltf_path = 
create_3d_obj(np.array(image), depth_image, image_path)\n img = Image.fromarray(depth_image)\n return [img, gltf_path, gltf_path]\n except Exception:\n gltf_path = create_3d_obj(\n np.array(image), depth_image, image_path, depth=8)\n img = Image.fromarray(depth_image)\n return [img, gltf_path, gltf_path]\n except:\n print(\"Error reconstructing 3D model\")\n raise Exception(\"Error reconstructing 3D model\")\n\n\ndef create_3d_obj(rgb_image, depth_image, image_path, depth=10):\n depth_o3d = o3d.geometry.Image(depth_image)\n image_o3d = o3d.geometry.Image(rgb_image)\n rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(\n image_o3d, depth_o3d, convert_rgb_to_intensity=False)\n w = int(depth_image.shape[1])\n h = int(depth_image.shape[0])\n\n camera_intrinsic = o3d.camera.PinholeCameraIntrinsic()\n camera_intrinsic.set_intrinsics(w, h, 500, 500, w/2, h/2)\n\n pcd = o3d.geometry.PointCloud.create_from_rgbd_image(\n rgbd_image, camera_intrinsic)\n\n print('normals')\n pcd.normals = o3d.utility.Vector3dVector(\n np.zeros((1, 3))) # invalidate existing normals\n pcd.estimate_normals(\n search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.01, max_nn=30))\n pcd.orient_normals_towards_camera_location(\n camera_location=np.array([0., 0., 1000.]))\n pcd.transform([[1, 0, 0, 0],\n [0, -1, 0, 0],\n [0, 0, -1, 0],\n [0, 0, 0, 1]])\n pcd.transform([[-1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n\n print('run Poisson surface reconstruction')\n with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug):\n mesh_raw, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(\n pcd, depth=depth, width=0, scale=1.1, linear_fit=True)\n\n voxel_size = max(mesh_raw.get_max_bound() - mesh_raw.get_min_bound()) / 256\n print(f'voxel_size = {voxel_size:e}')\n mesh = mesh_raw.simplify_vertex_clustering(\n voxel_size=voxel_size,\n contraction=o3d.geometry.SimplificationContraction.Average)\n\n # vertices_to_remove = densities < 
np.quantile(densities, 0.001)\n # mesh.remove_vertices_by_mask(vertices_to_remove)\n bbox = pcd.get_axis_aligned_bounding_box()\n mesh_crop = mesh.crop(bbox)\n gltf_path = f'./{image_path.stem}.gltf'\n o3d.io.write_triangle_mesh(\n gltf_path, mesh_crop, write_triangle_uvs=True)\n return gltf_path\n\ntitle = \"Demo: zero-shot depth estimation with DPT + 3D Point Cloud\"\ndescription = \"This demo is a variation from the original <a href='https://huggingface.co/spaces/nielsr/dpt-depth-estimation' target='_blank'>DPT Demo</a>. It uses the DPT model to predict the depth of an image and then uses 3D Point Cloud to create a 3D object.\"\nexamples = [[\"examples/1-jonathan-borba-CgWTqYxHEkg-unsplash.jpg\"]]\n\niface = gr.Interface(fn=process_image,\n inputs=[gr.Image(\n type=\"filepath\", label=\"Input Image\")],\n outputs=[gr.Image(label=\"predicted depth\", type=\"pil\"),\n gr.Model3D(label=\"3d mesh reconstruction\", clear_color=[\n 1.0, 1.0, 1.0, 1.0]),\n gr.File(label=\"3d gLTF\")],\n title=title,\n description=description,\n examples=examples,\n allow_flagging=\"never\",\n cache_examples=False)\n\niface.launch(debug=True, enable_queue=False)", "text": "A demo for predicting the depth of an image and generating a 3D model of it."}]}, {"category": "\ud83d\udcc8 Tabular Data & Plots", "demos": [{"name": "Interactive Dashboard", "dir": "dashboard", "code": "import gradio as gr\nimport pandas as pd\nimport plotly.express as px\nfrom helpers import *\n\n\nLIBRARIES = [\"accelerate\", \"datasets\", \"diffusers\", \"evaluate\", \"gradio\", \"hub_docs\",\n \"huggingface_hub\", \"optimum\", \"pytorch_image_models\", \"tokenizers\", \"transformers\"]\n\n\ndef create_pip_plot(libraries, pip_choices):\n if \"Pip\" not in pip_choices:\n return gr.update(visible=False)\n output = retrieve_pip_installs(libraries, \"Cumulated\" in pip_choices)\n df = pd.DataFrame(output).melt(id_vars=\"day\")\n plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n title=\"Pip 
installs\")\n plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n return gr.update(value=plot, visible=True)\n\n\ndef create_star_plot(libraries, star_choices):\n if \"Stars\" not in star_choices:\n return gr.update(visible=False)\n output = retrieve_stars(libraries, \"Week over Week\" in star_choices)\n df = pd.DataFrame(output).melt(id_vars=\"day\")\n plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n title=\"Number of stargazers\")\n plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n return gr.update(value=plot, visible=True)\n\n\ndef create_issue_plot(libraries, issue_choices):\n if \"Issue\" not in issue_choices:\n return gr.update(visible=False)\n output = retrieve_issues(libraries,\n exclude_org_members=\"Exclude org members\" in issue_choices,\n week_over_week=\"Week over Week\" in issue_choices)\n df = pd.DataFrame(output).melt(id_vars=\"day\")\n plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n title=\"Cumulated number of issues, PRs, and comments\",\n )\n plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n return gr.update(value=plot, visible=True)\n\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n with gr.Box():\n gr.Markdown(\"## Select libraries to display\")\n libraries = gr.CheckboxGroup(choices=LIBRARIES, label=\"\")\n with gr.Column():\n with gr.Box():\n gr.Markdown(\"## Select graphs to display\")\n pip = gr.CheckboxGroup(choices=[\"Pip\", \"Cumulated\"], label=\"\")\n stars = gr.CheckboxGroup(choices=[\"Stars\", \"Week over Week\"], label=\"\")\n issues = gr.CheckboxGroup(choices=[\"Issue\", \"Exclude org members\", \"Week over Week\"], label=\"\")\n with gr.Row():\n fetch = gr.Button(value=\"Fetch\")\n with gr.Row():\n with gr.Column():\n pip_plot = gr.Plot(visible=False)\n star_plot = gr.Plot(visible=False)\n issue_plot = gr.Plot(visible=False)\n\n fetch.click(create_pip_plot, inputs=[libraries, 
pip], outputs=pip_plot)\n fetch.click(create_star_plot, inputs=[libraries, stars], outputs=star_plot)\n fetch.click(create_issue_plot, inputs=[libraries, issues], outputs=issue_plot)\n\n\nif __name__ == \"__main__\":\n demo.launch()", "text": "This demo shows how you can build an interactive dashboard with gradio. Click on a python library on the left hand side and then on the right hand side click on the metric you'd like to see plot over time. Data is pulled from HuggingFace Hub datasets."}, {"name": "Dashboard with Live Updates", "dir": "live_dashboard", "code": "import math\n\nimport pandas as pd\n\nimport gradio as gr\nimport datetime\nimport numpy as np\n\n\ndef get_time():\n return datetime.datetime.now()\n\n\nplot_end = 2 * math.pi\n\n\ndef get_plot(period=1):\n global plot_end\n x = np.arange(plot_end - 2 * math.pi, plot_end, 0.02)\n y = np.sin(2 * math.pi * period * x)\n update = gr.LinePlot.update(\n value=pd.DataFrame({\"x\": x, \"y\": y}),\n x=\"x\",\n y=\"y\",\n title=\"Plot (updates every second)\",\n width=600,\n height=350,\n )\n plot_end += 2 * math.pi\n if plot_end > 1000:\n plot_end = 2 * math.pi\n return update\n\n\nwith gr.Blocks() as demo:\n with gr.Row():\n with gr.Column():\n c_time2 = gr.Textbox(label=\"Current Time refreshed every second\")\n gr.Textbox(\n \"Change the value of the slider to automatically update the plot\",\n label=\"\",\n )\n period = gr.Slider(\n label=\"Period of plot\", value=1, minimum=0, maximum=10, step=1\n )\n plot = gr.LinePlot(show_label=False)\n with gr.Column():\n name = gr.Textbox(label=\"Enter your name\")\n greeting = gr.Textbox(label=\"Greeting\")\n button = gr.Button(value=\"Greet\")\n button.click(lambda s: f\"Hello {s}\", name, greeting)\n\n demo.load(lambda: datetime.datetime.now(), None, c_time2, every=1)\n dep = demo.load(get_plot, None, plot, every=1)\n period.change(get_plot, period, plot, every=1, cancels=[dep])\n\nif __name__ == \"__main__\":\n demo.queue().launch()\n", "text": "This demo shows 
how you can build a live interactive dashboard with gradio.\nThe current time is refreshed every second and the plot every half second by using the 'every' keyword in the event handler.\nChanging the value of the slider will control the period of the sine curve (the distance between peaks). "}, {"name": "Interactive Map of AirBnB Locations", "dir": "map_airbnb", "code": "import gradio as gr\nimport plotly.graph_objects as go\nfrom datasets import load_dataset\n\ndataset = load_dataset(\"gradio/NYC-Airbnb-Open-Data\", split=\"train\")\ndf = dataset.to_pandas()\n\ndef filter_map(min_price, max_price, boroughs):\n\n filtered_df = df[(df['neighbourhood_group'].isin(boroughs)) & \n (df['price'] > min_price) & (df['price'] < max_price)]\n names = filtered_df[\"name\"].tolist()\n prices = filtered_df[\"price\"].tolist()\n text_list = [(names[i], prices[i]) for i in range(0, len(names))]\n fig = go.Figure(go.Scattermapbox(\n customdata=text_list,\n lat=filtered_df['latitude'].tolist(),\n lon=filtered_df['longitude'].tolist(),\n mode='markers',\n marker=go.scattermapbox.Marker(\n size=6\n ),\n hoverinfo=\"text\",\n hovertemplate='<b>Name</b>: %{customdata[0]}<br><b>Price</b>: $%{customdata[1]}'\n ))\n\n fig.update_layout(\n mapbox_style=\"open-street-map\",\n hovermode='closest',\n mapbox=dict(\n bearing=0,\n center=go.layout.mapbox.Center(\n lat=40.67,\n lon=-73.90\n ),\n pitch=0,\n zoom=9\n ),\n )\n\n return fig\n\nwith gr.Blocks() as demo:\n with gr.Column():\n with gr.Row():\n min_price = gr.Number(value=250, label=\"Minimum Price\")\n max_price = gr.Number(value=1000, label=\"Maximum Price\")\n boroughs = gr.CheckboxGroup(choices=[\"Queens\", \"Brooklyn\", \"Manhattan\", \"Bronx\", \"Staten Island\"], value=[\"Queens\", \"Brooklyn\"], label=\"Select Boroughs:\")\n btn = gr.Button(value=\"Update Filter\")\n map = gr.Plot().style()\n demo.load(filter_map, [min_price, max_price, boroughs], map)\n btn.click(filter_map, [min_price, max_price, boroughs], map)\n\nif __name__ 
== \"__main__\":\n demo.launch()", "text": "Display an interactive map of AirBnB locations with Plotly. Data is hosted on HuggingFace Datasets. "}, {"name": "Outbreak Forecast", "dir": "outbreak_forecast", "code": "import altair\n\nimport gradio as gr\nfrom math import sqrt\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport plotly.express as px\nimport pandas as pd\n\n\ndef outbreak(plot_type, r, month, countries, social_distancing):\n months = [\"January\", \"February\", \"March\", \"April\", \"May\"]\n m = months.index(month)\n start_day = 30 * m\n final_day = 30 * (m + 1)\n x = np.arange(start_day, final_day + 1)\n pop_count = {\"USA\": 350, \"Canada\": 40, \"Mexico\": 300, \"UK\": 120}\n if social_distancing:\n r = sqrt(r)\n df = pd.DataFrame({\"day\": x})\n for country in countries:\n df[country] = x ** (r) * (pop_count[country] + 1)\n\n if plot_type == \"Matplotlib\":\n fig = plt.figure()\n plt.plot(df[\"day\"], df[countries].to_numpy())\n plt.title(\"Outbreak in \" + month)\n plt.ylabel(\"Cases\")\n plt.xlabel(\"Days since Day 0\")\n plt.legend(countries)\n return fig\n elif plot_type == \"Plotly\":\n fig = px.line(df, x=\"day\", y=countries)\n fig.update_layout(\n title=\"Outbreak in \" + month,\n xaxis_title=\"Days Since Day 0\",\n yaxis_title=\"Cases\",\n )\n return fig\n elif plot_type == \"Altair\":\n df = df.melt(id_vars=\"day\").rename(columns={\"variable\": \"country\"})\n fig = altair.Chart(df).mark_line().encode(x=\"day\", y='value', color='country')\n return fig\n else:\n raise ValueError(\"A plot type must be selected\")\n\n\ninputs = [\n gr.Dropdown([\"Matplotlib\", \"Plotly\", \"Altair\"], label=\"Plot Type\"),\n gr.Slider(1, 4, 3.2, label=\"R\"),\n gr.Dropdown([\"January\", \"February\", \"March\", \"April\", \"May\"], label=\"Month\"),\n gr.CheckboxGroup(\n [\"USA\", \"Canada\", \"Mexico\", \"UK\"], label=\"Countries\", value=[\"USA\", \"Canada\"]\n ),\n gr.Checkbox(label=\"Social Distancing?\"),\n]\noutputs = gr.Plot()\n\ndemo = 
gr.Interface(\n fn=outbreak,\n inputs=inputs,\n outputs=outputs,\n examples=[\n [\"Matplotlib\", 2, \"March\", [\"Mexico\", \"UK\"], True],\n [\"Altair\", 2, \"March\", [\"Mexico\", \"Canada\"], True],\n [\"Plotly\", 3.6, \"February\", [\"Canada\", \"Mexico\", \"UK\"], False],\n ],\n cache_examples=True,\n)\n\nif __name__ == \"__main__\":\n demo.launch()\n\n\n\n", "text": "Generate a plot based on 5 inputs."}, {"name": "Clustering with Scikit-Learn", "dir": "clustering", "code": "import gradio as gr\nimport math\nfrom functools import partial\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.cluster import (\n AgglomerativeClustering, Birch, DBSCAN, KMeans, MeanShift, OPTICS, SpectralClustering, estimate_bandwidth\n)\nfrom sklearn.datasets import make_blobs, make_circles, make_moons\nfrom sklearn.mixture import GaussianMixture\nfrom sklearn.neighbors import kneighbors_graph\nfrom sklearn.preprocessing import StandardScaler\n\nplt.style.use('seaborn')\nSEED = 0\nMAX_CLUSTERS = 10\nN_SAMPLES = 1000\nN_COLS = 3\nFIGSIZE = 7, 7 # does not affect size in webpage\nCOLORS = [\n 'blue', 'orange', 'green', 'red', 'purple', 'brown', 'pink', 'gray', 'olive', 'cyan'\n]\nassert len(COLORS) >= MAX_CLUSTERS, \"Not enough different colors for all clusters\"\nnp.random.seed(SEED)\n\n\ndef normalize(X):\n return StandardScaler().fit_transform(X)\n\ndef get_regular(n_clusters):\n # spiral pattern\n centers = [\n [0, 0],\n [1, 0],\n [1, 1],\n [0, 1],\n [-1, 1],\n [-1, 0],\n [-1, -1],\n [0, -1],\n [1, -1],\n [2, -1],\n ][:n_clusters]\n assert len(centers) == n_clusters\n X, labels = make_blobs(n_samples=N_SAMPLES, centers=centers, cluster_std=0.25, random_state=SEED)\n return normalize(X), labels\n\n\ndef get_circles(n_clusters):\n X, labels = make_circles(n_samples=N_SAMPLES, factor=0.5, noise=0.05, random_state=SEED)\n return normalize(X), labels\n\n\ndef get_moons(n_clusters):\n X, labels = make_moons(n_samples=N_SAMPLES, noise=0.05, random_state=SEED)\n return 
normalize(X), labels\n\n\ndef get_noise(n_clusters):\n np.random.seed(SEED)\n X, labels = np.random.rand(N_SAMPLES, 2), np.random.randint(0, n_clusters, size=(N_SAMPLES,))\n return normalize(X), labels\n\n\ndef get_anisotropic(n_clusters):\n X, labels = make_blobs(n_samples=N_SAMPLES, centers=n_clusters, random_state=170)\n transformation = [[0.6, -0.6], [-0.4, 0.8]]\n X = np.dot(X, transformation)\n return X, labels\n\n\ndef get_varied(n_clusters):\n cluster_std = [1.0, 2.5, 0.5, 1.0, 2.5, 0.5, 1.0, 2.5, 0.5, 1.0][:n_clusters]\n assert len(cluster_std) == n_clusters\n X, labels = make_blobs(\n n_samples=N_SAMPLES, centers=n_clusters, cluster_std=cluster_std, random_state=SEED\n )\n return normalize(X), labels\n\n\ndef get_spiral(n_clusters):\n # from https://scikit-learn.org/stable/auto_examples/cluster/plot_agglomerative_clustering.html\n np.random.seed(SEED)\n t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, N_SAMPLES))\n x = t * np.cos(t)\n y = t * np.sin(t)\n X = np.concatenate((x, y))\n X += 0.7 * np.random.randn(2, N_SAMPLES)\n X = np.ascontiguousarray(X.T)\n\n labels = np.zeros(N_SAMPLES, dtype=int)\n return normalize(X), labels\n\n\nDATA_MAPPING = {\n 'regular': get_regular,\n 'circles': get_circles,\n 'moons': get_moons,\n 'spiral': get_spiral,\n 'noise': get_noise,\n 'anisotropic': get_anisotropic,\n 'varied': get_varied,\n}\n\n\ndef get_groundtruth_model(X, labels, n_clusters, **kwargs):\n # dummy model to show true label distribution\n class Dummy:\n def __init__(self, y):\n self.labels_ = labels\n\n return Dummy(labels)\n\n\ndef get_kmeans(X, labels, n_clusters, **kwargs):\n model = KMeans(init=\"k-means++\", n_clusters=n_clusters, n_init=10, random_state=SEED)\n model.set_params(**kwargs)\n return model.fit(X)\n\n\ndef get_dbscan(X, labels, n_clusters, **kwargs):\n model = DBSCAN(eps=0.3)\n model.set_params(**kwargs)\n return model.fit(X)\n\n\ndef get_agglomerative(X, labels, n_clusters, **kwargs):\n connectivity = kneighbors_graph(\n X, 
n_neighbors=n_clusters, include_self=False\n )\n # make connectivity symmetric\n connectivity = 0.5 * (connectivity + connectivity.T)\n model = AgglomerativeClustering(\n n_clusters=n_clusters, linkage=\"ward\", connectivity=connectivity\n )\n model.set_params(**kwargs)\n return model.fit(X)\n\n\ndef get_meanshift(X, labels, n_clusters, **kwargs):\n bandwidth = estimate_bandwidth(X, quantile=0.25)\n model = MeanShift(bandwidth=bandwidth, bin_seeding=True)\n model.set_params(**kwargs)\n return model.fit(X)\n\n\ndef get_spectral(X, labels, n_clusters, **kwargs):\n model = SpectralClustering(\n n_clusters=n_clusters,\n eigen_solver=\"arpack\",\n affinity=\"nearest_neighbors\",\n )\n model.set_params(**kwargs)\n return model.fit(X)\n\n\ndef get_optics(X, labels, n_clusters, **kwargs):\n model = OPTICS(\n min_samples=7,\n xi=0.05,\n min_cluster_size=0.1,\n )\n model.set_params(**kwargs)\n return model.fit(X)\n\n\ndef get_birch(X, labels, n_clusters, **kwargs):\n model = Birch(n_clusters=n_clusters)\n model.set_params(**kwargs)\n return model.fit(X)\n\n\ndef get_gaussianmixture(X, labels, n_clusters, **kwargs):\n model = GaussianMixture(\n n_components=n_clusters, covariance_type=\"full\", random_state=SEED,\n )\n model.set_params(**kwargs)\n return model.fit(X)\n\n\nMODEL_MAPPING = {\n 'True labels': get_groundtruth_model,\n 'KMeans': get_kmeans,\n 'DBSCAN': get_dbscan,\n 'MeanShift': get_meanshift,\n 'SpectralClustering': get_spectral,\n 'OPTICS': get_optics,\n 'Birch': get_birch,\n 'GaussianMixture': get_gaussianmixture,\n 'AgglomerativeClustering': get_agglomerative,\n}\n\n\ndef plot_clusters(ax, X, labels):\n set_clusters = set(labels)\n set_clusters.discard(-1) # -1 signifies outliers, which we plot separately\n for label, color in zip(sorted(set_clusters), COLORS):\n idx = labels == label\n if not sum(idx):\n continue\n ax.scatter(X[idx, 0], X[idx, 1], color=color)\n\n # show outliers (if any)\n idx = labels == -1\n if sum(idx):\n ax.scatter(X[idx, 0], X[idx, 1], 
c='k', marker='x')\n\n ax.grid(None)\n ax.set_xticks([])\n ax.set_yticks([])\n return ax\n\n\ndef cluster(dataset: str, n_clusters: int, clustering_algorithm: str):\n if isinstance(n_clusters, dict):\n n_clusters = n_clusters['value']\n else:\n n_clusters = int(n_clusters)\n\n X, labels = DATA_MAPPING[dataset](n_clusters)\n model = MODEL_MAPPING[clustering_algorithm](X, labels, n_clusters=n_clusters)\n if hasattr(model, \"labels_\"):\n y_pred = model.labels_.astype(int)\n else:\n y_pred = model.predict(X)\n\n fig, ax = plt.subplots(figsize=FIGSIZE)\n\n plot_clusters(ax, X, y_pred)\n ax.set_title(clustering_algorithm, fontsize=16)\n\n return fig\n\n\ntitle = \"Clustering with Scikit-learn\"\ndescription = (\n \"This example shows how different clustering algorithms work. Simply pick \"\n \"the dataset and the number of clusters to see how the clustering algorithms work. \"\n \"Colored circles are (predicted) labels and black x are outliers.\"\n)\n\n\ndef iter_grid(n_rows, n_cols):\n # create a grid using gradio Block\n for _ in range(n_rows):\n with gr.Row():\n for _ in range(n_cols):\n with gr.Column():\n yield\n\nwith gr.Blocks(title=title) as demo:\n gr.HTML(f\"<b>{title}</b>\")\n gr.Markdown(description)\n\n input_models = list(MODEL_MAPPING)\n input_data = gr.Radio(\n list(DATA_MAPPING),\n value=\"regular\",\n label=\"dataset\"\n )\n input_n_clusters = gr.Slider(\n minimum=1,\n maximum=MAX_CLUSTERS,\n value=4,\n step=1,\n label='Number of clusters'\n )\n n_rows = int(math.ceil(len(input_models) / N_COLS))\n counter = 0\n for _ in iter_grid(n_rows, N_COLS):\n if counter >= len(input_models):\n break\n\n input_model = input_models[counter]\n plot = gr.Plot(label=input_model)\n fn = partial(cluster, clustering_algorithm=input_model)\n input_data.change(fn=fn, inputs=[input_data, input_n_clusters], outputs=plot)\n input_n_clusters.change(fn=fn, inputs=[input_data, input_n_clusters], outputs=plot)\n counter += 1\n\ndemo.launch()\n", "text": "This demo built with 
Blocks generates 9 plots based on the input."}, {"name": "Time Series Forecasting", "dir": "timeseries-forecasting-with-prophet", "code": "import gradio as gr\nimport pypistats\nfrom datetime import date\nfrom dateutil.relativedelta import relativedelta\nimport pandas as pd\nfrom prophet import Prophet\npd.options.plotting.backend = \"plotly\"\n\ndef get_forecast(lib, time):\n\n data = pypistats.overall(lib, total=True, format=\"pandas\")\n data = data.groupby(\"category\").get_group(\"with_mirrors\").sort_values(\"date\")\n start_date = date.today() - relativedelta(months=int(time.split(\" \")[0]))\n df = data[(data['date'] > str(start_date))] \n\n df1 = df[['date','downloads']]\n df1.columns = ['ds','y']\n\n m = Prophet()\n m.fit(df1)\n future = m.make_future_dataframe(periods=90)\n forecast = m.predict(future)\n fig1 = m.plot(forecast)\n return fig1 \n\nwith gr.Blocks() as demo:\n gr.Markdown(\n \"\"\"\n **Pypi Download Stats \ud83d\udcc8 with Prophet Forecasting**: see live download stats for popular open-source libraries \ud83e\udd17 along with a 3 month forecast using Prophet. The [ source code for this Gradio demo is here](https://huggingface.co/spaces/gradio/timeseries-forecasting-with-prophet/blob/main/app.py).\n \"\"\")\n with gr.Row():\n lib = gr.Dropdown([\"pandas\", \"scikit-learn\", \"torch\", \"prophet\"], label=\"Library\", value=\"pandas\")\n time = gr.Dropdown([\"3 months\", \"6 months\", \"9 months\", \"12 months\"], label=\"Downloads over the last...\", value=\"12 months\")\n\n plt = gr.Plot()\n\n lib.change(get_forecast, [lib, time], plt, queue=False)\n time.change(get_forecast, [lib, time], plt, queue=False) \n demo.load(get_forecast, [lib, time], plt, queue=False) \n\ndemo.launch()", "text": "A simple dashboard showing pypi stats for python libraries. 
Updates on load, and has no buttons!"}, {"name": "Income Classification with XGBoost", "dir": "xgboost-income-prediction-with-explainability", "code": "import gradio as gr\nimport random\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport shap\nimport xgboost as xgb\nfrom datasets import load_dataset\n\n\ndataset = load_dataset(\"scikit-learn/adult-census-income\")\nX_train = dataset[\"train\"].to_pandas()\n_ = X_train.pop(\"fnlwgt\")\n_ = X_train.pop(\"race\")\ny_train = X_train.pop(\"income\")\ny_train = (y_train == \">50K\").astype(int)\ncategorical_columns = [\n \"workclass\",\n \"education\",\n \"marital.status\",\n \"occupation\",\n \"relationship\",\n \"sex\",\n \"native.country\",\n]\nX_train = X_train.astype({col: \"category\" for col in categorical_columns})\ndata = xgb.DMatrix(X_train, label=y_train, enable_categorical=True)\nmodel = xgb.train(params={\"objective\": \"binary:logistic\"}, dtrain=data)\nexplainer = shap.TreeExplainer(model)\n\ndef predict(*args):\n df = pd.DataFrame([args], columns=X_train.columns)\n df = df.astype({col: \"category\" for col in categorical_columns})\n pos_pred = model.predict(xgb.DMatrix(df, enable_categorical=True))\n return {\">50K\": float(pos_pred[0]), \"<=50K\": 1 - float(pos_pred[0])}\n\n\ndef interpret(*args):\n df = pd.DataFrame([args], columns=X_train.columns)\n df = df.astype({col: \"category\" for col in categorical_columns})\n shap_values = explainer.shap_values(xgb.DMatrix(df, enable_categorical=True))\n scores_desc = list(zip(shap_values[0], X_train.columns))\n scores_desc = sorted(scores_desc)\n fig_m = plt.figure(tight_layout=True)\n plt.barh([s[1] for s in scores_desc], [s[0] for s in scores_desc])\n plt.title(\"Feature Shap Values\")\n plt.ylabel(\"Shap Value\")\n plt.xlabel(\"Feature\")\n plt.tight_layout()\n return fig_m\n\n\nunique_class = sorted(X_train[\"workclass\"].unique())\nunique_education = sorted(X_train[\"education\"].unique())\nunique_marital_status = 
sorted(X_train[\"marital.status\"].unique())\nunique_relationship = sorted(X_train[\"relationship\"].unique())\nunique_occupation = sorted(X_train[\"occupation\"].unique())\nunique_sex = sorted(X_train[\"sex\"].unique())\nunique_country = sorted(X_train[\"native.country\"].unique())\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"\"\"\n **Income Classification with XGBoost \ud83d\udcb0**: This demo uses an XGBoost classifier predicts income based on demographic factors, along with Shapley value-based *explanations*. The [source code for this Gradio demo is here](https://huggingface.co/spaces/gradio/xgboost-income-prediction-with-explainability/blob/main/app.py).\n \"\"\")\n with gr.Row():\n with gr.Column():\n age = gr.Slider(label=\"Age\", minimum=17, maximum=90, step=1, randomize=True)\n work_class = gr.Dropdown(\n label=\"Workclass\",\n choices=unique_class,\n value=lambda: random.choice(unique_class),\n )\n education = gr.Dropdown(\n label=\"Education Level\",\n choices=unique_education,\n value=lambda: random.choice(unique_education),\n )\n years = gr.Slider(\n label=\"Years of schooling\",\n minimum=1,\n maximum=16,\n step=1,\n randomize=True,\n )\n marital_status = gr.Dropdown(\n label=\"Marital Status\",\n choices=unique_marital_status,\n value=lambda: random.choice(unique_marital_status),\n )\n occupation = gr.Dropdown(\n label=\"Occupation\",\n choices=unique_occupation,\n value=lambda: random.choice(unique_occupation),\n )\n relationship = gr.Dropdown(\n label=\"Relationship Status\",\n choices=unique_relationship,\n value=lambda: random.choice(unique_relationship),\n )\n sex = gr.Dropdown(\n label=\"Sex\", choices=unique_sex, value=lambda: random.choice(unique_sex)\n )\n capital_gain = gr.Slider(\n label=\"Capital Gain\",\n minimum=0,\n maximum=100000,\n step=500,\n randomize=True,\n )\n capital_loss = gr.Slider(\n label=\"Capital Loss\", minimum=0, maximum=10000, step=500, randomize=True\n )\n hours_per_week = gr.Slider(\n label=\"Hours Per Week Worked\", 
minimum=1, maximum=99, step=1\n )\n country = gr.Dropdown(\n label=\"Native Country\",\n choices=unique_country,\n value=lambda: random.choice(unique_country),\n )\n with gr.Column():\n label = gr.Label()\n plot = gr.Plot()\n with gr.Row():\n predict_btn = gr.Button(value=\"Predict\")\n interpret_btn = gr.Button(value=\"Explain\")\n predict_btn.click(\n predict,\n inputs=[\n age,\n work_class,\n education,\n years,\n marital_status,\n occupation,\n relationship,\n sex,\n capital_gain,\n capital_loss,\n hours_per_week,\n country,\n ],\n outputs=[label],\n )\n interpret_btn.click(\n interpret,\n inputs=[\n age,\n work_class,\n education,\n years,\n marital_status,\n occupation,\n relationship,\n sex,\n capital_gain,\n capital_loss,\n hours_per_week,\n country,\n ],\n outputs=[plot],\n )\n\ndemo.launch()\n", "text": "This demo takes in 12 inputs from the user in dropdowns and sliders and predicts income. It also has a separate button for explaining the prediction."}, {"name": "Leaderboard", "dir": "leaderboard", "code": "import gradio as gr\nimport requests\nimport pandas as pd\nfrom huggingface_hub.hf_api import SpaceInfo\npath = f\"https://huggingface.co/api/spaces\"\n\n\ndef get_blocks_party_spaces():\n r = requests.get(path)\n d = r.json()\n spaces = [SpaceInfo(**x) for x in d]\n blocks_spaces = {}\n for i in range(0,len(spaces)):\n if spaces[i].id.split('/')[0] == 'Gradio-Blocks' and hasattr(spaces[i], 'likes') and spaces[i].id != 'Gradio-Blocks/Leaderboard' and spaces[i].id != 'Gradio-Blocks/README':\n blocks_spaces[spaces[i].id]=spaces[i].likes\n df = pd.DataFrame(\n [{\"Spaces_Name\": Spaces, \"likes\": likes} for Spaces,likes in blocks_spaces.items()])\n df = df.sort_values(by=['likes'],ascending=False)\n return df\n\nblock = gr.Blocks()\n\nwith block: \n gr.Markdown(\"\"\"Leaderboard for the most popular Blocks Event Spaces. 
To learn more and join, see <a href=\"https://huggingface.co/Gradio-Blocks\" target=\"_blank\" style=\"text-decoration: underline\">Blocks Party Event</a>\"\"\")\n with gr.Tabs():\n with gr.TabItem(\"Blocks Party Leaderboard\"):\n with gr.Row():\n data = gr.outputs.Dataframe(type=\"pandas\")\n with gr.Row():\n data_run = gr.Button(\"Refresh\")\n data_run.click(get_blocks_party_spaces, inputs=None, outputs=data)\n # running the function on page load in addition to when the button is clicked\n block.load(get_blocks_party_spaces, inputs=None, outputs=data) \n\nblock.launch()\n\n", "text": "A simple dashboard ranking spaces by number of likes."}, {"name": "Tax Calculator", "dir": "tax_calculator", "code": "import gradio as gr\n\ndef tax_calculator(income, marital_status, assets):\n tax_brackets = [(10, 0), (25, 8), (60, 12), (120, 20), (250, 30)]\n total_deductible = sum(assets[\"Cost\"])\n taxable_income = income - total_deductible\n\n total_tax = 0\n for bracket, rate in tax_brackets:\n if taxable_income > bracket:\n total_tax += (taxable_income - bracket) * rate / 100\n\n if marital_status == \"Married\":\n total_tax *= 0.75\n elif marital_status == \"Divorced\":\n total_tax *= 0.8\n\n return round(total_tax)\n\ndemo = gr.Interface(\n tax_calculator,\n [\n \"number\",\n gr.Radio([\"Single\", \"Married\", \"Divorced\"]),\n gr.Dataframe(\n headers=[\"Item\", \"Cost\"],\n datatype=[\"str\", \"number\"],\n label=\"Assets Purchased this Year\",\n ),\n ],\n \"number\",\n examples=[\n [10000, \"Married\", [[\"Suit\", 5000], [\"Laptop\", 800], [\"Car\", 1800]]],\n [80000, \"Single\", [[\"Suit\", 800], [\"Watch\", 1800], [\"Car\", 800]]],\n ],\n)\n\ndemo.launch()\n", "text": "Calculate taxes using Textbox, Radio, and Dataframe components"}]}, {"category": "\ud83c\udfa4 Audio & Speech", "demos": [{"name": "Text to Speech", "dir": "neon-tts-plugin-coqui", "code": "import tempfile\nimport gradio as gr\nfrom neon_tts_plugin_coqui import CoquiTTS\n\nLANGUAGES = 
list(CoquiTTS.langs.keys())\ncoquiTTS = CoquiTTS()\n\ndef tts(text: str, language: str):\n with tempfile.NamedTemporaryFile(suffix=\".wav\", delete=False) as fp:\n coquiTTS.get_tts(text, fp, speaker = {\"language\" : language})\n return fp.name\n\ninputs = [gr.Textbox(label=\"Input\", value=CoquiTTS.langs[\"en\"][\"sentence\"], max_lines=3), \n gr.Radio(label=\"Language\", choices=LANGUAGES, value=\"en\")]\noutputs = gr.Audio(label=\"Output\")\n\ndemo = gr.Interface(fn=tts, inputs=inputs, outputs=outputs)\n\ndemo.launch()", "text": "This demo converts text to speech in 14 languages."}, {"name": "Speech to Text (ASR)", "dir": "automatic-speech-recognition", "code": "import gradio as gr\nimport os\n\n# save your HF API token from https:/hf.co/settings/tokens as an env variable to avoid rate limiting\nauth_token = os.getenv(\"auth_token\")\n\n# automatically load the interface from a HF model \n# you can remove the api_key parameter if you don't care about rate limiting. \ndemo = gr.load(\n \"huggingface/facebook/wav2vec2-base-960h\",\n title=\"Speech-to-text\",\n inputs=\"mic\",\n description=\"Let me try to guess what you're saying!\",\n hf_token=auth_token\n)\n\ndemo.launch()\n", "text": "Automatic speech recognition English. 
Record from your microphone and the app will transcribe the audio."}, {"name": "Musical Instrument Identification", "dir": "musical_instrument_identification", "code": "import gradio as gr\nimport torch\nimport torchaudio\nfrom timeit import default_timer as timer\nfrom data_setups import audio_preprocess, resample\nimport gdown\n\nurl = 'https://drive.google.com/uc?id=1X5CR18u0I-ZOi_8P0cNptCe5JGk9Ro0C'\noutput = 'piano.wav'\ngdown.download(url, output, quiet=False)\nurl = 'https://drive.google.com/uc?id=1W-8HwmGR5SiyDbUcGAZYYDKdCIst07__'\noutput= 'torch_efficientnet_fold2_CNN.pth'\ngdown.download(url, output, quiet=False)\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nSAMPLE_RATE = 44100\nAUDIO_LEN = 2.90\nmodel = torch.load(\"torch_efficientnet_fold2_CNN.pth\", map_location=torch.device('cpu'))\nLABELS = [\n \"Cello\", \"Clarinet\", \"Flute\", \"Acoustic Guitar\", \"Electric Guitar\", \"Organ\", \"Piano\", \"Saxophone\", \"Trumpet\", \"Violin\", \"Voice\"\n]\nexample_list = [\n [\"piano.wav\"]\n]\n\n\ndef predict(audio_path):\n start_time = timer()\n wavform, sample_rate = torchaudio.load(audio_path)\n wav = resample(wavform, sample_rate, SAMPLE_RATE)\n if len(wav) > int(AUDIO_LEN * SAMPLE_RATE):\n wav = wav[:int(AUDIO_LEN * SAMPLE_RATE)]\n else:\n print(f\"input length {len(wav)} too small!, need over {int(AUDIO_LEN * SAMPLE_RATE)}\")\n return\n img = audio_preprocess(wav, SAMPLE_RATE).unsqueeze(0)\n model.eval()\n with torch.inference_mode():\n pred_probs = torch.softmax(model(img), dim=1)\n pred_labels_and_probs = {LABELS[i]: float(pred_probs[0][i]) for i in range(len(LABELS))}\n pred_time = round(timer() - start_time, 5)\n return pred_labels_and_probs, pred_time\n\ndemo = gr.Interface(fn=predict,\n inputs=gr.Audio(type=\"filepath\"),\n outputs=[gr.Label(num_top_classes=11, label=\"Predictions\"), \n gr.Number(label=\"Prediction time (s)\")],\n examples=example_list,\n cache_examples=False\n )\n\ndemo.launch(debug=False)\n", "text": "This demo 
identifies musical instruments from an audio file. It uses Gradio's Audio and Label components."}, {"name": "Speaker Verification", "dir": "same-person-or-different", "code": "import gradio as gr\nimport torch\nfrom torchaudio.sox_effects import apply_effects_file\nfrom transformers import AutoFeatureExtractor, AutoModelForAudioXVector\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nOUTPUT_OK = (\n \"\"\"\n <div class=\"container\">\n <div class=\"row\"><h1 style=\"text-align: center\">The speakers are</h1></div>\n <div class=\"row\"><h1 class=\"display-1 text-success\" style=\"text-align: center\">{:.1f}%</h1></div>\n <div class=\"row\"><h1 style=\"text-align: center\">similar</h1></div>\n <div class=\"row\"><h1 class=\"text-success\" style=\"text-align: center\">Welcome, human!</h1></div>\n <div class=\"row\"><small style=\"text-align: center\">(You must get at least 85% to be considered the same person)</small><div class=\"row\">\n </div>\n\"\"\"\n)\nOUTPUT_FAIL = (\n \"\"\"\n <div class=\"container\">\n <div class=\"row\"><h1 style=\"text-align: center\">The speakers are</h1></div>\n <div class=\"row\"><h1 class=\"display-1 text-danger\" style=\"text-align: center\">{:.1f}%</h1></div>\n <div class=\"row\"><h1 style=\"text-align: center\">similar</h1></div>\n <div class=\"row\"><h1 class=\"text-danger\" style=\"text-align: center\">You shall not pass!</h1></div>\n <div class=\"row\"><small style=\"text-align: center\">(You must get at least 85% to be considered the same person)</small><div class=\"row\">\n </div>\n\"\"\"\n)\n\nEFFECTS = [\n [\"remix\", \"-\"],\n [\"channels\", \"1\"],\n [\"rate\", \"16000\"],\n [\"gain\", \"-1.0\"],\n [\"silence\", \"1\", \"0.1\", \"0.1%\", \"-1\", \"0.1\", \"0.1%\"],\n [\"trim\", \"0\", \"10\"],\n]\n\nTHRESHOLD = 0.85\n\nmodel_name = \"microsoft/unispeech-sat-base-plus-sv\"\nfeature_extractor = AutoFeatureExtractor.from_pretrained(model_name)\nmodel = 
AutoModelForAudioXVector.from_pretrained(model_name).to(device)\ncosine_sim = torch.nn.CosineSimilarity(dim=-1)\n\n\ndef similarity_fn(path1, path2):\n if not (path1 and path2):\n return '<b style=\"color:red\">ERROR: Please record audio for *both* speakers!</b>'\n\n wav1, _ = apply_effects_file(path1, EFFECTS)\n wav2, _ = apply_effects_file(path2, EFFECTS)\n print(wav1.shape, wav2.shape)\n\n input1 = feature_extractor(wav1.squeeze(0), return_tensors=\"pt\", sampling_rate=16000).input_values.to(device)\n input2 = feature_extractor(wav2.squeeze(0), return_tensors=\"pt\", sampling_rate=16000).input_values.to(device)\n\n with torch.no_grad():\n emb1 = model(input1).embeddings\n emb2 = model(input2).embeddings\n emb1 = torch.nn.functional.normalize(emb1, dim=-1).cpu()\n emb2 = torch.nn.functional.normalize(emb2, dim=-1).cpu()\n similarity = cosine_sim(emb1, emb2).numpy()[0]\n\n if similarity >= THRESHOLD:\n output = OUTPUT_OK.format(similarity * 100)\n else:\n output = OUTPUT_FAIL.format(similarity * 100)\n\n return output\n\ninputs = [\n gr.inputs.Audio(source=\"microphone\", type=\"filepath\", optional=True, label=\"Speaker #1\"),\n gr.inputs.Audio(source=\"microphone\", type=\"filepath\", optional=True, label=\"Speaker #2\"),\n]\noutput = gr.outputs.HTML(label=\"\")\n\n\ndescription = (\n \"This demo from Microsoft will compare two speech samples and determine if they are from the same speaker. 
\"\n \"Try it with your own voice!\"\n)\narticle = (\n \"<p style='text-align: center'>\"\n \"<a href='https://huggingface.co/microsoft/unispeech-sat-large-sv' target='_blank'>\ud83c\udf99\ufe0f Learn more about UniSpeech-SAT</a> | \"\n \"<a href='https://arxiv.org/abs/2110.05752' target='_blank'>\ud83d\udcda UniSpeech-SAT paper</a> | \"\n \"<a href='https://www.danielpovey.com/files/2018_icassp_xvectors.pdf' target='_blank'>\ud83d\udcda X-Vector paper</a>\"\n \"</p>\"\n)\nexamples = [\n [\"samples/cate_blanch.mp3\", \"samples/cate_blanch_2.mp3\"],\n [\"samples/cate_blanch.mp3\", \"samples/heath_ledger.mp3\"],\n]\n\ninterface = gr.Interface(\n fn=similarity_fn,\n inputs=inputs,\n outputs=output,\n layout=\"horizontal\",\n allow_flagging=False,\n live=False,\n examples=examples,\n cache_examples=False\n)\ninterface.launch()\n", "text": "This demo identifies if two speakers are the same person using Gradio's Audio and HTML components."}]}]