diff --git a/Readme.md b/Readme.md
index b179563..f02d49a 100644
--- a/Readme.md
+++ b/Readme.md
@@ -5,6 +5,10 @@ DiffusionMagic focused on the following areas:
 - Cross-platform (Windows/Linux/Mac)
 - Modular design, latest best optimizations for speed and memory
 
+## Stable Diffusion XL Colab
+We can run Stable Diffusion XL 0.9 on Google Colab:
+[Open in Colab](https://colab.research.google.com/drive/1KrmcU2gONIQ2WihI1s6uITgDDzkbKaJK?usp=sharing)
+
 ## Features
 - Supports various Stable Diffusion workflows
@@ -113,6 +117,7 @@ Or we can clone the model use the local folder path as model id.
 ## Linting (Development)
 Run the following commands from src folder
 `mypy --ignore-missing-imports --explicit-package-bases .`
+`flake8 --max-line-length=100 .`
 ## Contribute
 Contributions are welcomed.
diff --git a/configs/stable_diffusion_models.txt b/configs/stable_diffusion_models.txt
index 07c4106..f5dd3c0 100644
--- a/configs/stable_diffusion_models.txt
+++ b/configs/stable_diffusion_models.txt
@@ -16,4 +16,5 @@ lllyasviel/sd-controlnet-hed
 lllyasviel/sd-controlnet-openpose
 lllyasviel/sd-controlnet-depth
 lllyasviel/sd-controlnet-scribble
-lllyasviel/sd-controlnet-seg
\ No newline at end of file
+lllyasviel/sd-controlnet-seg
+stabilityai/stable-diffusion-xl-base-1.0
\ No newline at end of file
diff --git a/environment.yml b/environment.yml
index 895a56e..e85d7e9 100644
--- a/environment.yml
+++ b/environment.yml
@@ -12,16 +12,17 @@ dependencies:
   - torchvision=0.15.0
   - numpy=1.19.2
   - pip:
-    - accelerate==0.17.1
-    - diffusers==0.14.0
-    - gradio==3.17.1
-    - safetensors==0.2.8
+    - accelerate==0.21.0
+    - diffusers==0.19.3
+    - gradio==3.32.0
+    - safetensors==0.3.1
     - scipy==1.10.0
-    - transformers==4.26.0
+    - transformers==4.31.0
     - pydantic==1.10.4
     - mypy==1.0.0
     - black==23.1.0
     - flake8==6.0.0
     - markupsafe==2.0.1
     - opencv-contrib-python==4.7.0.72
-    - controlnet-aux==0.0.1
\ No newline at end of file
+    - controlnet-aux==0.0.1
+    - invisible-watermark==0.2.0
\ No newline at end of file
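The dependency bumps above are what enable SDXL: the pipelines shipped in diffusers 0.19.x, and the SDXL base pipeline expects invisible-watermark to be installed. For reference, a minimal sketch (not part of this patch) of what the pinned versions allow, loading the newly listed model id; the CUDA device and the fp16 variant are assumptions:

```python
# Minimal sketch: generate one image with the newly listed SDXL base model
# using the pinned diffusers==0.19.3. Assumes a CUDA device with enough VRAM;
# drop variant="fp16" to fall back to the full-precision weights.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    use_safetensors=True,
    variant="fp16",
).to("cuda")
image = pipe(
    "A fantasy landscape",
    negative_prompt="bad, deformed, ugly, bad anatomy",
    num_inference_steps=20,
    height=1024,
    width=1024,
).images[0]
image.save("sdxl_out.png")
```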
diff --git a/src/backend/generate.py b/src/backend/generate.py
index bbae16e..bad9e6d 100644
--- a/src/backend/generate.py
+++ b/src/backend/generate.py
@@ -15,6 +15,7 @@
 )
 from backend.controlnet.ControlContext import ControlnetContext
 from backend.stablediffusion.stablediffusion import StableDiffusion
+from backend.stablediffusion.stablediffusionxl import StableDiffusionXl
 from settings import AppSettings
 
 
@@ -30,6 +31,7 @@ def __init__(self, compute: Computing):
         self.stable_diffusion_depth = StableDiffusionDepthToImage(compute)
         self.stable_diffusion_pix_to_pix = StableDiffusionInstructPixToPix(compute)
         self.controlnet = ControlnetContext(compute)
+        self.stable_diffusion_xl = StableDiffusionXl(compute)
         self.app_settings = AppSettings().get_settings()
         self.model_id = self.app_settings.model_settings.model_id
         self.low_vram_mode = self.app_settings.low_memory_mode
@@ -78,6 +80,15 @@ def _init_stable_diffusion(self):
             )
             self.pipe_initialized = True
 
+    def _init_stable_diffusion_xl(self):
+        if not self.pipe_initialized:
+            print("Initializing Stable Diffusion XL pipeline")
+            self.stable_diffusion_xl.get_text_to_image_xl_pipeline(
+                self.model_id,
+                self.low_vram_mode,
+            )
+            self.pipe_initialized = True
+
     def diffusion_image_to_image(
         self,
         image,
@@ -355,3 +366,78 @@ def diffusion_control_to_image(
             "CannyToImage",
         )
         return images
+
+    def diffusion_text_to_image_xl(
+        self,
+        prompt,
+        neg_prompt,
+        image_height,
+        image_width,
+        inference_steps,
+        scheduler,
+        guidance_scale,
+        num_images,
+        attention_slicing,
+        vae_slicing,
+        seed,
+    ) -> Any:
+        stable_diffusion_settings = StableDiffusionSetting(
+            prompt=prompt,
+            negative_prompt=neg_prompt,
+            image_height=image_height,
+            image_width=image_width,
+            inference_steps=inference_steps,
+            guidance_scale=guidance_scale,
+            number_of_images=num_images,
+            scheduler=scheduler,
+            seed=seed,
+            attention_slicing=attention_slicing,
+            vae_slicing=vae_slicing,
+        )
+        self._init_stable_diffusion_xl()
+        images = self.stable_diffusion_xl.text_to_image_xl(stable_diffusion_settings)
+        self._save_images(
+            images,
+            "TextToImage",
+        )
+        return images
+
+    def diffusion_image_to_image_xl(
+        self,
+        image,
+        strength,
+        prompt,
+        neg_prompt,
+        image_height,
+        image_width,
+        inference_steps,
+        scheduler,
+        guidance_scale,
+        num_images,
+        attention_slicing,
+        seed,
+    ) -> Any:
+        stable_diffusion_image_settings = StableDiffusionImageToImageSetting(
+            image=image,
+            strength=strength,
+            prompt=prompt,
+            negative_prompt=neg_prompt,
+            image_height=image_height,
+            image_width=image_width,
+            inference_steps=inference_steps,
+            guidance_scale=guidance_scale,
+            number_of_images=num_images,
+            scheduler=scheduler,
+            seed=seed,
+            attention_slicing=attention_slicing,
+        )
+        self._init_stable_diffusion_xl()
+        images = self.stable_diffusion_xl.image_to_image(
+            stable_diffusion_image_settings
+        )
+
+        self._save_images(
+            images,
+            "ImageToImage",
+        )
+        return images
diff --git a/src/backend/stablediffusion/stable_diffusion_types.py b/src/backend/stablediffusion/stable_diffusion_types.py
index e87b132..3a485a6 100644
--- a/src/backend/stablediffusion/stable_diffusion_types.py
+++ b/src/backend/stablediffusion/stable_diffusion_types.py
@@ -16,6 +16,7 @@ class StableDiffusionType(str, Enum):
     controlnet_depth = "controlnet_depth"
     controlnet_scribble = "controlnet_scribble"
     controlnet_seg = "controlnet_seg"
+    stable_diffusion_xl = "StableDiffusionXl"
 
 
 def get_diffusion_type(
@@ -44,4 +45,6 @@ def get_diffusion_type(
         stable_diffusion_type = StableDiffusionType.controlnet_scribble
     elif "controlnet-seg" in model_id:
         stable_diffusion_type = StableDiffusionType.controlnet_seg
+    elif "stable-diffusion-xl" in model_id:
+        stable_diffusion_type = StableDiffusionType.stable_diffusion_xl
     return stable_diffusion_type
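The enum branch above is what routes XL model ids to the new backend. A small hedged usage sketch, assuming `get_diffusion_type` takes the model id string, as its body indicates:

```python
# Hedged sketch: how the dispatch above behaves for the newly added model id.
# Assumes get_diffusion_type(model_id: str), as the function body suggests.
from backend.stablediffusion.stable_diffusion_types import (
    StableDiffusionType,
    get_diffusion_type,
)

diffusion_type = get_diffusion_type("stabilityai/stable-diffusion-xl-base-1.0")
assert diffusion_type == StableDiffusionType.stable_diffusion_xl
```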
diff --git a/src/backend/stablediffusion/stablediffusionxl.py b/src/backend/stablediffusion/stablediffusionxl.py
new file mode 100644
index 0000000..8b1cbbd
--- /dev/null
+++ b/src/backend/stablediffusion/stablediffusionxl.py
@@ -0,0 +1,181 @@
+from time import time
+
+import torch
+from diffusers import (
+    DiffusionPipeline,
+    StableDiffusionXLImg2ImgPipeline,
+)
+from PIL import Image
+
+from backend.computing import Computing
+from backend.stablediffusion.modelmeta import ModelMeta
+from backend.stablediffusion.models.scheduler_types import SchedulerType
+from backend.stablediffusion.models.setting import (
+    StableDiffusionImageToImageSetting,
+    StableDiffusionSetting,
+)
+from backend.stablediffusion.scheduler_mixin import SamplerMixin
+
+
+class StableDiffusionXl(SamplerMixin):
+    def __init__(self, compute: Computing):
+        self.compute = compute
+        self.pipeline = None
+        self.device = self.compute.name
+
+        super().__init__()
+
+    def get_text_to_image_xl_pipeline(
+        self,
+        model_id: str = "stabilityai/stable-diffusion-xl-base-1.0",
+        low_vram_mode: bool = False,
+        sampler: str = SchedulerType.DPMSolverMultistepScheduler.value,
+    ):
+        repo_id = model_id
+        model_meta = ModelMeta(repo_id)
+        is_lora_model = model_meta.is_loramodel()
+        if is_lora_model:
+            print("LoRA model detected")
+            self.model_id = model_meta.get_lora_base_model()
+            print(f"LoRA base model - {self.model_id}")
+        else:
+            self.model_id = model_id
+
+        self.low_vram_mode = low_vram_mode
+        print(f"StableDiffusion - {self.compute.name}, {self.compute.datatype}")
+        print(f"Using model {model_id}")
+        self.default_sampler = self.find_sampler(
+            sampler,
+            self.model_id,
+        )
+        tic = time()
+        self._load_model()
+        delta = time() - tic
+        print(f"Model loaded in {delta:.2f}s")
+
+        if self.pipeline is None:
+            raise Exception("Text to image pipeline not initialized")
+        if is_lora_model:
+            self.pipeline.unet.load_attn_procs(repo_id)
+        self._pipeline_to_device()
+        components = self.pipeline.components
+        self.img_to_img_pipeline = StableDiffusionXLImg2ImgPipeline(**components)
+
+    def text_to_image_xl(self, setting: StableDiffusionSetting):
+        if self.pipeline is None:
+            raise Exception("Text to image pipeline not initialized")
+
+        self.pipeline.scheduler = self.find_sampler(
+            setting.scheduler,
+            self.model_id,
+        )
+        generator = None
+        if setting.seed != -1:
+            print(f"Using seed {setting.seed}")
+            generator = torch.Generator(self.device).manual_seed(setting.seed)
+
+        # Attention slicing is currently not used for the SDXL pipeline:
+        # if setting.attention_slicing:
+        #     self.pipeline.enable_attention_slicing()
+        # else:
+        #     self.pipeline.disable_attention_slicing()
+
+        if setting.vae_slicing:
+            self.pipeline.enable_vae_slicing()
+        else:
+            self.pipeline.disable_vae_slicing()
+
+        images = self.pipeline(
+            setting.prompt,
+            guidance_scale=setting.guidance_scale,
+            num_inference_steps=setting.inference_steps,
+            height=setting.image_height,
+            width=setting.image_width,
+            negative_prompt=setting.negative_prompt,
+            num_images_per_prompt=setting.number_of_images,
+            generator=generator,
+        ).images
+
+        # torch.compile of the UNet is left disabled for now:
+        # self.pipeline.unet = torch.compile(
+        #     self.pipeline.unet,
+        #     mode="reduce-overhead",
+        #     fullgraph=True,
+        # )
+        return images
+
+    def _pipeline_to_device(self):
+        if self.low_vram_mode:
+            print("Running in low VRAM mode; image generation will be slower")
+            self.pipeline.enable_sequential_cpu_offload()
+        else:
+            if self.compute.name == "cuda":
+                self.pipeline = self.pipeline.to("cuda")
+            elif self.compute.name == "mps":
+                self.pipeline = self.pipeline.to("mps")
+
+    def _load_full_precision_model(self):
+        self.pipeline = DiffusionPipeline.from_pretrained(
+            self.model_id,
+            torch_dtype=self.compute.datatype,
+            scheduler=self.default_sampler,
+        )
+
+    def _load_model(self):
+        if self.compute.name == "cuda":
+            try:
+                self.pipeline = DiffusionPipeline.from_pretrained(
+                    self.model_id,
+                    torch_dtype=self.compute.datatype,
+                    scheduler=self.default_sampler,
+                    use_safetensors=True,
+                    variant="fp16",
+                )
+            except Exception as ex:
+                print(
+                    f"fp16 variant of the model not found; using the full precision model: {ex}"
+                )
+                self._load_full_precision_model()
+        else:
+            self._load_full_precision_model()
+
+    def image_to_image(self, setting: StableDiffusionImageToImageSetting):
+        if setting.scheduler is None:
+            raise Exception("Scheduler cannot be empty")
+
+        print("Running image to image pipeline")
+        self.img_to_img_pipeline.scheduler = self.find_sampler(  # type: ignore
+            setting.scheduler,
+            self.model_id,
+        )
+        generator = None
+        if setting.seed != -1 and setting.seed:
+            print(f"Using seed {setting.seed}")
+            generator = torch.Generator(self.device).manual_seed(setting.seed)
+
+        if setting.attention_slicing:
+            self.img_to_img_pipeline.enable_attention_slicing()  # type: ignore
+        else:
+            self.img_to_img_pipeline.disable_attention_slicing()  # type: ignore
+
+        if setting.vae_slicing:
+            self.img_to_img_pipeline.enable_vae_slicing()  # type: ignore
+        else:
+            self.img_to_img_pipeline.disable_vae_slicing()  # type: ignore
+
+        init_image = setting.image.resize(
+            (
+                setting.image_width,
+                setting.image_height,
+            ),
+            Image.Resampling.LANCZOS,
+        )
+        images = self.img_to_img_pipeline(  # type: ignore
+            image=init_image,
+            strength=setting.strength,
+            prompt=setting.prompt,
+            guidance_scale=setting.guidance_scale,
+            num_inference_steps=setting.inference_steps,
+            negative_prompt=setting.negative_prompt,
+            num_images_per_prompt=setting.number_of_images,
+            generator=generator,
+        ).images
+        return images
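Two patterns in the new module are worth calling out: `_load_model` tries the fp16 safetensors variant first and falls back to full precision, and the img2img pipeline is built from the text-to-image pipeline's components so the large SDXL weights are loaded only once. A standalone sketch of both with plain diffusers calls (the model id and dtype are assumptions, not taken from app settings):

```python
# Standalone sketch of the loading strategy used above (not part of the patch).
import torch
from diffusers import DiffusionPipeline, StableDiffusionXLImg2ImgPipeline

model_id = "stabilityai/stable-diffusion-xl-base-1.0"  # assumed model id
try:
    # Prefer the smaller fp16 safetensors weights when the repo publishes them.
    pipe = DiffusionPipeline.from_pretrained(
        model_id,
        torch_dtype=torch.float16,
        use_safetensors=True,
        variant="fp16",
    )
except Exception:
    # No fp16 variant for this repo: fall back to the default weights.
    pipe = DiffusionPipeline.from_pretrained(model_id)

# Reuse the already-loaded components instead of loading the model twice.
img2img = StableDiffusionXLImg2ImgPipeline(**pipe.components)
```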
diff --git a/src/constants.py b/src/constants.py
index 79f2bca..465b7e0 100644
--- a/src/constants.py
+++ b/src/constants.py
@@ -1,4 +1,4 @@
-VERSION = "2.0.0-beta.0"
+VERSION = "3.0.0"
 STABLE_DIFFUSION_MODELS_FILE = "stable_diffusion_models.txt"
 APP_SETTINGS_FILE = "settings.yaml"
 CONFIG_DIRECTORY = "configs"
diff --git a/src/frontend/web/depth_to_image_ui.py b/src/frontend/web/depth_to_image_ui.py
index a4c7cd4..367e68d 100644
--- a/src/frontend/web/depth_to_image_ui.py
+++ b/src/frontend/web/depth_to_image_ui.py
@@ -114,7 +114,7 @@ def random_seed():
                 show_label=True,
                 elem_id="gallery",
             ).style(
-                grid=2,
+                columns=2,
             )
             generate_btn.click(
                 fn=generate_callback_fn,
diff --git a/src/frontend/web/image_inpainting_ui.py b/src/frontend/web/image_inpainting_ui.py
index edd9b48..3a1f05d 100644
--- a/src/frontend/web/image_inpainting_ui.py
+++ b/src/frontend/web/image_inpainting_ui.py
@@ -69,7 +69,7 @@ def random_seed():
                     label="Number of images to generate",
                 )
                 attn_slicing = gr.Checkbox(
-                    label="Attention slicing (Enable if low VRAM)",
+                    label="Attention slicing (Not used)",
                     value=True,
                 )
                 seed = gr.Number(
@@ -105,7 +105,7 @@ def random_seed():
                 show_label=True,
                 elem_id="gallery",
             ).style(
-                grid=2,
+                columns=2,
             )
             generate_btn.click(
                 fn=generate_callback_fn,
diff --git a/src/frontend/web/image_inpainting_xl_ui.py b/src/frontend/web/image_inpainting_xl_ui.py
new file mode 100644
index 0000000..be77595
--- /dev/null
+++ b/src/frontend/web/image_inpainting_xl_ui.py
@@ -0,0 +1,115 @@
+from typing import Any
+
+import gradio as gr
+
+from backend.stablediffusion.models.scheduler_types import (
+    SchedulerType,
+    get_sampler_names,
+)
+
+random_enabled = True
+
+
+def get_image_inpainting_xl_ui(generate_callback_fn: Any) -> None:
+    with gr.Blocks():
+        with gr.Row():
+            with gr.Column():
+
+                def random_seed():
+                    global random_enabled
+                    random_enabled = not random_enabled
+                    seed_val = -1
+                    if not random_enabled:
+                        seed_val = 42
+
+                    return gr.Number.update(
+                        interactive=not random_enabled, value=seed_val
+                    )
+
+                input_image = gr.Image(label="Input image", type="pil", tool="sketch")
+                prompt = gr.Textbox(
+                    label="Describe the image you'd like to see",
+                    lines=3,
+                    placeholder="A fantasy landscape",
+                )
+
+                neg_prompt = gr.Textbox(
+                    label="Don't want to see",
+                    lines=1,
+                    placeholder="",
+                    value="bad, deformed, ugly, bad anatomy",
+                )
+                with gr.Accordion("Advanced options", open=False):
+                    image_height = gr.Slider(
+                        1024, 2048, value=1024, step=64, label="Image Height"
+                    )
+                    image_width = gr.Slider(
+                        1024, 2048, value=1024, step=64, label="Image Width"
+                    )
+                    num_inference_steps = gr.Slider(
+                        1, 100, value=20, step=1, label="Inference Steps"
+                    )
+                    scheduler = gr.Dropdown(
+                        get_sampler_names(),
+                        value=SchedulerType.DPMSolverMultistepScheduler.value,
+                        label="Sampler",
+                    )
+                    guidance_scale = gr.Slider(
+                        1.0,
+                        30.0,
+                        value=7.5,
+                        step=0.5,
+                        label="Guidance Scale",
+                    )
+                    num_images = gr.Slider(
+                        1,
+                        50,
+                        value=1,
+                        step=1,
+                        label="Number of images to generate",
+                    )
+                    attn_slicing = gr.Checkbox(
+                        label="Attention slicing (Enable if low VRAM)",
+                        value=True,
+                    )
+                    seed = gr.Number(
+                        label="Seed",
+                        value=-1,
+                        precision=0,
+                        interactive=False,
+                    )
+                    seed_checkbox = gr.Checkbox(
+                        label="Use random seed",
+                        value=True,
+                        interactive=True,
+                    )
+
+                input_params = [
+                    input_image,
+                    prompt,
+                    neg_prompt,
+                    image_height,
+                    image_width,
+                    num_inference_steps,
+                    scheduler,
+                    guidance_scale,
+                    num_images,
+                    attn_slicing,
+                    seed,
+                ]
+
+            with gr.Column():
+                generate_btn = gr.Button("Inpaint!", elem_id="generate_button")
+                output = gr.Gallery(
+                    label="Generated images",
+                    show_label=True,
+                    elem_id="gallery",
+                ).style(
+                    columns=2,
+                )
+                generate_btn.click(
+                    fn=generate_callback_fn,
+                    inputs=input_params,
+                    outputs=output,
+                )
+                seed_checkbox.change(fn=random_seed, outputs=seed)
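The `.style(grid=2)` to `.style(columns=2)` edits in the gallery widgets, above and below, track the Gradio bump in environment.yml: Gradio 3.32 deprecates `Gallery.style(grid=...)` in favor of `columns=`. A minimal sketch of the updated call, mirroring the UI files in this patch:

```python
# Minimal sketch: Gallery styling after the Gradio upgrade; grid= is
# deprecated in gradio 3.32 in favor of columns=.
import gradio as gr

with gr.Blocks() as demo:
    output = gr.Gallery(
        label="Generated images",
        show_label=True,
        elem_id="gallery",
    ).style(
        columns=2,
    )
```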
diff --git a/src/frontend/web/image_to_image_ui.py b/src/frontend/web/image_to_image_ui.py
index 619e497..f9975fb 100644
--- a/src/frontend/web/image_to_image_ui.py
+++ b/src/frontend/web/image_to_image_ui.py
@@ -114,7 +114,7 @@ def random_seed():
                 show_label=True,
                 elem_id="gallery",
             ).style(
-                grid=2,
+                columns=2,
             )
             generate_btn.click(
                 fn=generate_callback_fn,
diff --git a/src/frontend/web/image_to_image_xl_ui.py b/src/frontend/web/image_to_image_xl_ui.py
new file mode 100644
index 0000000..b1bfd48
--- /dev/null
+++ b/src/frontend/web/image_to_image_xl_ui.py
@@ -0,0 +1,125 @@
+from typing import Any
+
+import gradio as gr
+
+from backend.stablediffusion.models.scheduler_types import (
+    SchedulerType,
+    get_sampler_names,
+)
+
+random_enabled = True
+
+
+def get_image_to_image_xl_ui(generate_callback_fn: Any) -> None:
+    with gr.Blocks():
+        with gr.Row():
+            with gr.Column():
+
+                def random_seed():
+                    global random_enabled
+                    random_enabled = not random_enabled
+                    seed_val = -1
+                    if not random_enabled:
+                        seed_val = 42
+
+                    return gr.Number.update(
+                        interactive=not random_enabled, value=seed_val
+                    )
+
+                input_image = gr.Image(label="Input image", type="pil")
+                strength = gr.Slider(
+                    0.0,
+                    1.0,
+                    value=0.75,
+                    step=0.05,
+                    label="Strength",
+                )
+
+                prompt = gr.Textbox(
+                    label="Describe the image you'd like to see",
+                    lines=3,
+                    placeholder="A fantasy landscape",
+                )
+
+                neg_prompt = gr.Textbox(
+                    label="Don't want to see",
+                    lines=1,
+                    placeholder="",
+                    value="bad, deformed, ugly, bad anatomy",
+                )
+                with gr.Accordion("Advanced options", open=False):
+                    image_height = gr.Slider(
+                        768, 2048, value=1024, step=64, label="Image Height"
+                    )
+                    image_width = gr.Slider(
+                        768, 2048, value=1024, step=64, label="Image Width"
+                    )
+                    num_inference_steps = gr.Slider(
+                        1, 100, value=20, step=1, label="Inference Steps"
+                    )
+                    scheduler = gr.Dropdown(
+                        get_sampler_names(),
+                        value=SchedulerType.UniPCMultistepScheduler.value,
+                        label="Sampler",
+                    )
+                    guidance_scale = gr.Slider(
+                        1.0,
+                        30.0,
+                        value=7.5,
+                        step=0.5,
+                        label="Guidance Scale",
+                    )
+                    num_images = gr.Slider(
+                        1,
+                        50,
+                        value=1,
+                        step=1,
+                        label="Number of images to generate",
+                    )
+                    attn_slicing = gr.Checkbox(
+                        label="Attention slicing (Not supported)",
+                        value=False,
+                        interactive=False,
+                    )
+                    seed = gr.Number(
+                        label="Seed",
+                        value=-1,
+                        precision=0,
+                        interactive=False,
+                    )
+                    seed_checkbox = gr.Checkbox(
+                        label="Use random seed",
+                        value=True,
+                        interactive=True,
+                    )
+
+                input_params = [
+                    input_image,
+                    strength,
+                    prompt,
+                    neg_prompt,
+                    image_height,
+                    image_width,
+                    num_inference_steps,
+                    scheduler,
+                    guidance_scale,
+                    num_images,
+                    attn_slicing,
+                    seed,
+                ]
+
+            with gr.Column():
+                generate_btn = gr.Button("Generate", elem_id="generate_button")
+                output = gr.Gallery(
+                    label="Generated images",
+                    show_label=True,
+                    elem_id="gallery",
+                ).style(
+                    columns=2,
+                )
+                generate_btn.click(
+                    fn=generate_callback_fn,
+                    inputs=input_params,
+                    outputs=output,
+                )
+                seed_checkbox.change(fn=random_seed, outputs=seed)
elem_id="generate_button") + output = gr.Gallery( + label="Generated images", + show_label=True, + elem_id="gallery", + ).style( + columns=2, + ) + generate_btn.click( + fn=generate_callback_fn, + inputs=input_params, + outputs=output, + ) + seed_checkbox.change(fn=random_seed, outputs=seed) diff --git a/src/frontend/web/image_variations_ui.py b/src/frontend/web/image_variations_ui.py index dc91fbd..0daa00d 100644 --- a/src/frontend/web/image_variations_ui.py +++ b/src/frontend/web/image_variations_ui.py @@ -99,7 +99,7 @@ def random_seed(): show_label=True, elem_id="gallery", ).style( - grid=2, + columns=2, ) generate_btn.click( fn=generate_callback_fn, diff --git a/src/frontend/web/instruct_pix_to_pix_ui.py b/src/frontend/web/instruct_pix_to_pix_ui.py index ecec6cb..b99b14b 100644 --- a/src/frontend/web/instruct_pix_to_pix_ui.py +++ b/src/frontend/web/instruct_pix_to_pix_ui.py @@ -116,7 +116,7 @@ def random_seed(): show_label=True, elem_id="gallery", ).style( - grid=2, + columns=2, ) generate_btn.click( fn=generate_callback_fn, diff --git a/src/frontend/web/text_to_image_ui.py b/src/frontend/web/text_to_image_ui.py index 79e5f1d..5d82dd6 100644 --- a/src/frontend/web/text_to_image_ui.py +++ b/src/frontend/web/text_to_image_ui.py @@ -104,7 +104,7 @@ def random_seed(): show_label=True, elem_id="gallery", ).style( - grid=2, + columns=2, ) seed_checkbox.change(fn=random_seed, outputs=seed) generate_btn.click( diff --git a/src/frontend/web/text_to_image_xl_ui.py b/src/frontend/web/text_to_image_xl_ui.py new file mode 100644 index 0000000..f93cdcc --- /dev/null +++ b/src/frontend/web/text_to_image_xl_ui.py @@ -0,0 +1,115 @@ +from typing import Any + +import gradio as gr + +from backend.stablediffusion.models.scheduler_types import ( + SchedulerType, + get_sampler_names, +) + +random_enabled = True + + +def get_text_to_image_xl_ui(generate_callback_fn: Any) -> None: + with gr.Blocks(): + with gr.Row(): + with gr.Column(): + + def random_seed(): + global random_enabled + random_enabled = not random_enabled + seed_val = -1 + if not random_enabled: + seed_val = 42 + return gr.Number.update( + interactive=not random_enabled, value=seed_val + ) + + # with gr.Row(): + prompt = gr.Textbox( + label="Describe the image you'd like to see", + lines=3, + placeholder="A fantasy landscape", + ) + neg_prompt = gr.Textbox( + label="Don't want to see", + lines=1, + placeholder="", + value="bad, deformed, ugly, bad anatomy", + ) + with gr.Accordion("Advanced options", open=False): + image_height = gr.Slider( + 768, 2048, value=1024, step=64, label="Image Height" + ) + image_width = gr.Slider( + 768, 2048, value=1024, step=64, label="Image Width" + ) + num_inference_steps = gr.Slider( + 1, 100, value=20, step=1, label="Inference Steps" + ) + scheduler = gr.Dropdown( + get_sampler_names(), + value=SchedulerType.LMSDiscreteScheduler.value, + label="Sampler", + ) + guidance_scale = gr.Slider( + 1.0, 30.0, value=7.5, step=0.5, label="Guidance Scale" + ) + num_images = gr.Slider( + 1, + 50, + value=1, + step=1, + label="Number of images to generate", + ) + attn_slicing = gr.Checkbox( + label="Attention slicing (Not supported)", + value=False, + interactive=False, + ) + + vae_slicing = gr.Checkbox( + label="VAE slicing (Enable if low VRAM)", + value=True, + ) + seed = gr.Number( + label="Seed", + value=-1, + precision=0, + interactive=False, + ) + seed_checkbox = gr.Checkbox( + label="Use random seed", + value=True, + interactive=True, + ) + + input_params = [ + prompt, + neg_prompt, + image_height, + image_width, + 
diff --git a/src/frontend/web/ui.py b/src/frontend/web/ui.py
index 9b4f88d..10de569 100644
--- a/src/frontend/web/ui.py
+++ b/src/frontend/web/ui.py
@@ -12,6 +12,8 @@
 from frontend.web.instruct_pix_to_pix_ui import get_instruct_pix_to_pix_ui
 from frontend.web.settings_ui import get_settings_ui
 from frontend.web.text_to_image_ui import get_text_to_image_ui
+from frontend.web.text_to_image_xl_ui import get_text_to_image_xl_ui
+from frontend.web.image_to_image_xl_ui import get_image_to_image_xl_ui
 from frontend.web.controlnet.controlnet_image_ui import get_controlnet_to_image_ui
 from settings import AppSettings
 from utils import DiffusionMagicPaths
@@ -34,7 +36,7 @@ def diffusionmagic_web_ui(generate: Generate) -> gr.Blocks:
         css=DiffusionMagicPaths.get_css_path(),
         title="DiffusionMagic",
     ) as diffusion_magic_ui:
-        gr.HTML("