diff --git a/.changeset/afraid-cougars-rescue.md b/.changeset/afraid-cougars-rescue.md new file mode 100644 index 000000000000..69ee770c78be --- /dev/null +++ b/.changeset/afraid-cougars-rescue.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Better test dir check diff --git a/.changeset/bright-planes-divide.md b/.changeset/bright-planes-divide.md new file mode 100644 index 000000000000..1751b8cce923 --- /dev/null +++ b/.changeset/bright-planes-divide.md @@ -0,0 +1,5 @@ +--- +"gradio": patch +--- + +fix: ensure all relevant packages are available to the custom component CLI diff --git a/.changeset/bright-yaks-wink.md b/.changeset/bright-yaks-wink.md new file mode 100644 index 000000000000..73eecc59132e --- /dev/null +++ b/.changeset/bright-yaks-wink.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:V4: Some misc fixes diff --git a/.changeset/cold-hoops-heal.md b/.changeset/cold-hoops-heal.md new file mode 100644 index 000000000000..2658d22210c6 --- /dev/null +++ b/.changeset/cold-hoops-heal.md @@ -0,0 +1,12 @@ +--- +"@gradio/atoms": minor +"@gradio/column": minor +"@gradio/icons": minor +"@gradio/statustracker": minor +"@gradio/tooltip": minor +"@gradio/upload": minor +"@gradio/utils": minor +"gradio": minor +--- + +feat:release first version \ No newline at end of file diff --git a/.changeset/cold-lemons-roll.md b/.changeset/cold-lemons-roll.md new file mode 100644 index 000000000000..828bc1ddb3c0 --- /dev/null +++ b/.changeset/cold-lemons-roll.md @@ -0,0 +1,33 @@ +--- +"@gradio/accordion": minor +"@gradio/annotatedimage": minor +"@gradio/app": minor +"@gradio/audio": minor +"@gradio/chatbot": minor +"@gradio/checkbox": minor +"@gradio/checkboxgroup": minor +"@gradio/code": minor +"@gradio/colorpicker": minor +"@gradio/dataframe": minor +"@gradio/dropdown": minor +"@gradio/fallback": minor +"@gradio/file": minor +"@gradio/gallery": minor +"@gradio/highlightedtext": minor +"@gradio/html": minor +"@gradio/image": minor +"@gradio/json": minor +"@gradio/label": minor +"@gradio/markdown": minor +"@gradio/model3d": minor +"@gradio/number": minor +"@gradio/plot": minor +"@gradio/radio": minor +"@gradio/slider": minor +"@gradio/statustracker": minor +"@gradio/textbox": minor +"@gradio/video": minor +"gradio": minor +--- + +feat:fix build and broken imports diff --git a/.changeset/dark-cups-see.md b/.changeset/dark-cups-see.md new file mode 100644 index 000000000000..50f8ea879105 --- /dev/null +++ b/.changeset/dark-cups-see.md @@ -0,0 +1,5 @@ +--- +"@gradio/preview": minor +--- + +feat:Fix windows paths diff --git a/.changeset/dirty-ghosts-tickle.md b/.changeset/dirty-ghosts-tickle.md new file mode 100644 index 000000000000..84898c9d8508 --- /dev/null +++ b/.changeset/dirty-ghosts-tickle.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Patch fixes diff --git a/.changeset/dry-points-join.md b/.changeset/dry-points-join.md new file mode 100644 index 000000000000..2b84d8430c35 --- /dev/null +++ b/.changeset/dry-points-join.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Fix js deps in cli and add gradio-preview artifacts to build diff --git a/.changeset/easy-mirrors-retire.md b/.changeset/easy-mirrors-retire.md new file mode 100644 index 000000000000..a0db5ba43f6e --- /dev/null +++ b/.changeset/easy-mirrors-retire.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Add docstring to trigger release diff --git a/.changeset/eleven-steaks-tan.md b/.changeset/eleven-steaks-tan.md new file mode 100644 index 000000000000..11956f6d6e90 --- /dev/null +++ b/.changeset/eleven-steaks-tan.md 
@@ -0,0 +1,6 @@ +--- +"@gradio/preview": minor +"gradio": minor +--- + +feat:Add host to dev mode for vite diff --git a/.changeset/empty-bobcats-judge.md b/.changeset/empty-bobcats-judge.md new file mode 100644 index 000000000000..96175a056119 --- /dev/null +++ b/.changeset/empty-bobcats-judge.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Make layout components templateable diff --git a/.changeset/fresh-ears-pump.md b/.changeset/fresh-ears-pump.md new file mode 100644 index 000000000000..b7b7f8bd4270 --- /dev/null +++ b/.changeset/fresh-ears-pump.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Use overrides diff --git a/.changeset/great-rice-grab.md b/.changeset/great-rice-grab.md new file mode 100644 index 000000000000..057c68e7d98b --- /dev/null +++ b/.changeset/great-rice-grab.md @@ -0,0 +1,6 @@ +--- +"@gradio/preview": minor +"gradio": minor +--- + +feat:Use tags to identify custom component dirs and ignore uninstalled components diff --git a/.changeset/heavy-animals-think.md b/.changeset/heavy-animals-think.md new file mode 100644 index 000000000000..7e2e4b1952a1 --- /dev/null +++ b/.changeset/heavy-animals-think.md @@ -0,0 +1,6 @@ +--- +"@gradio/app": patch +"gradio": patch +--- + +feat:Fix windows ci build diff --git a/.changeset/hip-drinks-bow.md b/.changeset/hip-drinks-bow.md new file mode 100644 index 000000000000..2bf10b687857 --- /dev/null +++ b/.changeset/hip-drinks-bow.md @@ -0,0 +1,8 @@ +--- +"@gradio/app": minor +"@gradio/image": minor +"@gradio/theme": minor +"gradio": minor +--- + +feat:image fixes diff --git a/.changeset/hungry-melons-pump.md b/.changeset/hungry-melons-pump.md new file mode 100644 index 000000000000..94f8be5f73fd --- /dev/null +++ b/.changeset/hungry-melons-pump.md @@ -0,0 +1,10 @@ +--- +"@gradio/app": minor +"@gradio/dataset": minor +"@gradio/preview": minor +"@gradio/state": minor +"gradio": minor +"newnewtext": minor +--- + +feat: Adds the ability to build the frontend and backend of custom components in preparation for publishing to pypi using `gradio_component build`. 
diff --git a/.changeset/icy-cars-boil.md b/.changeset/icy-cars-boil.md new file mode 100644 index 000000000000..5c263477f7b2 --- /dev/null +++ b/.changeset/icy-cars-boil.md @@ -0,0 +1,6 @@ +--- +"@gradio/audio": minor +"gradio": minor +--- + +feat:Fix deployed demos on v4 branch diff --git a/.changeset/large-banks-push.md b/.changeset/large-banks-push.md new file mode 100644 index 000000000000..91840ae7e2db --- /dev/null +++ b/.changeset/large-banks-push.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Use path to npm executable in subprocess diff --git a/.changeset/lazy-aliens-drive.md b/.changeset/lazy-aliens-drive.md new file mode 100644 index 000000000000..88cf59e83001 --- /dev/null +++ b/.changeset/lazy-aliens-drive.md @@ -0,0 +1,6 @@ +--- +"@gradio/theme": patch +"gradio": patch +--- + +feat:Publish js theme diff --git a/.changeset/lovely-news-speak.md b/.changeset/lovely-news-speak.md new file mode 100644 index 000000000000..9ab44daaf6a3 --- /dev/null +++ b/.changeset/lovely-news-speak.md @@ -0,0 +1,6 @@ +--- +"gradio": minor +"gradio_client": minor +--- + +feat:V4 fix typing diff --git a/.changeset/modern-forks-march.md b/.changeset/modern-forks-march.md new file mode 100644 index 000000000000..938d03bc3166 --- /dev/null +++ b/.changeset/modern-forks-march.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Set api=False for cancel events diff --git a/.changeset/nasty-dryers-show.md b/.changeset/nasty-dryers-show.md new file mode 100644 index 000000000000..6b7f852e16dd --- /dev/null +++ b/.changeset/nasty-dryers-show.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Use full path to executables in CLI diff --git a/.changeset/nice-actors-write.md b/.changeset/nice-actors-write.md new file mode 100644 index 000000000000..5e25404577f7 --- /dev/null +++ b/.changeset/nice-actors-write.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Fix component regex diff --git a/.changeset/old-heads-give.md b/.changeset/old-heads-give.md new file mode 100644 index 000000000000..7212fdff8c4a --- /dev/null +++ b/.changeset/old-heads-give.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Add Error + test diff --git a/.changeset/plain-groups-win.md b/.changeset/plain-groups-win.md new file mode 100644 index 000000000000..bb5d2f473f76 --- /dev/null +++ b/.changeset/plain-groups-win.md @@ -0,0 +1,6 @@ +--- +"gradio": minor +"gradio_client": minor +--- + +feat:Fix python unit tests for v4 diff --git a/.changeset/plenty-teeth-clap.md b/.changeset/plenty-teeth-clap.md new file mode 100644 index 000000000000..595c4fda7d44 --- /dev/null +++ b/.changeset/plenty-teeth-clap.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Fix template remaining components diff --git a/.changeset/pre.json b/.changeset/pre.json new file mode 100644 index 000000000000..4de969e63932 --- /dev/null +++ b/.changeset/pre.json @@ -0,0 +1,104 @@ +{ + "mode": "pre", + "tag": "beta", + "initialVersions": { + "@gradio/client": "0.3.1", + "gradio_client": "0.5.0", + "gradio": "3.43.2", + "@gradio/cdn-test": "0.0.0", + "@gradio/spaces-test": "0.0.1", + "website": "0.4.0", + "@gradio/accordion": "0.0.4", + "@gradio/annotatedimage": "0.1.2", + "@gradio/app": "1.4.3", + "@gradio/atoms": "0.1.2", + "@gradio/audio": "0.3.2", + "@gradio/box": "0.0.4", + "@gradio/button": "0.1.3", + "@gradio/chatbot": "0.3.1", + "@gradio/checkbox": "0.1.3", + "@gradio/checkboxgroup": "0.1.2", + "@gradio/code": "0.1.2", + "@gradio/colorpicker": "0.1.2", + "@gradio/column": "0.0.1", + "@gradio/dataframe": "0.2.2", + "@gradio/dropdown": "0.1.3", + 
"@gradio/file": "0.1.2", + "@gradio/form": "0.0.5", + "@gradio/gallery": "0.3.2", + "@gradio/group": "0.0.1", + "@gradio/highlightedtext": "0.2.3", + "@gradio/html": "0.0.4", + "@gradio/icons": "0.1.0", + "@gradio/image": "0.2.2", + "@gradio/json": "0.0.5", + "@gradio/label": "0.1.2", + "@gradio/lite": "0.3.1", + "@gradio/markdown": "0.2.0", + "@gradio/model3d": "0.2.1", + "@gradio/number": "0.2.2", + "@gradio/plot": "0.1.2", + "@gradio/radio": "0.1.2", + "@gradio/row": "0.0.1", + "@gradio/slider": "0.1.2", + "@gradio/state": "0.0.1", + "@gradio/statustracker": "0.2.0", + "@gradio/tabitem": "0.0.4", + "@gradio/tabs": "0.0.5", + "@gradio/textbox": "0.2.0", + "@gradio/theme": "0.1.0", + "@gradio/timeseries": "0.0.5", + "@gradio/tooltip": "0.0.1", + "@gradio/tootils": "0.0.2", + "@gradio/upload": "0.2.1", + "@gradio/uploadbutton": "0.0.5", + "@gradio/utils": "0.1.1", + "@gradio/video": "0.0.6", + "@gradio/wasm": "0.0.1", + "@gradio/fallback": "0.1.1", + "@gradio/preview": "0.0.2" + }, + "changesets": [ + "afraid-cougars-rescue", + "bright-planes-divide", + "chatty-adults-reply", + "chubby-hounds-itch", + "cold-hoops-heal", + "cold-lemons-roll", + "cold-lights-trade", + "dark-cups-see", + "dirty-ghosts-tickle", + "dry-points-join", + "easy-mirrors-retire", + "empty-bobcats-judge", + "fresh-ears-pump", + "great-mammals-lead", + "heavy-animals-think", + "hip-drinks-bow", + "hot-words-sin", + "large-banks-push", + "lazy-aliens-drive", + "lovely-news-speak", + "lovely-radios-worry", + "many-tips-create", + "nice-actors-write", + "old-heads-give", + "plain-groups-win", + "plenty-teeth-clap", + "puny-papayas-bake", + "purple-jokes-shake", + "real-items-cover", + "rich-points-dance", + "sad-ears-sink", + "sad-eels-sink", + "short-clouds-see", + "silver-beers-refuse", + "slick-pants-stand", + "smart-groups-study", + "some-shoes-relate", + "strong-peas-tell", + "three-trams-sniff", + "true-bugs-shine", + "wet-places-hunt" + ] +} diff --git a/.changeset/public-chairs-chew.md b/.changeset/public-chairs-chew.md new file mode 100644 index 000000000000..c22b75e29b68 --- /dev/null +++ b/.changeset/public-chairs-chew.md @@ -0,0 +1,5 @@ +--- +"@gradio/preview": minor +--- + +feat:In dev/build use full path to python/gradio executables diff --git a/.changeset/purple-jokes-shake.md b/.changeset/purple-jokes-shake.md new file mode 100644 index 000000000000..86a7d6c99380 --- /dev/null +++ b/.changeset/purple-jokes-shake.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:backend linting diff --git a/.changeset/real-spoons-pick.md b/.changeset/real-spoons-pick.md new file mode 100644 index 000000000000..1c6b500e6dee --- /dev/null +++ b/.changeset/real-spoons-pick.md @@ -0,0 +1,6 @@ +--- +"@gradio/preview": patch +"gradio": patch +--- + +fix:Better logs in dev mode diff --git a/.changeset/sad-ears-sink.md b/.changeset/sad-ears-sink.md new file mode 100644 index 000000000000..7e205bbc7ec4 --- /dev/null +++ b/.changeset/sad-ears-sink.md @@ -0,0 +1,7 @@ +--- +"@gradio/preview": minor +"@gradio/utils": minor +"gradio": minor +--- + +feat:Fix esbuild diff --git a/.changeset/sad-eels-sink.md b/.changeset/sad-eels-sink.md new file mode 100644 index 000000000000..4d2fc8609738 --- /dev/null +++ b/.changeset/sad-eels-sink.md @@ -0,0 +1,7 @@ +--- +"@gradio/preview": minor +"@gradio/utils": minor +"gradio": minor +--- + +feat:Fix esbuild \ No newline at end of file diff --git a/.changeset/short-clouds-see.md b/.changeset/short-clouds-see.md new file mode 100644 index 000000000000..b42755253b08 --- /dev/null +++ 
b/.changeset/short-clouds-see.md @@ -0,0 +1,6 @@ +--- +"@gradio/preview": minor +"gradio": minor +--- + +feat:Fix front-end imports + other misc fixes diff --git a/.changeset/silver-beers-refuse.md b/.changeset/silver-beers-refuse.md new file mode 100644 index 000000000000..a111d4ce39e6 --- /dev/null +++ b/.changeset/silver-beers-refuse.md @@ -0,0 +1,41 @@ +--- +"@gradio/audio": patch +"@gradio/box": patch +"@gradio/button": patch +"@gradio/chatbot": patch +"@gradio/checkbox": patch +"@gradio/checkboxgroup": patch +"@gradio/code": patch +"@gradio/colorpicker": patch +"@gradio/column": patch +"@gradio/dataframe": patch +"@gradio/dropdown": patch +"@gradio/fallback": patch +"@gradio/file": patch +"@gradio/form": patch +"@gradio/gallery": patch +"@gradio/group": patch +"@gradio/highlightedtext": patch +"@gradio/html": patch +"@gradio/image": patch +"@gradio/json": patch +"@gradio/label": patch +"@gradio/markdown": patch +"@gradio/model3d": patch +"@gradio/number": patch +"@gradio/plot": patch +"@gradio/preview": patch +"@gradio/radio": patch +"@gradio/row": patch +"@gradio/slider": patch +"@gradio/state": patch +"@gradio/tabitem": patch +"@gradio/tabs": patch +"@gradio/textbox": patch +"@gradio/tootils": patch +"@gradio/uploadbutton": patch +"@gradio/video": patch +"gradio": patch +--- + +feat:Publish all components to npm diff --git a/.changeset/slick-bats-study.md b/.changeset/slick-bats-study.md new file mode 100644 index 000000000000..f7096d564313 --- /dev/null +++ b/.changeset/slick-bats-study.md @@ -0,0 +1,6 @@ +--- +"gradio": minor +"gradio_client": minor +--- + +feat:Simplify how files are handled in components in 4.0 diff --git a/.changeset/slick-pants-stand.md b/.changeset/slick-pants-stand.md new file mode 100644 index 000000000000..b9b2298a8d7c --- /dev/null +++ b/.changeset/slick-pants-stand.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Add overwrite flag to create command diff --git a/.changeset/smart-groups-study.md b/.changeset/smart-groups-study.md new file mode 100644 index 000000000000..2e26def430ef --- /dev/null +++ b/.changeset/smart-groups-study.md @@ -0,0 +1,42 @@ +--- +"@gradio/accordion": minor +"@gradio/annotatedimage": minor +"@gradio/app": minor +"@gradio/atoms": minor +"@gradio/audio": minor +"@gradio/button": minor +"@gradio/chatbot": minor +"@gradio/checkbox": minor +"@gradio/checkboxgroup": minor +"@gradio/code": minor +"@gradio/colorpicker": minor +"@gradio/dataframe": minor +"@gradio/dropdown": minor +"@gradio/fallback": minor +"@gradio/file": minor +"@gradio/gallery": minor +"@gradio/highlightedtext": minor +"@gradio/html": minor +"@gradio/image": minor +"@gradio/json": minor +"@gradio/label": minor +"@gradio/markdown": minor +"@gradio/model3d": minor +"@gradio/number": minor +"@gradio/plot": minor +"@gradio/preview": minor +"@gradio/radio": minor +"@gradio/slider": minor +"@gradio/statustracker": minor +"@gradio/textbox": minor +"@gradio/theme": minor +"@gradio/tootils": minor +"@gradio/upload": minor +"@gradio/uploadbutton": minor +"@gradio/utils": minor +"@gradio/video": minor +"gradio": minor +"gradio_client": minor +--- + +feat:Custom components diff --git a/.changeset/some-shoes-relate.md b/.changeset/some-shoes-relate.md new file mode 100644 index 000000000000..490c8e6db083 --- /dev/null +++ b/.changeset/some-shoes-relate.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Fix layout templates diff --git a/.changeset/strong-peas-tell.md b/.changeset/strong-peas-tell.md new file mode 100644 index 000000000000..5792439a778e --- /dev/null +++ 
b/.changeset/strong-peas-tell.md @@ -0,0 +1,6 @@ +--- +"@gradio/app": minor +"gradio": minor +--- + +feat:Fix build and file route diff --git a/.changeset/tame-chairs-tan.md b/.changeset/tame-chairs-tan.md new file mode 100644 index 000000000000..6714bd3b388e --- /dev/null +++ b/.changeset/tame-chairs-tan.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Name Endpoints if api_name is None diff --git a/.changeset/three-trams-sniff.md b/.changeset/three-trams-sniff.md new file mode 100644 index 000000000000..074ff8fbace5 --- /dev/null +++ b/.changeset/three-trams-sniff.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:Some minor v4 fixes diff --git a/.changeset/true-bugs-shine.md b/.changeset/true-bugs-shine.md new file mode 100644 index 000000000000..efd4620c8234 --- /dev/null +++ b/.changeset/true-bugs-shine.md @@ -0,0 +1,6 @@ +--- +"gradio": minor +"gradio_client": minor +--- + +feat:Support call method diff --git a/.changeset/twenty-gifts-tickle.md b/.changeset/twenty-gifts-tickle.md new file mode 100644 index 000000000000..b3ba02e3dd43 --- /dev/null +++ b/.changeset/twenty-gifts-tickle.md @@ -0,0 +1,6 @@ +--- +"gradio": minor +"gradio_client": minor +--- + +feat:Rename gradio_component to gradio component diff --git a/.changeset/two-games-dress.md b/.changeset/two-games-dress.md new file mode 100644 index 000000000000..c4ab877006dd --- /dev/null +++ b/.changeset/two-games-dress.md @@ -0,0 +1,6 @@ +--- +"@gradio/app": minor +"gradio": minor +--- + +fix:Reinstate types that were removed in error in #5832. diff --git a/.changeset/two-mirrors-nail.md b/.changeset/two-mirrors-nail.md new file mode 100644 index 000000000000..c70eed94d044 --- /dev/null +++ b/.changeset/two-mirrors-nail.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:V4: Use async version of shutil in upload route diff --git a/.changeset/vast-terms-rhyme.md b/.changeset/vast-terms-rhyme.md new file mode 100644 index 000000000000..b203aae5cc70 --- /dev/null +++ b/.changeset/vast-terms-rhyme.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:V4: Set cache dir for some component tests diff --git a/.changeset/wet-places-hunt.md b/.changeset/wet-places-hunt.md new file mode 100644 index 000000000000..9ce5d8730313 --- /dev/null +++ b/.changeset/wet-places-hunt.md @@ -0,0 +1,5 @@ +--- +"gradio": minor +--- + +feat:--overwrite deletes previous content diff --git a/.config/copy_frontend.py b/.config/copy_frontend.py new file mode 100644 index 000000000000..acfdfb20c923 --- /dev/null +++ b/.config/copy_frontend.py @@ -0,0 +1,38 @@ +from __future__ import annotations + +import shutil +import pathlib +from typing import Any + +from hatchling.builders.hooks.plugin.interface import BuildHookInterface + + +class BuildHook(BuildHookInterface): + def initialize(self, version: str, build_data: dict[str, Any]) -> None: + NOT_COMPONENT = [ + "app", + "node_modules", + "storybook", + "playwright-report", + "wasm", + "workbench", + "tooltils", + ] + for entry in (pathlib.Path(self.root) / "js").iterdir(): + if ( + entry.is_dir() + and not str(entry.name).startswith("_") + and not str(entry.name) in NOT_COMPONENT + ): + shutil.copytree( + str(entry), + str(pathlib.Path("gradio") / "_frontend_code" / entry.name), + ignore=lambda d, names: ["node_modules"], + dirs_exist_ok=True, + ) + shutil.copytree( + str(pathlib.Path(self.root) / "client" / "js"), + str(pathlib.Path("gradio") / "_frontend_code" / "client"), + ignore=lambda d, names: ["node_modules"], + dirs_exist_ok=True, + ) diff --git a/.config/eslint.config.js 
b/.config/eslint.config.js index 41f34981991b..8f4c57e68674 100644 --- a/.config/eslint.config.js +++ b/.config/eslint.config.js @@ -18,7 +18,7 @@ const js_rules_disabled = Object.fromEntries( const js_rules = { ...js_rules_disabled, - "no-console": ["error", { allow: ["warn", "error", "debug"] }], + "no-console": ["error", { allow: ["warn", "error", "debug", "info"] }], "no-constant-condition": "error", "no-dupe-args": "error", "no-extra-boolean-cast": "error", @@ -60,7 +60,8 @@ export default [ "js/app/test/**/*", "**/*vite.config.ts", "**/_website/**/*", - "**/_spaces-test/**/*" + "**/_spaces-test/**/*", + "**/preview/test/**/*" ] }, { diff --git a/.config/playwright/index.html b/.config/playwright/index.html deleted file mode 100644 index 229f296a9e5a..000000000000 --- a/.config/playwright/index.html +++ /dev/null @@ -1,12 +0,0 @@ - - - - - - Testing Page - - -
- - - diff --git a/.config/playwright/index.ts b/.config/playwright/index.ts deleted file mode 100644 index ac6de14bf2ed..000000000000 --- a/.config/playwright/index.ts +++ /dev/null @@ -1,2 +0,0 @@ -// Import styles, initialize component theme here. -// import '../src/common.css'; diff --git a/.github/actions/install-all-deps/action.yml b/.github/actions/install-all-deps/action.yml index 87755a5066d5..450c34aa88b6 100644 --- a/.github/actions/install-all-deps/action.yml +++ b/.github/actions/install-all-deps/action.yml @@ -53,8 +53,8 @@ runs: node_auth_token: ${{ inputs.node_auth_token }} npm_token: ${{ inputs.npm_token }} skip_build: ${{ inputs.skip_build }} - - name: generate json - shell: bash - run: | - . venv/bin/activate - python js/_website/generate_jsons/generate.py + # - name: generate json + # shell: bash + # run: | + # . venv/bin/activate + # python js/_website/generate_jsons/generate.py diff --git a/.github/workflows/backend.yml b/.github/workflows/backend.yml index 90b0f796e2ce..0e44af19801b 100644 --- a/.github/workflows/backend.yml +++ b/.github/workflows/backend.yml @@ -49,7 +49,7 @@ jobs: test-type: ["not flaky", "flaky"] python-version: ["3.8"] runs-on: ${{ matrix.os }} - continue-on-error: ${{ matrix.test-type == 'flaky' }} + continue-on-error: true steps: - uses: actions/checkout@v3 - name: Install Python @@ -88,7 +88,7 @@ jobs: node-version: 18 cache: pnpm cache-dependency-path: pnpm-lock.yaml - - name: Build frontend + - name: Build Frontend if: steps.frontend-cache.outputs.cache-hit != 'true' run: | pnpm i --frozen-lockfile --ignore-scripts @@ -141,7 +141,7 @@ jobs: test-type: ["not flaky", "flaky"] python-version: ["3.8"] runs-on: ${{ matrix.os }} - continue-on-error: ${{ matrix.test-type == 'flaky' }} + continue-on-error: true steps: - uses: actions/checkout@v3 - name: Install Python diff --git a/.github/workflows/build-pr.yml b/.github/workflows/build-pr.yml index d048dd52989c..2c8e616ca962 100644 --- a/.github/workflows/build-pr.yml +++ b/.github/workflows/build-pr.yml @@ -5,6 +5,7 @@ on: pull_request: branches: - main + - v4 jobs: comment-spaces-start: diff --git a/.github/workflows/publish-npm.yml b/.github/workflows/publish-npm.yml index 11875590c831..a60d0fbceb1a 100644 --- a/.github/workflows/publish-npm.yml +++ b/.github/workflows/publish-npm.yml @@ -26,11 +26,7 @@ jobs: npm_token: ${{ secrets.NPM_TOKEN }} skip_build: 'true' - name: Build packages - run: | - . 
venv/bin/activate - pip install build - pnpm css - pnpm --filter @gradio/client --filter @gradio/lite build + run: pnpm --filter @gradio/client build - name: create and publish versions id: changesets uses: changesets/action@v1 diff --git a/.gitignore b/.gitignore index 29f6cd4ee03b..d4f4fc2f6c2a 100644 --- a/.gitignore +++ b/.gitignore @@ -8,10 +8,14 @@ __pycache__/ *$py.class build/ __tmp/* +*.pyi + # JS build -gradio/templates/cdn -gradio/templates/frontend +gradio/templates/* +gradio/node/* +gradio/_frontend_code/* +js/gradio-preview/test/* # Secrets .env diff --git a/client/js/vite.config.js b/client/js/vite.config.js index 5edaed279e65..b3b202054a5f 100644 --- a/client/js/vite.config.js +++ b/client/js/vite.config.js @@ -2,7 +2,6 @@ import { defineConfig } from "vite"; export default defineConfig({ build: { - // minify: true, lib: { entry: "src/index.ts", formats: ["es"] diff --git a/client/python/gradio_client/cli/deploy_discord.py b/client/python/gradio_client/cli/deploy_discord.py index bb26c088abee..9a828a32a065 100644 --- a/client/python/gradio_client/cli/deploy_discord.py +++ b/client/python/gradio_client/cli/deploy_discord.py @@ -1,58 +1,48 @@ -import argparse +from typing import List, Optional + +from typer import Option +from typing_extensions import Annotated from gradio_client import Client -def main(): - parser = argparse.ArgumentParser(description="Deploy Space as Discord Bot.") - parser.add_argument("deploy-discord") - parser.add_argument( - "--src", - type=str, - help="The space id or url or gradio app you want to deploy as a gradio bot.", - ) - parser.add_argument( - "--discord-bot-token", - type=str, - help="Discord bot token. Get one on the discord website.", - ) - parser.add_argument( - "--api-names", - nargs="*", - help="Api names to turn into discord bots", - default=[], - ) - parser.add_argument( - "--to-id", - type=str, - help="Name of the space used to host the discord bot", - default=None, - ) - parser.add_argument( - "--hf-token", - type=str, - help=( - "Hugging Face token. Can be ommitted if you are logged in via huggingface_hub cli. " - "Must be provided if upstream space is private." +def main( + src: Annotated[ + Optional[str], + Option( + help="The space id or url or gradio app you want to deploy as a gradio bot." ), - default=None, - ) - parser.add_argument( - "--private", - type=bool, - nargs="?", - help="Whether the discord bot space is private.", - const=True, - default=False, - ) - args = parser.parse_args() - for i, name in enumerate(args.api_names): + ] = None, + discord_bot_token: Annotated[ + str, Option(help="Discord bot token. Get one on the discord website.") + ] = None, + api_names: Annotated[ + List[str], Option(help="Api names to turn into discord bots") + ] = None, + to_id: Annotated[ + Optional[str], Option(help="Name of the space used to host the discord bot") + ] = None, + hf_token: Annotated[ + Optional[str], + Option( + help=( + "Hugging Face token. Can be ommitted if you are logged in via huggingface_hub cli. " + "Must be provided if upstream space is private." 
+ ) + ), + ] = None, + private: Annotated[ + bool, Option(help="Whether the discord bot space is private.") + ] = False, +): + for i, name in enumerate(api_names): if "," in name: - args.api_names[i] = tuple(name.split(",")) - Client(args.src).deploy_discord( - discord_bot_token=args.discord_bot_token, - api_names=args.api_names, - to_id=args.to_id, - hf_token=args.hf_token, - private=args.private, + api_names[i] = tuple(name.split(",")) + + Client(src).deploy_discord( + discord_bot_token=discord_bot_token, + api_names=api_names, + to_id=to_id, + hf_token=hf_token, + private=private, ) diff --git a/client/python/gradio_client/client.py b/client/python/gradio_client/client.py index f6a734512fc4..5b3c7a77cc54 100644 --- a/client/python/gradio_client/client.py +++ b/client/python/gradio_client/client.py @@ -13,6 +13,7 @@ import uuid import warnings from concurrent.futures import Future +from dataclasses import dataclass from datetime import datetime from pathlib import Path from threading import Lock @@ -29,10 +30,8 @@ ) from packaging import version -from gradio_client import serializing, utils +from gradio_client import utils from gradio_client.documentation import document, set_documentation_group -from gradio_client.exceptions import SerializationSetupError -from gradio_client.serializing import Serializable from gradio_client.utils import ( Communicator, JobStatus, @@ -72,7 +71,7 @@ def __init__( hf_token: str | None = None, max_workers: int = 40, serialize: bool = True, - output_dir: str | Path | None = DEFAULT_TEMP_DIR, + output_dir: str | Path = DEFAULT_TEMP_DIR, verbose: bool = True, ): """ @@ -93,7 +92,9 @@ def __init__( library_version=utils.__version__, ) self.space_id = None - self.output_dir = output_dir + self.output_dir = ( + str(output_dir) if isinstance(output_dir, Path) else output_dir + ) if src.startswith("http://") or src.startswith("https://"): _src = src if src.endswith("/") else src + "/" @@ -127,6 +128,7 @@ def __init__( self.upload_url = urllib.parse.urljoin(self.src, utils.UPLOAD_URL) self.reset_url = urllib.parse.urljoin(self.src, utils.RESET_URL) self.config = self._get_config() + self._info = self._get_api_info() self.session_hash = str(uuid.uuid4()) self.endpoints = [ @@ -352,6 +354,32 @@ def fn(future): return job + def _get_api_info(self): + if self.serialize: + api_info_url = urllib.parse.urljoin(self.src, utils.API_INFO_URL) + else: + api_info_url = urllib.parse.urljoin(self.src, utils.RAW_API_INFO_URL) + + # Versions of Gradio older than 3.29.0 returned format of the API info + # from the /info endpoint + if version.parse(self.config.get("version", "2.0")) > version.Version("3.36.1"): + r = requests.get(api_info_url, headers=self.headers) + if r.ok: + info = r.json() + else: + raise ValueError(f"Could not fetch api info for {self.src}") + else: + fetch = requests.post( + utils.SPACE_FETCHER_URL, + json={"config": json.dumps(self.config), "serialize": self.serialize}, + ) + if fetch.ok: + info = fetch.json()["api"] + else: + raise ValueError(f"Could not fetch api info for {self.src}") + + return info + def view_api( self, all_endpoints: bool | None = None, @@ -427,42 +455,20 @@ def view_api( } """ - if self.serialize: - api_info_url = urllib.parse.urljoin(self.src, utils.API_INFO_URL) - else: - api_info_url = urllib.parse.urljoin(self.src, utils.RAW_API_INFO_URL) - - # Versions of Gradio older than 3.29.0 returned format of the API info - # from the /info endpoint - if version.parse(self.config.get("version", "2.0")) > version.Version("3.36.1"): - r = 
requests.get(api_info_url, headers=self.headers) - if r.ok: - info = r.json() - else: - raise ValueError(f"Could not fetch api info for {self.src}") - else: - fetch = requests.post( - utils.SPACE_FETCHER_URL, - json={"config": json.dumps(self.config), "serialize": self.serialize}, - ) - if fetch.ok: - info = fetch.json()["api"] - else: - raise ValueError(f"Could not fetch api info for {self.src}") - num_named_endpoints = len(info["named_endpoints"]) - num_unnamed_endpoints = len(info["unnamed_endpoints"]) + num_named_endpoints = len(self._info["named_endpoints"]) + num_unnamed_endpoints = len(self._info["unnamed_endpoints"]) if num_named_endpoints == 0 and all_endpoints is None: all_endpoints = True human_info = "Client.predict() Usage Info\n---------------------------\n" human_info += f"Named API endpoints: {num_named_endpoints}\n" - for api_name, endpoint_info in info["named_endpoints"].items(): + for api_name, endpoint_info in self._info["named_endpoints"].items(): human_info += self._render_endpoints_info(api_name, endpoint_info) if all_endpoints: human_info += f"\nUnnamed API endpoints: {num_unnamed_endpoints}\n" - for fn_index, endpoint_info in info["unnamed_endpoints"].items(): + for fn_index, endpoint_info in self._info["unnamed_endpoints"].items(): # When loading from json, the fn_indices are read as strings # because json keys can only be strings human_info += self._render_endpoints_info(int(fn_index), endpoint_info) @@ -475,7 +481,7 @@ def view_api( if return_format == "str": return human_info elif return_format == "dict": - return info + return self._info def reset_session(self) -> None: self.session_hash = str(uuid.uuid4()) @@ -661,12 +667,8 @@ def deploy_discord( raise ValueError( f"api_name {api_names[0][0]} not present in {self.space_id or self.src}" ) - inputs = [ - inp for inp in fn.input_component_types if fn not in utils.SKIP_COMPONENTS - ] - outputs = [ - inp for inp in fn.input_component_types if fn not in utils.SKIP_COMPONENTS - ] + inputs = [inp for inp in fn.input_component_types if not inp.skip] + outputs = [inp for inp in fn.input_component_types if not inp.skip] if not inputs == ["textbox"] and outputs == ["textbox"]: raise ValueError( "Currently only api_names with a single textbox as input and output are supported. 
" @@ -748,7 +750,7 @@ def deploy_discord( ) if is_private: huggingface_hub.add_space_secret( - space_id, "HF_TOKEN", hf_token, token=hf_token + space_id, "HF_TOKEN", hf_token, token=hf_token # type: ignore ) url = f"https://huggingface.co/spaces/{space_id}" @@ -756,6 +758,18 @@ def deploy_discord( return url +@dataclass +class ComponentApiType: + skip: bool + value_is_file: bool + is_state: bool + + +@dataclass +class ReplaceMe: + index: int + + class Endpoint: """Helper class for storing all the information about a single API endpoint.""" @@ -768,17 +782,41 @@ def __init__(self, client: Client, fn_index: int, dependency: dict): "/" + api_name if isinstance(api_name, str) else api_name ) self.use_ws = self._use_websocket(self.dependency) - self.input_component_types = [] - self.output_component_types = [] + self.input_component_types = [ + self._get_component_type(id_) for id_ in dependency["inputs"] + ] + self.output_component_types = [ + self._get_component_type(id_) for id_ in dependency["outputs"] + ] self.root_url = client.src + "/" if not client.src.endswith("/") else client.src self.is_continuous = dependency.get("types", {}).get("continuous", False) - try: - # Only a real API endpoint if backend_fn is True (so not just a frontend function), serializers are valid, - # and api_name is not False (meaning that the developer has explicitly disabled the API endpoint) - self.serializers, self.deserializers = self._setup_serializers() - self.is_valid = self.dependency["backend_fn"] and self.api_name is not False - except SerializationSetupError: - self.is_valid = False + self.download_file = lambda d: self._download_file( + d, + save_dir=self.client.output_dir, + hf_token=self.client.hf_token, + root_url=self.root_url, + ) + # Only a real API endpoint if backend_fn is True (so not just a frontend function), serializers are valid, + # and api_name is not False (meaning that the developer has explicitly disabled the API endpoint) + self.is_valid = self.dependency["backend_fn"] and self.api_name is not False + + def _get_component_type(self, component_id: int): + component = next( + i for i in self.client.config["components"] if i["id"] == component_id + ) + skip_api = component.get("skip_api", component["type"] in utils.SKIP_COMPONENTS) + return ComponentApiType( + skip_api, + self.value_is_file(component), + component["type"] == "state", + ) + + @staticmethod + def value_is_file(component: dict) -> bool: + # Hacky for now + if "api_info" not in component: + return False + return utils.value_is_file(component["api_info"]) def __repr__(self): return f"Endpoint src: {self.client.src}, api_name: {self.api_name}, fn_index: {self.fn_index}" @@ -909,140 +947,104 @@ def _upload( uploaded.append(res) return uploaded - def _add_uploaded_files_to_data( - self, - files: list[str | list[str]] | list[dict[str, Any] | list[dict[str, Any]]], - data: list[Any], - ) -> None: - """Helper function to modify the input data with the uploaded files.""" - file_counter = 0 - for i, t in enumerate(self.input_component_types): - if t in ["file", "uploadbutton"]: - data[i] = files[file_counter] - file_counter += 1 - def insert_state(self, *data) -> tuple: data = list(data) for i, input_component_type in enumerate(self.input_component_types): - if input_component_type == utils.STATE_COMPONENT: + if input_component_type.is_state: data.insert(i, None) return tuple(data) def remove_skipped_components(self, *data) -> tuple: - data = [ - d - for d, oct in zip(data, self.output_component_types) - if oct not in 
utils.SKIP_COMPONENTS - ] + data = [d for d, oct in zip(data, self.output_component_types) if not oct.skip] return tuple(data) def reduce_singleton_output(self, *data) -> Any: - if ( - len( - [ - oct - for oct in self.output_component_types - if oct not in utils.SKIP_COMPONENTS - ] - ) - == 1 - ): + if len([oct for oct in self.output_component_types if not oct.skip]) == 1: return data[0] else: return data - def serialize(self, *data) -> tuple: - if len(data) != len(self.serializers): - raise ValueError( - f"Expected {len(self.serializers)} arguments, got {len(data)}" + def _gather_files(self, *data): + file_list = [] + + def get_file(d): + file_list.append(d) + return ReplaceMe(len(file_list) - 1) + + new_data = [] + for i, d in enumerate(data): + if self.input_component_types[i].value_is_file: + d = utils.traverse(d, get_file, utils.is_filepath) + new_data.append(d) + return file_list, new_data + + def _add_uploaded_files_to_data(self, data: list[Any], files: list[Any]): + def replace(d: ReplaceMe) -> dict: + return files[d.index] + + new_data = [] + for d in data: + d = utils.traverse( + d, replace, is_root=lambda node: isinstance(node, ReplaceMe) ) + new_data.append(d) + return new_data - files = [ - f - for f, t in zip(data, self.input_component_types) - if t in ["file", "uploadbutton"] - ] + def serialize(self, *data) -> tuple: + files, new_data = self._gather_files(*data) uploaded_files = self._upload(files) - data = list(data) - self._add_uploaded_files_to_data(uploaded_files, data) - o = tuple([s.serialize(d) for s, d in zip(self.serializers, data)]) + data = list(new_data) + data = self._add_uploaded_files_to_data(data, uploaded_files) + data = utils.traverse( + data, + lambda s: {"name": s, "is_file": True, "data": None}, + utils.is_url, + ) + o = tuple(data) return o - def deserialize(self, *data) -> tuple: - if len(data) != len(self.deserializers): + @staticmethod + def _download_file( + x: dict, + save_dir: str, + root_url: str, + hf_token: str | None = None, + ) -> str | None: + if x is None: + return None + if isinstance(x, str): + file_name = utils.decode_base64_to_file(x, dir=save_dir).name + elif isinstance(x, dict): + if x.get("is_file"): + filepath = x.get("name") + assert filepath is not None, f"The 'name' field is missing in {x}" + file_name = utils.download_file( + root_url + "file=" + filepath, + hf_token=hf_token, + dir=save_dir, + ) + else: + data = x.get("data") + assert data is not None, f"The 'data' field is missing in {x}" + file_name = utils.decode_base64_to_file(data, dir=save_dir).name + else: raise ValueError( - f"Expected {len(self.deserializers)} outputs, got {len(data)}" + f"A FileSerializable component can only deserialize a string or a dict, not a {type(x)}: {x}" ) - outputs = tuple( - [ - s.deserialize( - d, - save_dir=self.client.output_dir, - hf_token=self.client.hf_token, - root_url=self.root_url, - ) - for s, d in zip(self.deserializers, data) - ] - ) - return outputs + return file_name + + def deserialize(self, *data) -> tuple: + data_ = list(data) + + data_: list[Any] = utils.traverse(data_, self.download_file, utils.is_file_obj) + return tuple(data_) def process_predictions(self, *predictions): - if self.client.serialize: - predictions = self.deserialize(*predictions) + predictions = self.deserialize(*predictions) predictions = self.remove_skipped_components(*predictions) predictions = self.reduce_singleton_output(*predictions) return predictions - def _setup_serializers(self) -> tuple[list[Serializable], list[Serializable]]: - inputs = 
self.dependency["inputs"] - serializers = [] - - for i in inputs: - for component in self.client.config["components"]: - if component["id"] == i: - component_name = component["type"] - self.input_component_types.append(component_name) - if component.get("serializer"): - serializer_name = component["serializer"] - if serializer_name not in serializing.SERIALIZER_MAPPING: - raise SerializationSetupError( - f"Unknown serializer: {serializer_name}, you may need to update your gradio_client version." - ) - serializer = serializing.SERIALIZER_MAPPING[serializer_name] - elif component_name in serializing.COMPONENT_MAPPING: - serializer = serializing.COMPONENT_MAPPING[component_name] - else: - raise SerializationSetupError( - f"Unknown component: {component_name}, you may need to update your gradio_client version." - ) - serializers.append(serializer()) # type: ignore - - outputs = self.dependency["outputs"] - deserializers = [] - for i in outputs: - for component in self.client.config["components"]: - if component["id"] == i: - component_name = component["type"] - self.output_component_types.append(component_name) - if component.get("serializer"): - serializer_name = component["serializer"] - if serializer_name not in serializing.SERIALIZER_MAPPING: - raise SerializationSetupError( - f"Unknown serializer: {serializer_name}, you may need to update your gradio_client version." - ) - deserializer = serializing.SERIALIZER_MAPPING[serializer_name] - elif component_name in utils.SKIP_COMPONENTS: - deserializer = serializing.SimpleSerializable - elif component_name in serializing.COMPONENT_MAPPING: - deserializer = serializing.COMPONENT_MAPPING[component_name] - else: - raise SerializationSetupError( - f"Unknown component: {component_name}, you may need to update your gradio_client version." - ) - deserializers.append(deserializer()) # type: ignore - - return serializers, deserializers - def _use_websocket(self, dependency: dict) -> bool: queue_enabled = self.client.config.get("enable_queue", False) queue_uses_websocket = version.parse( diff --git a/client/python/gradio_client/serializing.py b/client/python/gradio_client/serializing.py deleted file mode 100644 index ca0831de8d86..000000000000 --- a/client/python/gradio_client/serializing.py +++ /dev/null @@ -1,598 +0,0 @@ -from __future__ import annotations - -import json -import os -import secrets -import tempfile -import uuid -from pathlib import Path -from typing import Any - -from gradio_client import media_data, utils -from gradio_client.data_classes import FileData - -with open(Path(__file__).parent / "types.json") as f: - serializer_types = json.load(f) - - -class Serializable: - def serialized_info(self): - """ - The typing information for this component as a dictionary whose values are a list of 2 strings: [Python type, language-agnostic description]. - Keys of the dictionary are: raw_input, raw_output, serialized_input, serialized_output - """ - return self.api_info() - - def api_info(self) -> dict[str, list[str]]: - """ - The typing information for this component as a dictionary whose values are a list of 2 strings: [Python type, language-agnostic description]. - Keys of the dictionary are: raw_input, raw_output, serialized_input, serialized_output - """ - raise NotImplementedError() - - def example_inputs(self) -> dict[str, Any]: - """ - The example inputs for this component as a dictionary whose values are example inputs compatible with this component. 
- Keys of the dictionary are: raw, serialized - """ - raise NotImplementedError() - - # For backwards compatibility - def input_api_info(self) -> tuple[str, str]: - api_info = self.api_info() - types = api_info.get("serialized_input", [api_info["info"]["type"]] * 2) # type: ignore - return (types[0], types[1]) - - # For backwards compatibility - def output_api_info(self) -> tuple[str, str]: - api_info = self.api_info() - types = api_info.get("serialized_output", [api_info["info"]["type"]] * 2) # type: ignore - return (types[0], types[1]) - - def serialize(self, x: Any, load_dir: str | Path = "", allow_links: bool = False): - """ - Convert data from human-readable format to serialized format for a browser. - """ - return x - - def deserialize( - self, - x: Any, - save_dir: str | Path | None = None, - root_url: str | None = None, - hf_token: str | None = None, - ): - """ - Convert data from serialized format for a browser to human-readable format. - """ - return x - - -class SimpleSerializable(Serializable): - """General class that does not perform any serialization or deserialization.""" - - def api_info(self) -> dict[str, bool | dict]: - return { - "info": serializer_types["SimpleSerializable"], - "serialized_info": False, - } - - def example_inputs(self) -> dict[str, Any]: - return { - "raw": None, - "serialized": None, - } - - -class StringSerializable(Serializable): - """Expects a string as input/output but performs no serialization.""" - - def api_info(self) -> dict[str, bool | dict]: - return { - "info": serializer_types["StringSerializable"], - "serialized_info": False, - } - - def example_inputs(self) -> dict[str, Any]: - return { - "raw": "Howdy!", - "serialized": "Howdy!", - } - - -class ListStringSerializable(Serializable): - """Expects a list of strings as input/output but performs no serialization.""" - - def api_info(self) -> dict[str, bool | dict]: - return { - "info": serializer_types["ListStringSerializable"], - "serialized_info": False, - } - - def example_inputs(self) -> dict[str, Any]: - return { - "raw": ["Howdy!", "Merhaba"], - "serialized": ["Howdy!", "Merhaba"], - } - - -class BooleanSerializable(Serializable): - """Expects a boolean as input/output but performs no serialization.""" - - def api_info(self) -> dict[str, bool | dict]: - return { - "info": serializer_types["BooleanSerializable"], - "serialized_info": False, - } - - def example_inputs(self) -> dict[str, Any]: - return { - "raw": True, - "serialized": True, - } - - -class NumberSerializable(Serializable): - """Expects a number (int/float) as input/output but performs no serialization.""" - - def api_info(self) -> dict[str, bool | dict]: - return { - "info": serializer_types["NumberSerializable"], - "serialized_info": False, - } - - def example_inputs(self) -> dict[str, Any]: - return { - "raw": 5, - "serialized": 5, - } - - -class ImgSerializable(Serializable): - """Expects a base64 string as input/output which is serialized to a filepath.""" - - def serialized_info(self): - return { - "type": "string", - "description": "filepath on your computer (or URL) of image", - } - - def api_info(self) -> dict[str, bool | dict]: - return {"info": serializer_types["ImgSerializable"], "serialized_info": True} - - def example_inputs(self) -> dict[str, Any]: - return { - "raw": media_data.BASE64_IMAGE, - "serialized": "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png", - } - - def serialize( - self, - x: str | None, - load_dir: str | Path = "", - allow_links: bool = False, - ) -> str | 
None: - """ - Convert from human-friendly version of a file (string filepath) to a serialized - representation (base64). - Parameters: - x: String path to file to serialize - load_dir: Path to directory containing x - """ - if not x: - return None - if utils.is_http_url_like(x): - return utils.encode_url_to_base64(x) - return utils.encode_file_to_base64(Path(load_dir) / x) - - def deserialize( - self, - x: str | None, - save_dir: str | Path | None = None, - root_url: str | None = None, - hf_token: str | None = None, - ) -> str | None: - """ - Convert from serialized representation of a file (base64) to a human-friendly - version (string filepath). Optionally, save the file to the directory specified by save_dir - Parameters: - x: Base64 representation of image to deserialize into a string filepath - save_dir: Path to directory to save the deserialized image to - root_url: Ignored - hf_token: Ignored - """ - if x is None or x == "": - return None - file = utils.decode_base64_to_file(x, dir=save_dir) - return file.name - - -class FileSerializable(Serializable): - """Expects a dict with base64 representation of object as input/output which is serialized to a filepath.""" - - def __init__(self) -> None: - self.stream = None - self.stream_name = None - super().__init__() - - def serialized_info(self): - return self._single_file_serialized_info() - - def _single_file_api_info(self): - return { - "info": serializer_types["SingleFileSerializable"], - "serialized_info": True, - } - - def _single_file_serialized_info(self): - return { - "type": "string", - "description": "filepath on your computer (or URL) of file", - } - - def _multiple_file_serialized_info(self): - return { - "type": "array", - "description": "List of filepath(s) or URL(s) to files", - "items": { - "type": "string", - "description": "filepath on your computer (or URL) of file", - }, - } - - def _multiple_file_api_info(self): - return { - "info": serializer_types["MultipleFileSerializable"], - "serialized_info": True, - } - - def api_info(self) -> dict[str, dict | bool]: - return self._single_file_api_info() - - def example_inputs(self) -> dict[str, Any]: - return self._single_file_example_inputs() - - def _single_file_example_inputs(self) -> dict[str, Any]: - return { - "raw": {"is_file": False, "data": media_data.BASE64_FILE}, - "serialized": "https://github.com/gradio-app/gradio/raw/main/test/test_files/sample_file.pdf", - } - - def _multiple_file_example_inputs(self) -> dict[str, Any]: - return { - "raw": [{"is_file": False, "data": media_data.BASE64_FILE}], - "serialized": [ - "https://github.com/gradio-app/gradio/raw/main/test/test_files/sample_file.pdf" - ], - } - - def _serialize_single( - self, - x: str | FileData | None, - load_dir: str | Path = "", - allow_links: bool = False, - ) -> FileData | None: - if x is None or isinstance(x, dict): - return x - if utils.is_http_url_like(x): - filename = x - size = None - else: - filename = str(Path(load_dir) / x) - size = Path(filename).stat().st_size - return { - "name": filename, - "data": None - if allow_links - else utils.encode_url_or_file_to_base64(filename), - "orig_name": Path(filename).name, - "is_file": allow_links, - "size": size, - } - - def _setup_stream(self, url, hf_token): - return utils.download_byte_stream(url, hf_token) - - def _deserialize_single( - self, - x: str | FileData | None, - save_dir: str | None = None, - root_url: str | None = None, - hf_token: str | None = None, - ) -> str | None: - if x is None: - return None - if isinstance(x, str): - file_name = 
utils.decode_base64_to_file(x, dir=save_dir).name - elif isinstance(x, dict): - if x.get("is_file"): - filepath = x.get("name") - if filepath is None: - raise ValueError(f"The 'name' field is missing in {x}") - if root_url is not None: - file_name = utils.download_tmp_copy_of_file( - root_url + "file=" + filepath, - hf_token=hf_token, - dir=save_dir, - ) - else: - file_name = utils.create_tmp_copy_of_file(filepath, dir=save_dir) - elif x.get("is_stream"): - assert x["name"] and root_url and save_dir - if not self.stream or self.stream_name != x["name"]: - self.stream = self._setup_stream( - root_url + "stream/" + x["name"], hf_token=hf_token - ) - self.stream_name = x["name"] - chunk = next(self.stream) - path = Path(save_dir or tempfile.gettempdir()) / secrets.token_hex(20) - path.mkdir(parents=True, exist_ok=True) - path = path / x.get("orig_name", "output") - path.write_bytes(chunk) - file_name = str(path) - else: - data = x.get("data") - if data is None: - raise ValueError(f"The 'data' field is missing in {x}") - file_name = utils.decode_base64_to_file(data, dir=save_dir).name - else: - raise ValueError( - f"A FileSerializable component can only deserialize a string or a dict, not a {type(x)}: {x}" - ) - return file_name - - def serialize( - self, - x: str | FileData | None | list[str | FileData | None], - load_dir: str | Path = "", - allow_links: bool = False, - ) -> FileData | None | list[FileData | None]: - """ - Convert from human-friendly version of a file (string filepath) to a - serialized representation (base64) - Parameters: - x: String path to file to serialize - load_dir: Path to directory containing x - allow_links: Will allow path returns instead of raw file content - """ - if x is None or x == "": - return None - if isinstance(x, list): - return [self._serialize_single(f, load_dir, allow_links) for f in x] - else: - return self._serialize_single(x, load_dir, allow_links) - - def deserialize( - self, - x: str | FileData | None | list[str | FileData | None], - save_dir: Path | str | None = None, - root_url: str | None = None, - hf_token: str | None = None, - ) -> str | None | list[str | None]: - """ - Convert from serialized representation of a file (base64) to a human-friendly - version (string filepath). Optionally, save the file to the directory specified by `save_dir` - Parameters: - x: Base64 representation of file to deserialize into a string filepath - save_dir: Path to directory to save the deserialized file to - root_url: If this component is loaded from an external Space, this is the URL of the Space. 
- hf_token: If this component is loaded from an external private Space, this is the access token for the Space - """ - if x is None: - return None - if isinstance(save_dir, Path): - save_dir = str(save_dir) - if isinstance(x, list): - return [ - self._deserialize_single( - f, save_dir=save_dir, root_url=root_url, hf_token=hf_token - ) - for f in x - ] - else: - return self._deserialize_single( - x, save_dir=save_dir, root_url=root_url, hf_token=hf_token - ) - - -class VideoSerializable(FileSerializable): - def serialized_info(self): - return { - "type": "string", - "description": "filepath on your computer (or URL) of video file", - } - - def api_info(self) -> dict[str, dict | bool]: - return {"info": serializer_types["FileSerializable"], "serialized_info": True} - - def example_inputs(self) -> dict[str, Any]: - return { - "raw": {"is_file": False, "data": media_data.BASE64_VIDEO}, - "serialized": "https://github.com/gradio-app/gradio/raw/main/test/test_files/video_sample.mp4", - } - - def serialize( - self, x: str | None, load_dir: str | Path = "", allow_links: bool = False - ) -> tuple[FileData | None, None]: - return (super().serialize(x, load_dir, allow_links), None) # type: ignore - - def deserialize( - self, - x: tuple[FileData | None, FileData | None] | None, - save_dir: Path | str | None = None, - root_url: str | None = None, - hf_token: str | None = None, - ) -> str | tuple[str | None, str | None] | None: - """ - Convert from serialized representation of a file (base64) to a human-friendly - version (string filepath). Optionally, save the file to the directory specified by `save_dir` - """ - if isinstance(x, (tuple, list)): - if len(x) != 2: - raise ValueError(f"Expected tuple of length 2. Received: {x}") - x_as_list = [x[0], x[1]] - else: - raise ValueError(f"Expected tuple of length 2. Received: {x}") - deserialized_file = super().deserialize(x_as_list, save_dir, root_url, hf_token) # type: ignore - if isinstance(deserialized_file, list): - return deserialized_file[0] # ignore subtitles - - -class JSONSerializable(Serializable): - def serialized_info(self): - return {"type": "string", "description": "filepath to JSON file"} - - def api_info(self) -> dict[str, dict | bool]: - return {"info": serializer_types["JSONSerializable"], "serialized_info": True} - - def example_inputs(self) -> dict[str, Any]: - return { - "raw": {"a": 1, "b": 2}, - "serialized": None, - } - - def serialize( - self, - x: str | None, - load_dir: str | Path = "", - allow_links: bool = False, - ) -> dict | list | None: - """ - Convert from a a human-friendly version (string path to json file) to a - serialized representation (json string) - Parameters: - x: String path to json file to read to get json string - load_dir: Path to directory containing x - """ - if x is None or x == "": - return None - return utils.file_to_json(Path(load_dir) / x) - - def deserialize( - self, - x: str | dict | list, - save_dir: str | Path | None = None, - root_url: str | None = None, - hf_token: str | None = None, - ) -> str | None: - """ - Convert from serialized representation (json string) to a human-friendly - version (string path to json file). 
Optionally, save the file to the directory specified by `save_dir` - Parameters: - x: Json string - save_dir: Path to save the deserialized json file to - root_url: Ignored - hf_token: Ignored - """ - if x is None: - return None - return utils.dict_or_str_to_json_file(x, dir=save_dir).name - - -class GallerySerializable(Serializable): - def serialized_info(self): - return { - "type": "string", - "description": "path to directory with images and a file associating images with captions called captions.json", - } - - def api_info(self) -> dict[str, dict | bool]: - return { - "info": serializer_types["GallerySerializable"], - "serialized_info": True, - } - - def example_inputs(self) -> dict[str, Any]: - return { - "raw": [media_data.BASE64_IMAGE] * 2, - "serialized": [ - "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png", - ] - * 2, - } - - def serialize( - self, x: str | None, load_dir: str | Path = "", allow_links: bool = False - ) -> list[list[str | None]] | None: - if x is None or x == "": - return None - files = [] - captions_file = Path(x) / "captions.json" - with captions_file.open("r") as captions_json: - captions = json.load(captions_json) - for file_name, caption in captions.items(): - img = FileSerializable().serialize(file_name, allow_links=allow_links) - files.append([img, caption]) - return files - - def deserialize( - self, - x: list[list[str | None]] | None, - save_dir: str = "", - root_url: str | None = None, - hf_token: str | None = None, - ) -> None | str: - if x is None: - return None - gallery_path = Path(save_dir) / str(uuid.uuid4()) - gallery_path.mkdir(exist_ok=True, parents=True) - captions = {} - for img_data in x: - if isinstance(img_data, (list, tuple)): - img_data, caption = img_data - else: - caption = None - name = FileSerializable().deserialize( - img_data, gallery_path, root_url=root_url, hf_token=hf_token - ) - captions[name] = caption - captions_file = gallery_path / "captions.json" - with captions_file.open("w") as captions_json: - json.dump(captions, captions_json) - return os.path.abspath(gallery_path) - - -SERIALIZER_MAPPING = {} -for cls in Serializable.__subclasses__(): - SERIALIZER_MAPPING[cls.__name__] = cls - for subcls in cls.__subclasses__(): - SERIALIZER_MAPPING[subcls.__name__] = subcls - -SERIALIZER_MAPPING["Serializable"] = SimpleSerializable -SERIALIZER_MAPPING["File"] = FileSerializable -SERIALIZER_MAPPING["UploadButton"] = FileSerializable - -COMPONENT_MAPPING: dict[str, type] = { - "textbox": StringSerializable, - "number": NumberSerializable, - "slider": NumberSerializable, - "checkbox": BooleanSerializable, - "checkboxgroup": ListStringSerializable, - "radio": StringSerializable, - "dropdown": SimpleSerializable, - "image": ImgSerializable, - "video": FileSerializable, - "audio": FileSerializable, - "file": FileSerializable, - "dataframe": JSONSerializable, - "timeseries": JSONSerializable, - "fileexplorer": JSONSerializable, - "state": SimpleSerializable, - "button": StringSerializable, - "uploadbutton": FileSerializable, - "colorpicker": StringSerializable, - "label": JSONSerializable, - "highlightedtext": JSONSerializable, - "json": JSONSerializable, - "html": StringSerializable, - "gallery": GallerySerializable, - "chatbot": JSONSerializable, - "model3d": FileSerializable, - "plot": JSONSerializable, - "barplot": JSONSerializable, - "lineplot": JSONSerializable, - "scatterplot": JSONSerializable, - "markdown": StringSerializable, - "code": StringSerializable, - "annotatedimage": JSONSerializable, -} 
diff --git a/client/python/gradio_client/utils.py b/client/python/gradio_client/utils.py index 421ff11ac121..9996015aad90 100644 --- a/client/python/gradio_client/utils.py +++ b/client/python/gradio_client/utils.py @@ -2,6 +2,7 @@ import asyncio import base64 +import hashlib import json import mimetypes import os @@ -35,20 +36,6 @@ RESET_URL = "reset" SPACE_URL = "https://hf.space/{}" -SKIP_COMPONENTS = { - "state", - "row", - "column", - "tabs", - "tab", - "tabitem", - "box", - "form", - "accordion", - "group", - "interpretation", - "dataset", -} STATE_COMPONENT = "state" INVALID_RUNTIME = [ SpaceStage.NO_APP_FILE, @@ -322,21 +309,31 @@ async def get_pred_from_ws( ######################## -def download_tmp_copy_of_file( - url_path: str, hf_token: str | None = None, dir: str | None = None +def download_file( + url_path: str, + dir: str, + hf_token: str | None = None, ) -> str: if dir is not None: os.makedirs(dir, exist_ok=True) headers = {"Authorization": "Bearer " + hf_token} if hf_token else {} - directory = Path(dir or tempfile.gettempdir()) / secrets.token_hex(20) - directory.mkdir(exist_ok=True, parents=True) - file_path = directory / Path(url_path).name + + sha1 = hashlib.sha1() + temp_dir = Path(tempfile.gettempdir()) / secrets.token_hex(20) + temp_dir.mkdir(exist_ok=True, parents=True) with requests.get(url_path, headers=headers, stream=True) as r: r.raise_for_status() - with open(file_path, "wb") as f: - shutil.copyfileobj(r.raw, f) - return str(file_path.resolve()) + with open(temp_dir / Path(url_path).name, "wb") as f: + for chunk in r.iter_content(chunk_size=128 * sha1.block_size): + sha1.update(chunk) + f.write(chunk) + + directory = Path(dir) / sha1.hexdigest() + directory.mkdir(exist_ok=True, parents=True) + dest = directory / Path(url_path).name + shutil.move(temp_dir / Path(url_path).name, dest) + return str(dest.resolve()) def create_tmp_copy_of_file(file_path: str, dir: str | None = None) -> str: @@ -545,8 +542,16 @@ class APIInfoParseError(ValueError): def get_type(schema: dict): - if "type" in schema: + if not isinstance(schema, dict): + raise APIInfoParseError(f"Cannot parse type for {schema}") + if "const" in schema: + return "const" + if "enum" in schema: + return "enum" + elif "type" in schema: return schema["type"] + elif schema.get("$ref"): + return "$ref" elif schema.get("oneOf"): return "oneOf" elif schema.get("anyOf"): @@ -555,16 +560,32 @@ return "anyOf" else: raise APIInfoParseError(f"Cannot parse type for {schema}") +FILE_DATA = "Dict(name: str | None, data: str | None, size: int | None, is_file: bool | None, orig_name: str | None, mime_type: str | None)" + + def json_schema_to_python_type(schema: Any) -> str: + type_ = _json_schema_to_python_type(schema, schema.get("$defs")) + return type_.replace(FILE_DATA, "filepath") + + +def _json_schema_to_python_type(schema: Any, defs) -> str: """Convert the json schema into a python type hint""" + if schema == {}: + return "Any" type_ = get_type(schema) if type_ == {}: if "json" in schema["description"]: return "Dict[Any, Any]" else: return "Any" + elif type_ == "$ref": + return _json_schema_to_python_type(defs[schema["$ref"].split("/")[-1]], defs) elif type_ == "null": return "None" + elif type_ == "const": + return f"Literal[{schema['const']}]" + elif type_ == "enum": + return f"Literal[{', '.join([str(v) for v in schema['enum']])}]" elif type_ == "integer": return "int" elif type_ == "string": @@ -577,22 +598,82 @@ def json_schema_to_python_type(schema: Any) -> str: items = schema.get("items") if "prefixItems" in items: elements = ", ".join( -
[json_schema_to_python_type(i) for i in items["prefixItems"]] + [_json_schema_to_python_type(i, defs) for i in items["prefixItems"]] ) return f"Tuple[{elements}]" else: - elements = json_schema_to_python_type(items) + elements = _json_schema_to_python_type(items, defs) return f"List[{elements}]" elif type_ == "object": + + def get_desc(v): + return f" ({v.get('description')})" if v.get("description") else "" + + if "additionalProperties" in schema: + return f"Dict[str, {_json_schema_to_python_type(schema['additionalProperties'], defs)}]" + + props = schema.get("properties") + des = ", ".join( [ - f"{n}: {json_schema_to_python_type(v)} ({v.get('description')})" - for n, v in schema["properties"].items() + f"{n}: {_json_schema_to_python_type(v, defs)}{get_desc(v)}" + for n, v in props.items() + if n != "$defs" ] ) return f"Dict({des})" elif type_ in ["oneOf", "anyOf"]: - desc = " | ".join([json_schema_to_python_type(i) for i in schema[type_]]) + desc = " | ".join([_json_schema_to_python_type(i, defs) for i in schema[type_]]) return desc else: raise APIInfoParseError(f"Cannot parse schema {schema}") + + +def traverse(json_obj: Any, func: Callable, is_root: Callable) -> Any: + if is_root(json_obj): + return func(json_obj) + elif isinstance(json_obj, dict): + new_obj = {} + for key, value in json_obj.items(): + new_obj[key] = traverse(value, func, is_root) + return new_obj + elif isinstance(json_obj, (list, tuple)): + new_obj = [] + for item in json_obj: + new_obj.append(traverse(item, func, is_root)) + return new_obj + else: + return json_obj + + +def value_is_file(api_info: dict) -> bool: + info = _json_schema_to_python_type(api_info, api_info.get("$defs")) + return FILE_DATA in info + + +def is_filepath(s): + return isinstance(s, str) and Path(s).exists() + + +def is_url(s): + return isinstance(s, str) and is_http_url_like(s) + + +def is_file_obj(d): + return isinstance(d, dict) and "name" in d and "is_file" in d and "data" in d + + +SKIP_COMPONENTS = { + "state", + "row", + "column", + "tabs", + "tab", + "tabitem", + "box", + "form", + "accordion", + "group", + "interpretation", + "dataset", +} diff --git a/client/python/test/conftest.py b/client/python/test/conftest.py index 13a3d237501a..9fb31e603dd7 100644 --- a/client/python/test/conftest.py +++ b/client/python/test/conftest.py @@ -179,6 +179,31 @@ def show(n): return demo.queue() +@pytest.fixture +def count_generator_no_api(): + def count(n): + for i in range(int(n)): + time.sleep(0.5) + yield i + + def show(n): + return str(list(range(int(n)))) + + with gr.Blocks() as demo: + with gr.Column(): + num = gr.Number(value=10) + with gr.Row(): + count_btn = gr.Button("Count") + list_btn = gr.Button("List") + with gr.Column(): + out = gr.Textbox() + + count_btn.click(count, num, out, api_name=False) + list_btn.click(show, num, out, api_name=False) + + return demo.queue() + + @pytest.fixture def count_generator_demo_exception(): def count(n): @@ -323,6 +348,13 @@ def _stream_audio(audio_file): ).queue() +@pytest.fixture +def video_component(): + return gr.Interface( + fn=lambda x: x, inputs=gr.Video(type="file"), outputs=gr.Video() + ) + + @pytest.fixture def all_components(): classes_to_check = gr.components.Component.__subclasses__() @@ -342,3 +374,12 @@ def all_components(): subclasses.append(subclass) return subclasses + + +@pytest.fixture(autouse=True) +def gradio_temp_dir(monkeypatch, tmp_path): + """tmp_path is unique to each test function. 
+ It will be cleared automatically according to pytest docs: https://docs.pytest.org/en/6.2.x/reference.html#tmp-path + """ + monkeypatch.setenv("GRADIO_TEMP_DIR", str(tmp_path)) + return tmp_path diff --git a/client/python/test/requirements.txt b/client/python/test/requirements.txt index 064b9322508e..494cf0db4e99 100644 --- a/client/python/test/requirements.txt +++ b/client/python/test/requirements.txt @@ -2,6 +2,6 @@ black==23.3.0 pytest-asyncio pytest==7.1.2 ruff==0.0.264 -pyright==1.1.305 +pyright==1.1.327 gradio pydub==0.25.1 diff --git a/client/python/test/test_client.py b/client/python/test/test_client.py index a01153bb6011..f4cac6363965 100644 --- a/client/python/test/test_client.py +++ b/client/python/test/test_client.py @@ -1,4 +1,3 @@ -import json import pathlib import tempfile import time @@ -19,17 +18,18 @@ from gradio_client import Client from gradio_client.client import DEFAULT_TEMP_DIR -from gradio_client.serializing import Serializable from gradio_client.utils import Communicator, ProgressUnit, Status, StatusUpdate HF_TOKEN = "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" # Intentionally revealing this key for testing purposes @contextmanager -def connect(demo: gr.Blocks, serialize: bool = True): +def connect( + demo: gr.Blocks, serialize: bool = True, output_dir: str = DEFAULT_TEMP_DIR +): _, local_url, _ = demo.launch(prevent_thread_lock=True) try: - yield Client(local_url, serialize=serialize) + yield Client(local_url, serialize=serialize, output_dir=output_dir) finally: # A more verbose version of .close() # because we should set a timeout @@ -51,8 +51,8 @@ def test_raise_error_invalid_state(self): @pytest.mark.flaky def test_numerical_to_label_space(self): client = Client("gradio-tests/titanic-survival") - with open(client.predict("male", 77, 10, api_name="/predict")) as f: - assert json.load(f)["label"] == "Perishes" + label = client.predict("male", 77, 10, api_name="/predict") + assert label["label"] == "Perishes" with pytest.raises( ValueError, match="This Gradio app might have multiple endpoints. 
Please specify an `api_name` or `fn_index`", @@ -176,24 +176,33 @@ def test_raises_exception_no_queue(self, sentiment_classification_demo): job = client.submit([5], api_name="/sleep") job.result() - @pytest.mark.flaky - def test_job_output_video(self): - client = Client(src="gradio/video_component") - job = client.submit( - "https://huggingface.co/spaces/gradio/video_component/resolve/main/files/a.mp4", - fn_index=0, - ) - assert Path(job.result()).exists() - assert Path(DEFAULT_TEMP_DIR).resolve() in Path(job.result()).resolve().parents + def test_job_output_video(self, video_component): + with connect(video_component) as client: + job = client.submit( + { + "video": "https://huggingface.co/spaces/gradio/video_component/resolve/main/files/a.mp4" + }, + fn_index=0, + ) + assert Path(job.result()["video"]).exists() + assert ( + Path(DEFAULT_TEMP_DIR).resolve() + in Path(job.result()["video"]).resolve().parents + ) temp_dir = tempfile.mkdtemp() - client = Client(src="gradio/video_component", output_dir=temp_dir) - job = client.submit( - "https://huggingface.co/spaces/gradio/video_component/resolve/main/files/a.mp4", - fn_index=0, - ) - assert Path(job.result()).exists() - assert Path(temp_dir).resolve() in Path(job.result()).resolve().parents + with connect(video_component, output_dir=temp_dir) as client: + job = client.submit( + { + "video": "https://huggingface.co/spaces/gradio/video_component/resolve/main/files/a.mp4" + }, + fn_index=0, + ) + assert Path(job.result()["video"]).exists() + assert ( + Path(temp_dir).resolve() + in Path(job.result()["video"]).resolve().parents + ) def test_progress_updates(self, progress_demo): with connect(progress_demo) as client: @@ -254,20 +263,21 @@ def test_cancel_from_client_queued(self, cancel_from_client_demo): def test_cancel_subsequent_jobs_state_reset(self, yield_demo): with connect(yield_demo) as client: - job1 = client.submit("abcdefefadsadfs") + job1 = client.submit("abcdefefadsadfs", api_name="/predict") time.sleep(3) job1.cancel() assert len(job1.outputs()) < len("abcdefefadsadfs") assert job1.status().code == Status.CANCELLED - job2 = client.submit("abcd") + job2 = client.submit("abcd", api_name="/predict") while not job2.done(): time.sleep(0.1) # Ran all iterations from scratch assert job2.status().code == Status.FINISHED assert len(job2.outputs()) == 4 + @pytest.mark.xfail def test_stream_audio(self, stream_audio): with connect(stream_audio) as client: job1 = client.submit( @@ -283,6 +293,7 @@ def test_stream_audio(self, stream_audio): assert Path(job2.result()).exists() assert all(Path(p).exists() for p in job2.outputs()) + @pytest.mark.xfail @pytest.mark.flaky def test_upload_file_private_space(self): client = Client( @@ -336,6 +347,7 @@ def test_upload_file_private_space(self): assert f.read() == "File2" upload.assert_called_once() + @pytest.mark.xfail @pytest.mark.flaky def test_upload_file_upload_route_does_not_exist(self): client = Client( @@ -390,6 +402,7 @@ def greet(name): finally: server.thread.join(timeout=1) + @pytest.mark.xfail def test_predict_with_space_with_api_name_false(self): client = Client("gradio-tests/client-bool-api-name-error") assert client.predict("Hello!", api_name="/run") == "Hello!" 
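The rewritten `test_job_output_video` above also documents the new client-side payload shape for media components: inputs are passed as a dict (e.g. `{"video": url}`) and `job.result()` returns a matching dict whose file entries are downloaded under `output_dir`. A minimal sketch of the same call pattern outside the test suite, assuming a locally running app that echoes a video (the URL, port, and `fn_index` here are illustrative):

from gradio_client import Client

# Illustrative: a locally launched demo that echoes a video component.
client = Client("http://127.0.0.1:7860", output_dir="/tmp/gradio_client_out")
job = client.submit(
    {"video": "https://huggingface.co/spaces/gradio/video_component/resolve/main/files/a.mp4"},
    fn_index=0,
)
result = job.result()   # dict mirroring the component payload
print(result["video"])  # local path beneath output_dir where the file was saved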
@@ -416,7 +429,7 @@ def test_return_layout_and_state_components( class TestStatusUpdates: @patch("gradio_client.client.Endpoint.make_end_to_end_fn") - def test_messages_passed_correctly(self, mock_make_end_to_end_fn): + def test_messages_passed_correctly(self, mock_make_end_to_end_fn, calculator_demo): now = datetime.now() messages = [ @@ -488,18 +501,20 @@ def __call__(self, *args, **kwargs): mock_make_end_to_end_fn.side_effect = MockEndToEndFunction - client = Client(src="gradio/calculator") - job = client.submit(5, "add", 6, api_name="/predict") + with connect(calculator_demo) as client: + job = client.submit(5, "add", 6, api_name="/predict") - statuses = [] - while not job.done(): - statuses.append(job.status()) - time.sleep(0.09) + statuses = [] + while not job.done(): + statuses.append(job.status()) + time.sleep(0.09) - assert all(s in messages for s in statuses) + assert all(s in messages for s in statuses) @patch("gradio_client.client.Endpoint.make_end_to_end_fn") - def test_messages_correct_two_concurrent(self, mock_make_end_to_end_fn): + def test_messages_correct_two_concurrent( + self, mock_make_end_to_end_fn, calculator_demo + ): now = datetime.now() messages_1 = [ @@ -563,21 +578,22 @@ def __call__(self, *args, **kwargs): mock_make_end_to_end_fn.side_effect = MockEndToEndFunction - client = Client(src="gradio/calculator") - job_1 = client.submit(5, "add", 6, api_name="/predict") - job_2 = client.submit(11, "subtract", 1, api_name="/predict") + with connect(calculator_demo) as client: + job_1 = client.submit(5, "add", 6, api_name="/predict") + job_2 = client.submit(11, "subtract", 1, api_name="/predict") - statuses_1 = [] - statuses_2 = [] - while not (job_1.done() and job_2.done()): - statuses_1.append(job_1.status()) - statuses_2.append(job_2.status()) - time.sleep(0.05) + statuses_1 = [] + statuses_2 = [] + while not (job_1.done() and job_2.done()): + statuses_1.append(job_1.status()) + statuses_2.append(job_2.status()) + time.sleep(0.05) - assert all(s in messages_1 for s in statuses_1) + assert all(s in messages_1 for s in statuses_1) class TestAPIInfo: + @pytest.mark.xfail @pytest.mark.parametrize("trailing_char", ["/", ""]) def test_test_endpoint_src(self, trailing_char): src = "https://gradio-calculator.hf.space" + trailing_char @@ -731,12 +747,6 @@ def test_numerical_to_label_space(self): "unnamed_endpoints": {}, } - def test_serializable_in_mapping(self, calculator_demo): - with connect(calculator_demo) as client: - assert all( - isinstance(c, Serializable) for c in client.endpoints[0].serializers - ) - def test_state_does_not_appear(self, state_demo): with connect(state_demo) as client: api_info = client.view_api(return_format="dict") @@ -778,67 +788,80 @@ def test_private_space(self): } @pytest.mark.flaky - def test_fetch_fixed_version_space(self): - assert Client("gradio-tests/calculator").view_api(return_format="dict") == { - "named_endpoints": { - "/predict": { - "parameters": [ - { - "label": "num1", - "type": {"type": "number"}, - "python_type": {"type": "int | float", "description": ""}, - "component": "Number", - "example_input": 5, - "serializer": "NumberSerializable", - }, - { - "label": "operation", - "type": {"type": "string"}, - "python_type": {"type": "str", "description": ""}, - "component": "Radio", - "example_input": "add", - "serializer": "StringSerializable", - }, - { - "label": "num2", - "type": {"type": "number"}, - "python_type": {"type": "int | float", "description": ""}, - "component": "Number", - "example_input": 5, - "serializer": 
"NumberSerializable", - }, - ], - "returns": [ - { - "label": "output", - "type": {"type": "number"}, - "python_type": {"type": "int | float", "description": ""}, - "component": "Number", - "serializer": "NumberSerializable", - } - ], - } - }, - "unnamed_endpoints": {}, - } + def test_fetch_fixed_version_space(self, calculator_demo): + with connect(calculator_demo) as client: + assert client.view_api(return_format="dict") == { + "named_endpoints": { + "/predict": { + "parameters": [ + { + "label": "num1", + "type": {"type": "number"}, + "python_type": { + "type": "int | float", + "description": "", + }, + "component": "Number", + "example_input": 3, + }, + { + "label": "operation", + "type": { + "enum": ["add", "subtract", "multiply", "divide"], + "title": "Radio", + "type": "string", + }, + "python_type": { + "type": "Literal[add, subtract, multiply, divide]", + "description": "", + }, + "component": "Radio", + "example_input": "add", + }, + { + "label": "num2", + "type": {"type": "number"}, + "python_type": { + "type": "int | float", + "description": "", + }, + "component": "Number", + "example_input": 3, + }, + ], + "returns": [ + { + "label": "output", + "type": {"type": "number"}, + "python_type": { + "type": "int | float", + "description": "", + }, + "component": "Number", + } + ], + } + }, + "unnamed_endpoints": {}, + } def test_unnamed_endpoints_use_fn_index(self, count_generator_demo): with connect(count_generator_demo) as client: info = client.view_api(return_format="str") - assert "fn_index=0" in info - assert "api_name" not in info + assert "fn_index" not in info + assert "api_name" in info - def test_api_false_endpoints_do_not_appear(self, count_generator_demo): - with connect(count_generator_demo) as client: + def test_api_false_endpoints_do_not_appear(self, count_generator_no_api): + with connect(count_generator_no_api) as client: info = client.view_api(return_format="dict") assert len(info["named_endpoints"]) == 0 - assert len(info["unnamed_endpoints"]) == 2 def test_api_false_endpoints_cannot_be_accessed_with_fn_index(self, increment_demo): with connect(increment_demo) as client: with pytest.raises(ValueError): client.submit(1, fn_index=2) + @pytest.mark.xfail def test_file_io(self, file_io_demo): with connect(file_io_demo) as client: info = client.view_api(return_format="dict") @@ -847,7 +870,7 @@ def test_file_io(self, file_io_demo): assert inputs[0]["type"]["type"] == "array" assert inputs[0]["python_type"] == { - "type": "List[str]", + "type": "List[filepath]", "description": "List of filepath(s) or URL(s) to files", } assert isinstance(inputs[0]["example_input"], list) @@ -882,8 +905,7 @@ def test_layout_components_in_output(self, hello_world_with_group): "type": {"type": "string"}, "python_type": {"type": "str", "description": ""}, "component": "Textbox", - "example_input": "Howdy!", - "serializer": "StringSerializable", + "example_input": "Hello!!", } ], "returns": [ @@ -892,7 +914,6 @@ def test_layout_components_in_output(self, hello_world_with_group): "type": {"type": "string"}, "python_type": {"type": "str", "description": ""}, "component": "Textbox", - "serializer": "StringSerializable", } ], }, @@ -900,10 +921,6 @@ def test_layout_components_in_output(self, hello_world_with_group): }, "unnamed_endpoints": {}, } - assert info["named_endpoints"]["/show_group"] == { - "parameters": [], - "returns": [], - } def test_layout_and_state_components_in_output( self, hello_world_with_state_and_accordion @@ -919,8 +936,7 @@ def test_layout_and_state_components_in_output( 
"type": {"type": "string"}, "python_type": {"type": "str", "description": ""}, "component": "Textbox", - "example_input": "Howdy!", - "serializer": "StringSerializable", + "example_input": "Hello!!", } ], "returns": [ @@ -929,7 +945,6 @@ def test_layout_and_state_components_in_output( "type": {"type": "string"}, "python_type": {"type": "str", "description": ""}, "component": "Textbox", - "serializer": "StringSerializable", }, { "label": "count", @@ -939,7 +954,6 @@ def test_layout_and_state_components_in_output( "description": "", }, "component": "Number", - "serializer": "NumberSerializable", }, ], }, @@ -954,7 +968,6 @@ def test_layout_and_state_components_in_output( "description": "", }, "component": "Number", - "serializer": "NumberSerializable", } ], }, @@ -969,7 +982,6 @@ def test_layout_and_state_components_in_output( "description": "", }, "component": "Number", - "serializer": "NumberSerializable", } ], }, @@ -979,6 +991,7 @@ def test_layout_and_state_components_in_output( class TestEndpoints: + @pytest.mark.xfail def test_upload(self): client = Client( src="gradio-tests/not-actually-private-file-upload", hf_token=HF_TOKEN diff --git a/client/python/test/test_serializing.py b/client/python/test/test_serializing.py deleted file mode 100644 index 8bf4fb104498..000000000000 --- a/client/python/test/test_serializing.py +++ /dev/null @@ -1,60 +0,0 @@ -import os -import tempfile - -import pytest -from gradio import components - -from gradio_client.serializing import COMPONENT_MAPPING, FileSerializable, Serializable -from gradio_client.utils import SKIP_COMPONENTS, encode_url_or_file_to_base64 - - -@pytest.mark.parametrize("serializer_class", Serializable.__subclasses__()) -def test_duplicate(serializer_class): - if "gradio_client" not in serializer_class.__module__: - pytest.skip(f"{serializer_class} not defined in gradio_client") - serializer = serializer_class() - info = serializer.api_info() - assert "info" in info and "serialized_info" in info - if "serialized_info" in info: - assert serializer.serialized_info() - - -def test_check_component_fallback_serializers(): - for component_name, class_type in COMPONENT_MAPPING.items(): - # skip components that cannot be instantiated without parameters - if component_name in SKIP_COMPONENTS: - continue - component = components.get_component_instance(component_name) - assert isinstance(component, class_type) - - -def test_all_components_in_component_mapping(all_components): - for component in all_components: - assert component.__name__.lower() in COMPONENT_MAPPING - - -def test_file_serializing(): - try: - serializing = FileSerializable() - with tempfile.NamedTemporaryFile(delete=False, mode="w") as f1: - with tempfile.NamedTemporaryFile(delete=False, mode="w") as f2: - f1.write("Hello World!") - f2.write("Greetings!") - - output = serializing.serialize(f1.name) - assert output["data"] == encode_url_or_file_to_base64(f1.name) - output = serializing.serialize([f1.name, f2.name]) - assert output[0]["data"] == encode_url_or_file_to_base64(f1.name) - assert output[1]["data"] == encode_url_or_file_to_base64(f2.name) - - # no-op for dict - assert serializing.serialize(output) == output - - files = serializing.deserialize(output) - with open(files[0]) as f: - assert f.read() == "Hello World!" - with open(files[1]) as f: - assert f.read() == "Greetings!" 
- finally: - os.remove(f1.name) - os.remove(f2.name) diff --git a/client/python/test/test_utils.py b/client/python/test/test_utils.py index 21ceb0c12f05..3208d42d1747 100644 --- a/client/python/test/test_utils.py +++ b/client/python/test/test_utils.py @@ -66,20 +66,22 @@ def test_decode_base64_to_file(): assert isinstance(temp_file, tempfile._TemporaryFileWrapper) -def test_download_private_file(): +def test_download_private_file(gradio_temp_dir): url_path = "https://gradio-tests-not-actually-private-space.hf.space/file=lion.jpg" hf_token = "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" # Intentionally revealing this key for testing purposes - file = utils.download_tmp_copy_of_file(url_path=url_path, hf_token=hf_token) + file = utils.download_file( + url_path=url_path, hf_token=hf_token, dir=str(gradio_temp_dir) + ) assert Path(file).name.endswith(".jpg") -def test_download_tmp_copy_of_file_does_not_save_errors(monkeypatch): +def test_download_tmp_copy_of_file_does_not_save_errors(monkeypatch, gradio_temp_dir): error_response = requests.Response() error_response.status_code = 404 error_response.close = lambda: 0 # Mock close method to avoid unrelated exception monkeypatch.setattr(requests, "get", lambda *args, **kwargs: error_response) with pytest.raises(requests.RequestException): - utils.download_tmp_copy_of_file("https://example.com/foo") + utils.download_file("https://example.com/foo", dir=str(gradio_temp_dir)) @pytest.mark.parametrize( diff --git a/demo/all_demos/tmp.zip b/demo/all_demos/tmp.zip deleted file mode 100644 index b1782ff5d98d..000000000000 Binary files a/demo/all_demos/tmp.zip and /dev/null differ diff --git a/demo/audio_debugger/run.ipynb b/demo/audio_debugger/run.ipynb index 4bb1dbd1b765..336800b78b39 100644 --- a/demo/audio_debugger/run.ipynb +++ b/demo/audio_debugger/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: audio_debugger"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/audio_debugger/cantina.wav"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import subprocess\n", "import os\n", "\n", "audio_file = os.path.join(os.path.abspath(''), \"cantina.wav\")\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Tab(\"Audio\"):\n", " gr.Audio(audio_file)\n", " with gr.Tab(\"Interface\"):\n", " gr.Interface(lambda x:x, \"audio\", \"audio\", examples=[audio_file])\n", " with gr.Tab(\"console\"):\n", " ip = gr.Textbox(label=\"User IP Address\")\n", " gr.Interface(lambda cmd:subprocess.run([cmd], capture_output=True, shell=True).stdout.decode('utf-8').strip(), \"text\", \"text\")\n", " \n", " def get_ip(request: gr.Request):\n", " return request.client.host\n", " \n", " demo.load(get_ip, None, ip)\n", " \n", "if __name__ == \"__main__\":\n", " demo.queue()\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio 
Demo: audio_debugger"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/audio_debugger/cantina.wav"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import subprocess\n", "import os\n", "\n", "audio_file = os.path.join(os.path.abspath(''), \"cantina.wav\")\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Tab(\"Audio\"):\n", " gr.Audio(audio_file)\n", " with gr.Tab(\"Interface\"):\n", " gr.Interface(lambda x:x, \"audio\", \"audio\", examples=[audio_file])\n", " with gr.Tab(\"console\"):\n", " ip = gr.Textbox(label=\"User IP Address\")\n", " gr.Interface(lambda cmd:subprocess.run([cmd], capture_output=True, shell=True).stdout.decode('utf-8').strip(), \"text\", \"text\")\n", " \n", " def get_ip(request: gr.Request):\n", " return request.client.host\n", " \n", " demo.load(get_ip, None, ip)\n", " \n", "if __name__ == \"__main__\":\n", " demo.queue()\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} diff --git a/demo/audio_debugger/run.py b/demo/audio_debugger/run.py index 5c66e4486076..d9be08c0bc41 100644 --- a/demo/audio_debugger/run.py +++ b/demo/audio_debugger/run.py @@ -9,7 +9,7 @@ with gr.Tab("Audio"): gr.Audio(audio_file) with gr.Tab("Interface"): - gr.Interface(lambda x:x, "audio", "audio", examples=[audio_file]) + gr.Interface(lambda x:x, "audio", "audio", examples=[audio_file], cache_examples=True) with gr.Tab("console"): ip = gr.Textbox(label="User IP Address") gr.Interface(lambda cmd:subprocess.run([cmd], capture_output=True, shell=True).stdout.decode('utf-8').strip(), "text", "text") diff --git a/demo/blocks_essay/run.ipynb b/demo/blocks_essay/run.ipynb index 5664f8bd40e0..6ca9dfb89a71 100644 --- a/demo/blocks_essay/run.ipynb +++ b/demo/blocks_essay/run.ipynb @@ -1 +1,5 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: blocks_essay"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "\n", "def change_textbox(choice):\n", " if choice == \"short\":\n", " return gr.Textbox(lines=2, visible=True)\n", " elif choice == \"long\":\n", " return gr.Textbox(lines=8, visible=True, value=\"Lorem ipsum dolor sit amet\")\n", " else:\n", " return gr.Textbox(visible=False)\n", "\n", "\n", "with gr.Blocks() as demo:\n", " radio = gr.Radio(\n", " [\"short\", \"long\", \"none\"], label=\"What kind of essay would you like to write?\"\n", " )\n", " text = gr.Textbox(lines=2, interactive=True, show_copy_button=True)\n", " radio.change(fn=change_textbox, inputs=radio, outputs=text)\n", "\n", " with gr.Row():\n", " num = gr.Number(minimum=0, maximum=100, label=\"input\")\n", " out = gr.Number(label=\"output\")\n", " minimum_slider = gr.Slider(0, 100, 0, label=\"min\")\n", " maximum_slider = gr.Slider(0, 100, 100, 
label=\"max\")\n", "\n", " def reset_bounds(minimum, maximum):\n", " return gr.Number(minimum=minimum, maximum=maximum)\n", " \n", " minimum_slider.change(reset_bounds, [minimum_slider, maximum_slider], outputs=num)\n", " maximum_slider.change(reset_bounds, [minimum_slider, maximum_slider], outputs=num)\n", " num.submit(lambda x:x, num, out)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +<<<<<<< HEAD +{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_essay"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "\n", "def change_textbox(choice):\n", " if choice == \"short\":\n", " return gr.Textbox(lines=2, visible=True)\n", " elif choice == \"long\":\n", " return gr.Textbox(lines=8, visible=True, value=\"Lorem ipsum dolor sit amet\")\n", " else:\n", " return gr.Textbox(visible=False)\n", "\n", "\n", "with gr.Blocks() as demo:\n", " radio = gr.Radio(\n", " [\"short\", \"long\", \"none\"], label=\"What kind of essay would you like to write?\"\n", " )\n", " text = gr.Textbox(lines=2, interactive=True, show_copy_button=True)\n", " radio.change(fn=change_textbox, inputs=radio, outputs=text)\n", "\n", " with gr.Row():\n", " num = gr.Number(minimum=0, maximum=100, label=\"input\")\n", " out = gr.Number(label=\"output\")\n", " minimum_slider = gr.Slider(0, 100, 0, label=\"min\")\n", " maximum_slider = gr.Slider(0, 100, 100, label=\"max\")\n", "\n", " def reset_bounds(minimum, maximum):\n", " return gr.Number(minimum=minimum, maximum=maximum)\n", " \n", " minimum_slider.change(reset_bounds, [minimum_slider, maximum_slider], outputs=num)\n", " maximum_slider.change(reset_bounds, [minimum_slider, maximum_slider], outputs=num)\n", " num.submit(lambda x:x, num, out)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +======= +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: blocks_essay"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "\n", "def change_textbox(choice):\n", " if choice == \"short\":\n", " return gr.Textbox(lines=2, visible=True)\n", " elif choice == \"long\":\n", " return gr.Textbox(lines=8, visible=True, value=\"Lorem ipsum dolor sit amet\")\n", " else:\n", " return gr.Textbox(visible=False)\n", "\n", "\n", "with gr.Blocks() as demo:\n", " radio = gr.Radio(\n", " [\"short\", \"long\", \"none\"], label=\"What kind of essay would you like to write?\"\n", " )\n", " text = gr.Textbox(lines=2, interactive=True, show_copy_button=True)\n", " radio.change(fn=change_textbox, inputs=radio, outputs=text)\n", "\n", " with gr.Row():\n", " num = gr.Number(minimum=0, maximum=100, label=\"input\")\n", " out = gr.Number(label=\"output\")\n", " minimum_slider = gr.Slider(0, 100, 0, label=\"min\")\n", " 
maximum_slider = gr.Slider(0, 100, 100, label=\"max\")\n", "\n", " def reset_bounds(minimum, maximum):\n", " return gr.Number(minimum=minimum, maximum=maximum)\n", " \n", " minimum_slider.change(reset_bounds, [minimum_slider, maximum_slider], outputs=num)\n", " maximum_slider.change(reset_bounds, [minimum_slider, maximum_slider], outputs=num)\n", " num.submit(lambda x:x, num, out)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +>>>>>>> main diff --git a/demo/blocks_outputs/run.ipynb b/demo/blocks_outputs/run.ipynb index 5ee51f02d376..a2505d427420 100644 --- a/demo/blocks_outputs/run.ipynb +++ b/demo/blocks_outputs/run.ipynb @@ -1 +1,5 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: blocks_outputs"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "\n", "def make_markdown():\n", " return [\n", " [\n", " \"# hello again\",\n", " \"Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.\",\n", " '',\n", " ],\n", " [\n", " \"## hello again again\",\n", " \"Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.\",\n", " '',\n", " ],\n", " [\n", " \"### hello thrice\",\n", " \"Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.\",\n", " '',\n", " ],\n", " ]\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Column():\n", " txt = gr.Textbox(label=\"Small Textbox\", lines=1, show_label=False)\n", " txt = gr.Textbox(label=\"Large Textbox\", lines=5, show_label=False)\n", " num = gr.Number(label=\"Number\", show_label=False)\n", " check = gr.Checkbox(label=\"Checkbox\", show_label=False)\n", " check_g = gr.CheckboxGroup(\n", " label=\"Checkbox Group\", choices=[\"One\", \"Two\", \"Three\"], show_label=False\n", " )\n", " radio = gr.Radio(\n", " label=\"Radio\", choices=[\"One\", \"Two\", \"Three\"], show_label=False\n", " )\n", " drop = gr.Dropdown(\n", " label=\"Dropdown\", choices=[\"One\", \"Two\", \"Three\"], show_label=False\n", " )\n", " slider = gr.Slider(label=\"Slider\", show_label=False)\n", " audio = gr.Audio(show_label=False)\n", " file = gr.File(show_label=False)\n", " video = gr.Video(show_label=False)\n", " image = gr.Image(show_label=False)\n", " ts = gr.Timeseries(show_label=False)\n", " df = gr.Dataframe(show_label=False)\n", " html = gr.HTML(show_label=False)\n", " json = gr.JSON(show_label=False)\n", " md = gr.Markdown(show_label=False)\n", " label = gr.Label(show_label=False)\n", " highlight = gr.HighlightedText(show_label=False)\n", " gr.Dataframe(interactive=True, col_count=(3, \"fixed\"), label=\"Dataframe\")\n", " gr.Dataframe(interactive=True, col_count=4, label=\"Dataframe\")\n", " gr.Dataframe(\n", " interactive=True, headers=[\"One\", \"Two\", \"Three\", \"Four\"], label=\"Dataframe\"\n", " )\n", " gr.Dataframe(\n", " interactive=True,\n", " headers=[\"One\", \"Two\", \"Three\", \"Four\"],\n", " col_count=(4, \"fixed\"),\n", " row_count=(7, \"fixed\"),\n", " value=[[0, 0, 0, 0]],\n", " label=\"Dataframe\",\n", " )\n", " gr.Dataframe(\n", " interactive=True, 
headers=[\"One\", \"Two\", \"Three\", \"Four\"], col_count=4\n", " )\n", " df = gr.DataFrame(\n", " [\n", " [\n", " \"# hello\",\n", " \"Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.\",\n", " '',\n", " ],\n", " [\n", " \"## hello\",\n", " \"Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.\",\n", " '',\n", " ],\n", " [\n", " \"### hello\",\n", " \"Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.\",\n", " '',\n", " ],\n", " ],\n", " headers=[\"One\", \"Two\", \"Three\"],\n", " wrap=True,\n", " datatype=[\"markdown\", \"markdown\", \"html\"],\n", " interactive=True,\n", " )\n", " btn = gr.Button(\"Run\")\n", " btn.click(fn=make_markdown, inputs=None, outputs=df)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +<<<<<<< HEAD +{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_outputs"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "\n", "def make_markdown():\n", " return [\n", " [\n", " \"# hello again\",\n", " \"Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.\",\n", " '',\n", " ],\n", " [\n", " \"## hello again again\",\n", " \"Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.\",\n", " '',\n", " ],\n", " [\n", " \"### hello thrice\",\n", " \"Hello my name is frank, I am liking the small turtle you have there. 
It would be a shame if it went missing.\",\n", " '',\n", " ],\n", " ]\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Column():\n", " txt = gr.Textbox(label=\"Small Textbox\", lines=1, show_label=False)\n", " txt = gr.Textbox(label=\"Large Textbox\", lines=5, show_label=False)\n", " num = gr.Number(label=\"Number\", show_label=False)\n", " check = gr.Checkbox(label=\"Checkbox\", show_label=False)\n", " check_g = gr.CheckboxGroup(\n", " label=\"Checkbox Group\", choices=[\"One\", \"Two\", \"Three\"], show_label=False\n", " )\n", " radio = gr.Radio(\n", " label=\"Radio\", choices=[\"One\", \"Two\", \"Three\"], show_label=False\n", " )\n", " drop = gr.Dropdown(\n", " label=\"Dropdown\", choices=[\"One\", \"Two\", \"Three\"], show_label=False\n", " )\n", " slider = gr.Slider(label=\"Slider\", show_label=False)\n", " audio = gr.Audio(show_label=False)\n", " file = gr.File(show_label=False)\n", " video = gr.Video(show_label=False)\n", " image = gr.Image(show_label=False)\n", " df = gr.Dataframe(show_label=False)\n", " html = gr.HTML(show_label=False)\n", " json = gr.JSON(show_label=False)\n", " md = gr.Markdown(show_label=False)\n", " label = gr.Label(show_label=False)\n", " highlight = gr.HighlightedText(show_label=False)\n", " gr.Dataframe(interactive=True, col_count=(3, \"fixed\"), label=\"Dataframe\")\n", " gr.Dataframe(interactive=True, col_count=4, label=\"Dataframe\")\n", " gr.Dataframe(\n", " interactive=True, headers=[\"One\", \"Two\", \"Three\", \"Four\"], label=\"Dataframe\"\n", " )\n", " gr.Dataframe(\n", " interactive=True,\n", " headers=[\"One\", \"Two\", \"Three\", \"Four\"],\n", " col_count=(4, \"fixed\"),\n", " row_count=(7, \"fixed\"),\n", " value=[[0, 0, 0, 0]],\n", " label=\"Dataframe\",\n", " )\n", " gr.Dataframe(\n", " interactive=True, headers=[\"One\", \"Two\", \"Three\", \"Four\"], col_count=4\n", " )\n", " df = gr.DataFrame(\n", " [\n", " [\n", " \"# hello\",\n", " \"Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.\",\n", " '',\n", " ],\n", " [\n", " \"## hello\",\n", " \"Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.\",\n", " '',\n", " ],\n", " [\n", " \"### hello\",\n", " \"Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.\",\n", " '',\n", " ],\n", " ],\n", " headers=[\"One\", \"Two\", \"Three\"],\n", " wrap=True,\n", " datatype=[\"markdown\", \"markdown\", \"html\"],\n", " interactive=True,\n", " )\n", " btn = gr.Button(\"Run\")\n", " btn.click(fn=make_markdown, inputs=None, outputs=df)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +======= +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: blocks_outputs"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "\n", "def make_markdown():\n", " return [\n", " [\n", " \"# hello again\",\n", " \"Hello my name is frank, I am liking the small turtle you have there. 
It would be a shame if it went missing.\",\n", " '',\n", " ],\n", " [\n", " \"## hello again again\",\n", " \"Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.\",\n", " '',\n", " ],\n", " [\n", " \"### hello thrice\",\n", " \"Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.\",\n", " '',\n", " ],\n", " ]\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Column():\n", " txt = gr.Textbox(label=\"Small Textbox\", lines=1, show_label=False)\n", " txt = gr.Textbox(label=\"Large Textbox\", lines=5, show_label=False)\n", " num = gr.Number(label=\"Number\", show_label=False)\n", " check = gr.Checkbox(label=\"Checkbox\", show_label=False)\n", " check_g = gr.CheckboxGroup(\n", " label=\"Checkbox Group\", choices=[\"One\", \"Two\", \"Three\"], show_label=False\n", " )\n", " radio = gr.Radio(\n", " label=\"Radio\", choices=[\"One\", \"Two\", \"Three\"], show_label=False\n", " )\n", " drop = gr.Dropdown(\n", " label=\"Dropdown\", choices=[\"One\", \"Two\", \"Three\"], show_label=False\n", " )\n", " slider = gr.Slider(label=\"Slider\", show_label=False)\n", " audio = gr.Audio(show_label=False)\n", " file = gr.File(show_label=False)\n", " video = gr.Video(show_label=False)\n", " image = gr.Image(show_label=False)\n", " ts = gr.Timeseries(show_label=False)\n", " df = gr.Dataframe(show_label=False)\n", " html = gr.HTML(show_label=False)\n", " json = gr.JSON(show_label=False)\n", " md = gr.Markdown(show_label=False)\n", " label = gr.Label(show_label=False)\n", " highlight = gr.HighlightedText(show_label=False)\n", " gr.Dataframe(interactive=True, col_count=(3, \"fixed\"), label=\"Dataframe\")\n", " gr.Dataframe(interactive=True, col_count=4, label=\"Dataframe\")\n", " gr.Dataframe(\n", " interactive=True, headers=[\"One\", \"Two\", \"Three\", \"Four\"], label=\"Dataframe\"\n", " )\n", " gr.Dataframe(\n", " interactive=True,\n", " headers=[\"One\", \"Two\", \"Three\", \"Four\"],\n", " col_count=(4, \"fixed\"),\n", " row_count=(7, \"fixed\"),\n", " value=[[0, 0, 0, 0]],\n", " label=\"Dataframe\",\n", " )\n", " gr.Dataframe(\n", " interactive=True, headers=[\"One\", \"Two\", \"Three\", \"Four\"], col_count=4\n", " )\n", " df = gr.DataFrame(\n", " [\n", " [\n", " \"# hello\",\n", " \"Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.\",\n", " '',\n", " ],\n", " [\n", " \"## hello\",\n", " \"Hello my name is frank, I am liking the small turtle you have there. It would be a shame if it went missing.\",\n", " '',\n", " ],\n", " [\n", " \"### hello\",\n", " \"Hello my name is frank, I am liking the small turtle you have there. 
It would be a shame if it went missing.\",\n", " '',\n", " ],\n", " ],\n", " headers=[\"One\", \"Two\", \"Three\"],\n", " wrap=True,\n", " datatype=[\"markdown\", \"markdown\", \"html\"],\n", " interactive=True,\n", " )\n", " btn = gr.Button(\"Run\")\n", " btn.click(fn=make_markdown, inputs=None, outputs=df)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +>>>>>>> main diff --git a/demo/blocks_outputs/run.py b/demo/blocks_outputs/run.py index 084be0da9c4b..cd0b4d25a23f 100644 --- a/demo/blocks_outputs/run.py +++ b/demo/blocks_outputs/run.py @@ -41,7 +41,6 @@ def make_markdown(): file = gr.File(show_label=False) video = gr.Video(show_label=False) image = gr.Image(show_label=False) - ts = gr.Timeseries(show_label=False) df = gr.Dataframe(show_label=False) html = gr.HTML(show_label=False) json = gr.JSON(show_label=False) diff --git a/demo/blocks_style/run.ipynb b/demo/blocks_style/run.ipynb index 98b36fb2e819..65b1b200f7a7 100644 --- a/demo/blocks_style/run.ipynb +++ b/demo/blocks_style/run.ipynb @@ -1 +1,5 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: blocks_style"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks(title=\"Styling Examples\") as demo:\n", " with gr.Column(variant=\"box\"):\n", " txt = gr.Textbox(label=\"Small Textbox\", lines=1)\n", " num = gr.Number(label=\"Number\", show_label=False)\n", " slider = gr.Slider(label=\"Slider\", show_label=False)\n", " check = gr.Checkbox(label=\"Checkbox\", show_label=False)\n", " check_g = gr.CheckboxGroup(\n", " label=\"Checkbox Group\",\n", " choices=[\"One\", \"Two\", \"Three\"],\n", " show_label=False,\n", " )\n", " radio = gr.Radio(\n", " label=\"Radio\",\n", " choices=[\"One\", \"Two\", \"Three\"],\n", " show_label=False,\n", " )\n", " drop = gr.Dropdown(\n", " label=\"Dropdown\", choices=[\"One\", \"Two\", \"Three\"], show_label=False\n", " )\n", " image = gr.Image(show_label=False)\n", " video = gr.Video(show_label=False)\n", " audio = gr.Audio(show_label=False)\n", " file = gr.File(show_label=False)\n", " df = gr.Dataframe(show_label=False)\n", " ts = gr.Timeseries(show_label=False)\n", " label = gr.Label(container=False)\n", " highlight = gr.HighlightedText(\n", " [(\"hello\", None), (\"goodbye\", \"-\")],\n", " color_map={\"+\": \"green\", \"-\": \"red\"},\n", " container=False,\n", " )\n", " json = gr.JSON(container=False)\n", " html = gr.HTML(show_label=False)\n", " gallery = gr.Gallery(\n", " columns=(3, 3, 1),\n", " height=\"auto\",\n", " container=False,\n", " )\n", " chat = gr.Chatbot([(\"hi\", \"good bye\")])\n", "\n", " model = gr.Model3D()\n", "\n", " md = gr.Markdown(show_label=False)\n", "\n", " highlight = gr.HighlightedText()\n", "\n", " btn = gr.Button(\"Run\")\n", "\n", " gr.Dataset(components=[txt, num])\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +<<<<<<< HEAD +{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_style"]}, {"cell_type": "code", "execution_count": 
null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks(title=\"Styling Examples\") as demo:\n", " with gr.Column(variant=\"box\"):\n", " txt = gr.Textbox(label=\"Small Textbox\", lines=1)\n", " num = gr.Number(label=\"Number\", show_label=False)\n", " slider = gr.Slider(label=\"Slider\", show_label=False)\n", " check = gr.Checkbox(label=\"Checkbox\", show_label=False)\n", " check_g = gr.CheckboxGroup(\n", " label=\"Checkbox Group\",\n", " choices=[\"One\", \"Two\", \"Three\"],\n", " show_label=False,\n", " )\n", " radio = gr.Radio(\n", " label=\"Radio\",\n", " choices=[\"One\", \"Two\", \"Three\"],\n", " show_label=False,\n", " )\n", " drop = gr.Dropdown(\n", " label=\"Dropdown\", choices=[\"One\", \"Two\", \"Three\"], show_label=False\n", " )\n", " image = gr.Image(show_label=False)\n", " video = gr.Video(show_label=False)\n", " audio = gr.Audio(show_label=False)\n", " file = gr.File(show_label=False)\n", " df = gr.Dataframe(show_label=False)\n", " label = gr.Label(container=False)\n", " highlight = gr.HighlightedText(\n", " [(\"hello\", None), (\"goodbye\", \"-\")],\n", " color_map={\"+\": \"green\", \"-\": \"red\"},\n", " container=False,\n", " )\n", " json = gr.JSON(container=False)\n", " html = gr.HTML(show_label=False)\n", " gallery = gr.Gallery(\n", " columns=(3, 3, 1),\n", " height=\"auto\",\n", " container=False,\n", " )\n", " chat = gr.Chatbot([(\"hi\", \"good bye\")])\n", "\n", " model = gr.Model3D()\n", "\n", " md = gr.Markdown(show_label=False)\n", "\n", " highlight = gr.HighlightedText()\n", "\n", " btn = gr.Button(\"Run\")\n", "\n", " gr.Dataset(components=[txt, num])\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +======= +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: blocks_style"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks(title=\"Styling Examples\") as demo:\n", " with gr.Column(variant=\"box\"):\n", " txt = gr.Textbox(label=\"Small Textbox\", lines=1)\n", " num = gr.Number(label=\"Number\", show_label=False)\n", " slider = gr.Slider(label=\"Slider\", show_label=False)\n", " check = gr.Checkbox(label=\"Checkbox\", show_label=False)\n", " check_g = gr.CheckboxGroup(\n", " label=\"Checkbox Group\",\n", " choices=[\"One\", \"Two\", \"Three\"],\n", " show_label=False,\n", " )\n", " radio = gr.Radio(\n", " label=\"Radio\",\n", " choices=[\"One\", \"Two\", \"Three\"],\n", " show_label=False,\n", " )\n", " drop = gr.Dropdown(\n", " label=\"Dropdown\", choices=[\"One\", \"Two\", \"Three\"], show_label=False\n", " )\n", " image = gr.Image(show_label=False)\n", " video = gr.Video(show_label=False)\n", " audio = gr.Audio(show_label=False)\n", " file = gr.File(show_label=False)\n", " df = gr.Dataframe(show_label=False)\n", " ts = gr.Timeseries(show_label=False)\n", " label = gr.Label(container=False)\n", " highlight = gr.HighlightedText(\n", " [(\"hello\", None), 
(\"goodbye\", \"-\")],\n", " color_map={\"+\": \"green\", \"-\": \"red\"},\n", " container=False,\n", " )\n", " json = gr.JSON(container=False)\n", " html = gr.HTML(show_label=False)\n", " gallery = gr.Gallery(\n", " columns=(3, 3, 1),\n", " height=\"auto\",\n", " container=False,\n", " )\n", " chat = gr.Chatbot([(\"hi\", \"good bye\")])\n", "\n", " model = gr.Model3D()\n", "\n", " md = gr.Markdown(show_label=False)\n", "\n", " highlight = gr.HighlightedText()\n", "\n", " btn = gr.Button(\"Run\")\n", "\n", " gr.Dataset(components=[txt, num])\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +>>>>>>> main diff --git a/demo/blocks_style/run.py b/demo/blocks_style/run.py index 9e40fcdc2133..a4feb0e0b34f 100644 --- a/demo/blocks_style/run.py +++ b/demo/blocks_style/run.py @@ -24,7 +24,6 @@ audio = gr.Audio(show_label=False) file = gr.File(show_label=False) df = gr.Dataframe(show_label=False) - ts = gr.Timeseries(show_label=False) label = gr.Label(container=False) highlight = gr.HighlightedText( [("hello", None), ("goodbye", "-")], diff --git a/demo/bokeh_plot/run.ipynb b/demo/bokeh_plot/run.ipynb index f37fb0c8fab1..43cddee02193 100644 --- a/demo/bokeh_plot/run.ipynb +++ b/demo/bokeh_plot/run.ipynb @@ -1 +1,5 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: bokeh_plot"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio bokeh>=3.0 xyzservices"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import xyzservices.providers as xyz\n", "from bokeh.models import ColumnDataSource, Whisker\n", "from bokeh.plotting import figure\n", "from bokeh.sampledata.autompg2 import autompg2 as df\n", "from bokeh.sampledata.penguins import data\n", "from bokeh.transform import factor_cmap, jitter, factor_mark\n", "\n", "\n", "def get_plot(plot_type):\n", " if plot_type == \"map\":\n", " plot = figure(\n", " x_range=(-2000000, 6000000),\n", " y_range=(-1000000, 7000000),\n", " x_axis_type=\"mercator\",\n", " y_axis_type=\"mercator\",\n", " )\n", " plot.add_tile(xyz.OpenStreetMap.Mapnik)\n", " return plot\n", " elif plot_type == \"whisker\":\n", " classes = list(sorted(df[\"class\"].unique()))\n", "\n", " p = figure(\n", " height=400,\n", " x_range=classes,\n", " background_fill_color=\"#efefef\",\n", " title=\"Car class vs HWY mpg with quintile ranges\",\n", " )\n", " p.xgrid.grid_line_color = None\n", "\n", " g = df.groupby(\"class\")\n", " upper = g.hwy.quantile(0.80)\n", " lower = g.hwy.quantile(0.20)\n", " source = ColumnDataSource(data=dict(base=classes, upper=upper, lower=lower))\n", "\n", " error = Whisker(\n", " base=\"base\",\n", " upper=\"upper\",\n", " lower=\"lower\",\n", " source=source,\n", " level=\"annotation\",\n", " line_width=2,\n", " )\n", " error.upper_head.size = 20\n", " error.lower_head.size = 20\n", " p.add_layout(error)\n", "\n", " p.circle(\n", " jitter(\"class\", 0.3, range=p.x_range),\n", " \"hwy\",\n", " source=df,\n", " alpha=0.5,\n", " size=13,\n", " line_color=\"white\",\n", " color=factor_cmap(\"class\", \"Light6\", classes),\n", " )\n", " return p\n", " elif plot_type == \"scatter\":\n", "\n", " SPECIES = sorted(data.species.unique())\n", " MARKERS = [\"hex\", \"circle_x\", \"triangle\"]\n", "\n", 
" p = figure(title=\"Penguin size\", background_fill_color=\"#fafafa\")\n", " p.xaxis.axis_label = \"Flipper Length (mm)\"\n", " p.yaxis.axis_label = \"Body Mass (g)\"\n", "\n", " p.scatter(\n", " \"flipper_length_mm\",\n", " \"body_mass_g\",\n", " source=data,\n", " legend_group=\"species\",\n", " fill_alpha=0.4,\n", " size=12,\n", " marker=factor_mark(\"species\", MARKERS, SPECIES),\n", " color=factor_cmap(\"species\", \"Category10_3\", SPECIES),\n", " )\n", "\n", " p.legend.location = \"top_left\"\n", " p.legend.title = \"Species\"\n", " return p\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " plot_type = gr.Radio(value=\"scatter\", choices=[\"scatter\", \"whisker\", \"map\"])\n", " plot = gr.Plot()\n", " plot_type.change(get_plot, inputs=[plot_type], outputs=[plot])\n", " demo.load(get_plot, inputs=[plot_type], outputs=[plot])\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +<<<<<<< HEAD +{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: bokeh_plot"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio bokeh>=3.0 xyzservices"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import xyzservices.providers as xyz\n", "from bokeh.models import ColumnDataSource, Whisker\n", "from bokeh.plotting import figure\n", "from bokeh.sampledata.autompg2 import autompg2 as df\n", "from bokeh.sampledata.penguins import data\n", "from bokeh.transform import factor_cmap, jitter, factor_mark\n", "\n", "\n", "def get_plot(plot_type):\n", " if plot_type == \"map\":\n", " plot = figure(\n", " x_range=(-2000000, 6000000),\n", " y_range=(-1000000, 7000000),\n", " x_axis_type=\"mercator\",\n", " y_axis_type=\"mercator\",\n", " )\n", " plot.add_tile(xyz.OpenStreetMap.Mapnik)\n", " return plot\n", " elif plot_type == \"whisker\":\n", " classes = list(sorted(df[\"class\"].unique()))\n", "\n", " p = figure(\n", " height=400,\n", " x_range=classes,\n", " background_fill_color=\"#efefef\",\n", " title=\"Car class vs HWY mpg with quintile ranges\",\n", " )\n", " p.xgrid.grid_line_color = None\n", "\n", " g = df.groupby(\"class\")\n", " upper = g.hwy.quantile(0.80)\n", " lower = g.hwy.quantile(0.20)\n", " source = ColumnDataSource(data=dict(base=classes, upper=upper, lower=lower))\n", "\n", " error = Whisker(\n", " base=\"base\",\n", " upper=\"upper\",\n", " lower=\"lower\",\n", " source=source,\n", " level=\"annotation\",\n", " line_width=2,\n", " )\n", " error.upper_head.size = 20\n", " error.lower_head.size = 20\n", " p.add_layout(error)\n", "\n", " p.circle(\n", " jitter(\"class\", 0.3, range=p.x_range),\n", " \"hwy\",\n", " source=df,\n", " alpha=0.5,\n", " size=13,\n", " line_color=\"white\",\n", " color=factor_cmap(\"class\", \"Light6\", classes),\n", " )\n", " return p\n", " elif plot_type == \"scatter\":\n", "\n", " SPECIES = sorted(data.species.unique())\n", " MARKERS = [\"hex\", \"circle_x\", \"triangle\"]\n", "\n", " p = figure(title=\"Penguin size\", background_fill_color=\"#fafafa\")\n", " p.xaxis.axis_label = \"Flipper Length (mm)\"\n", " p.yaxis.axis_label = \"Body Mass (g)\"\n", "\n", " p.scatter(\n", " \"flipper_length_mm\",\n", " \"body_mass_g\",\n", " source=data,\n", " 
legend_group=\"species\",\n", " fill_alpha=0.4,\n", " size=12,\n", " marker=factor_mark(\"species\", MARKERS, SPECIES),\n", " color=factor_cmap(\"species\", \"Category10_3\", SPECIES),\n", " )\n", "\n", " p.legend.location = \"top_left\"\n", " p.legend.title = \"Species\"\n", " return p\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " plot_type = gr.Radio(value=\"scatter\", choices=[\"scatter\", \"whisker\", \"map\"])\n", " plot = gr.Plot()\n", " plot_type.change(get_plot, inputs=[plot_type], outputs=[plot])\n", " demo.load(get_plot, inputs=[plot_type], outputs=[plot])\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +======= +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: bokeh_plot"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio bokeh>=3.0 xyzservices"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import xyzservices.providers as xyz\n", "from bokeh.models import ColumnDataSource, Whisker\n", "from bokeh.plotting import figure\n", "from bokeh.sampledata.autompg2 import autompg2 as df\n", "from bokeh.sampledata.penguins import data\n", "from bokeh.transform import factor_cmap, jitter, factor_mark\n", "\n", "\n", "def get_plot(plot_type):\n", " if plot_type == \"map\":\n", " plot = figure(\n", " x_range=(-2000000, 6000000),\n", " y_range=(-1000000, 7000000),\n", " x_axis_type=\"mercator\",\n", " y_axis_type=\"mercator\",\n", " )\n", " plot.add_tile(xyz.OpenStreetMap.Mapnik)\n", " return plot\n", " elif plot_type == \"whisker\":\n", " classes = list(sorted(df[\"class\"].unique()))\n", "\n", " p = figure(\n", " height=400,\n", " x_range=classes,\n", " background_fill_color=\"#efefef\",\n", " title=\"Car class vs HWY mpg with quintile ranges\",\n", " )\n", " p.xgrid.grid_line_color = None\n", "\n", " g = df.groupby(\"class\")\n", " upper = g.hwy.quantile(0.80)\n", " lower = g.hwy.quantile(0.20)\n", " source = ColumnDataSource(data=dict(base=classes, upper=upper, lower=lower))\n", "\n", " error = Whisker(\n", " base=\"base\",\n", " upper=\"upper\",\n", " lower=\"lower\",\n", " source=source,\n", " level=\"annotation\",\n", " line_width=2,\n", " )\n", " error.upper_head.size = 20\n", " error.lower_head.size = 20\n", " p.add_layout(error)\n", "\n", " p.circle(\n", " jitter(\"class\", 0.3, range=p.x_range),\n", " \"hwy\",\n", " source=df,\n", " alpha=0.5,\n", " size=13,\n", " line_color=\"white\",\n", " color=factor_cmap(\"class\", \"Light6\", classes),\n", " )\n", " return p\n", " elif plot_type == \"scatter\":\n", "\n", " SPECIES = sorted(data.species.unique())\n", " MARKERS = [\"hex\", \"circle_x\", \"triangle\"]\n", "\n", " p = figure(title=\"Penguin size\", background_fill_color=\"#fafafa\")\n", " p.xaxis.axis_label = \"Flipper Length (mm)\"\n", " p.yaxis.axis_label = \"Body Mass (g)\"\n", "\n", " p.scatter(\n", " \"flipper_length_mm\",\n", " \"body_mass_g\",\n", " source=data,\n", " legend_group=\"species\",\n", " fill_alpha=0.4,\n", " size=12,\n", " marker=factor_mark(\"species\", MARKERS, SPECIES),\n", " color=factor_cmap(\"species\", \"Category10_3\", SPECIES),\n", " )\n", "\n", " p.legend.location = \"top_left\"\n", " p.legend.title = \"Species\"\n", " return p\n", "\n", "with 
gr.Blocks() as demo:\n", " with gr.Row():\n", " plot_type = gr.Radio(value=\"scatter\", choices=[\"scatter\", \"whisker\", \"map\"])\n", " plot = gr.Plot()\n", " plot_type.change(get_plot, inputs=[plot_type], outputs=[plot])\n", " demo.load(get_plot, inputs=[plot_type], outputs=[plot])\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +>>>>>>> main diff --git a/demo/calculator/run.ipynb b/demo/calculator/run.ipynb index d1716b1f97a2..8b4e1433c39c 100644 --- a/demo/calculator/run.ipynb +++ b/demo/calculator/run.ipynb @@ -1 +1,5 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: calculator"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('examples')\n", "!wget -q -O examples/log.csv https://github.com/gradio-app/gradio/raw/main/demo/calculator/examples/log.csv"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "def calculator(num1, operation, num2):\n", " if operation == \"add\":\n", " return num1 + num2\n", " elif operation == \"subtract\":\n", " return num1 - num2\n", " elif operation == \"multiply\":\n", " return num1 * num2\n", " elif operation == \"divide\":\n", " if num2 == 0:\n", " raise gr.Error(\"Cannot divide by zero!\")\n", " return num1 / num2\n", "\n", "demo = gr.Interface(\n", " calculator,\n", " [\n", " \"number\", \n", " gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n", " \"number\"\n", " ],\n", " \"number\",\n", " examples=[\n", " [5, \"add\", 3],\n", " [4, \"divide\", 2],\n", " [-4, \"multiply\", 2.5],\n", " [0, \"subtract\", 1.2],\n", " ],\n", " title=\"Toy Calculator\",\n", " description=\"Here's a sample toy calculator. 
Allows you to calculate things like $2+2=4$\",\n", ")\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +<<<<<<< HEAD +{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: calculator"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('examples')\n", "!wget -q -O examples/log.csv https://github.com/gradio-app/gradio/raw/main/demo/calculator/examples/log.csv"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "#from foo import BAR\n", "#\n", "def calculator(num1, operation, num2):\n", " if operation == \"add\":\n", " return num1 + num2\n", " elif operation == \"subtract\":\n", " return num1 - num2\n", " elif operation == \"multiply\":\n", " return num1 * num2\n", " elif operation == \"divide\":\n", " if num2 == 0:\n", " raise gr.Error(\"Cannot divide by zero!\")\n", " return num1 / num2\n", "\n", "demo = gr.Interface(\n", " calculator,\n", " [\n", " \"number\", \n", " gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n", " \"number\"\n", " ],\n", " \"number\",\n", " examples=[\n", " [45, \"add\", 3],\n", " [3.14, \"divide\", 2],\n", " [144, \"multiply\", 2.5],\n", " [0, \"subtract\", 1.2],\n", " ],\n", " title=\"Toy Calculator\",\n", " description=\"Here's a sample toy calculator. 
Allows you to calculate things like $2+2=4$\",\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +======= +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: calculator"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('examples')\n", "!wget -q -O examples/log.csv https://github.com/gradio-app/gradio/raw/main/demo/calculator/examples/log.csv"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "def calculator(num1, operation, num2):\n", " if operation == \"add\":\n", " return num1 + num2\n", " elif operation == \"subtract\":\n", " return num1 - num2\n", " elif operation == \"multiply\":\n", " return num1 * num2\n", " elif operation == \"divide\":\n", " if num2 == 0:\n", " raise gr.Error(\"Cannot divide by zero!\")\n", " return num1 / num2\n", "\n", "demo = gr.Interface(\n", " calculator,\n", " [\n", " \"number\", \n", " gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"]),\n", " \"number\"\n", " ],\n", " \"number\",\n", " examples=[\n", " [5, \"add\", 3],\n", " [4, \"divide\", 2],\n", " [-4, \"multiply\", 2.5],\n", " [0, \"subtract\", 1.2],\n", " ],\n", " title=\"Toy Calculator\",\n", " description=\"Here's a sample toy calculator. Allows you to calculate things like $2+2=4$\",\n", ")\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +>>>>>>> main diff --git a/demo/calculator/run.py b/demo/calculator/run.py index a640a33409e9..9ee04812fac6 100644 --- a/demo/calculator/run.py +++ b/demo/calculator/run.py @@ -1,5 +1,6 @@ import gradio as gr - +#from foo import BAR +# def calculator(num1, operation, num2): if operation == "add": return num1 + num2 @@ -21,13 +22,14 @@ def calculator(num1, operation, num2): ], "number", examples=[ - [5, "add", 3], - [4, "divide", 2], - [-4, "multiply", 2.5], + [45, "add", 3], + [3.14, "divide", 2], + [144, "multiply", 2.5], [0, "subtract", 1.2], ], title="Toy Calculator", description="Here's a sample toy calculator. 
Allows you to calculate things like $2+2=4$", ) + if __name__ == "__main__": demo.launch() diff --git a/demo/calculator_blocks/run.ipynb b/demo/calculator_blocks/run.ipynb index aaab76f76163..bad9e149406b 100644 --- a/demo/calculator_blocks/run.ipynb +++ b/demo/calculator_blocks/run.ipynb @@ -1 +1,5 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: calculator_blocks"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "\n", "def calculator(num1, operation, num2):\n", " if operation == \"add\":\n", " return num1 + num2\n", " elif operation == \"subtract\":\n", " return num1 - num2\n", " elif operation == \"multiply\":\n", " return num1 * num2\n", " elif operation == \"divide\":\n", " return num1 / num2\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " num_1 = gr.Number(value=4)\n", " operation = gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"])\n", " num_2 = gr.Number(value=0)\n", " submit_btn = gr.Button(value=\"Calculate\")\n", " with gr.Column():\n", " result = gr.Number()\n", "\n", " submit_btn.click(calculator, inputs=[num_1, operation, num_2], outputs=[result])\n", " examples = gr.Examples(examples=[[5, \"add\", 3],\n", " [4, \"divide\", 2],\n", " [-4, \"multiply\", 2.5],\n", " [0, \"subtract\", 1.2]],\n", " inputs=[num_1, operation, num_2])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +<<<<<<< HEAD +{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: calculator_blocks"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "\n", "def calculator(num1, operation, num2):\n", " if operation == \"add\":\n", " return num1 + num2\n", " elif operation == \"subtract\":\n", " return num1 - num2\n", " elif operation == \"multiply\":\n", " return num1 * num2\n", " elif operation == \"divide\":\n", " return num1 / num2\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " num_1 = gr.Number(value=4)\n", " operation = gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"])\n", " num_2 = gr.Number(value=0)\n", " submit_btn = gr.Button(value=\"Calculate\")\n", " with gr.Column():\n", " result = gr.Number()\n", "\n", " submit_btn.click(calculator, inputs=[num_1, operation, num_2], outputs=[result], api_name=False)\n", " examples = gr.Examples(examples=[[5, \"add\", 3],\n", " [4, \"divide\", 2],\n", " [-4, \"multiply\", 2.5],\n", " [0, \"subtract\", 1.2]],\n", " inputs=[num_1, operation, num_2])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch(show_api=False)"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +======= +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: calculator_blocks"]}, {"cell_type": 
"code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "\n", "def calculator(num1, operation, num2):\n", " if operation == \"add\":\n", " return num1 + num2\n", " elif operation == \"subtract\":\n", " return num1 - num2\n", " elif operation == \"multiply\":\n", " return num1 * num2\n", " elif operation == \"divide\":\n", " return num1 / num2\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " num_1 = gr.Number(value=4)\n", " operation = gr.Radio([\"add\", \"subtract\", \"multiply\", \"divide\"])\n", " num_2 = gr.Number(value=0)\n", " submit_btn = gr.Button(value=\"Calculate\")\n", " with gr.Column():\n", " result = gr.Number()\n", "\n", " submit_btn.click(calculator, inputs=[num_1, operation, num_2], outputs=[result])\n", " examples = gr.Examples(examples=[[5, \"add\", 3],\n", " [4, \"divide\", 2],\n", " [-4, \"multiply\", 2.5],\n", " [0, \"subtract\", 1.2]],\n", " inputs=[num_1, operation, num_2])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +>>>>>>> main diff --git a/demo/calculator_blocks/run.py b/demo/calculator_blocks/run.py index 957b8d9ab879..21b47b94bd1a 100644 --- a/demo/calculator_blocks/run.py +++ b/demo/calculator_blocks/run.py @@ -22,7 +22,7 @@ def calculator(num1, operation, num2): with gr.Column(): result = gr.Number() - submit_btn.click(calculator, inputs=[num_1, operation, num_2], outputs=[result]) + submit_btn.click(calculator, inputs=[num_1, operation, num_2], outputs=[result], api_name=False) examples = gr.Examples(examples=[[5, "add", 3], [4, "divide", 2], [-4, "multiply", 2.5], @@ -30,4 +30,4 @@ def calculator(num1, operation, num2): inputs=[num_1, operation, num_2]) if __name__ == "__main__": - demo.launch() \ No newline at end of file + demo.launch(show_api=False) \ No newline at end of file diff --git a/demo/chatbot_multimodal/run.ipynb b/demo/chatbot_multimodal/run.ipynb index 4416963b958e..dfe33c0335e5 100644 --- a/demo/chatbot_multimodal/run.ipynb +++ b/demo/chatbot_multimodal/run.ipynb @@ -1 +1,5 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_multimodal"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/chatbot_multimodal/avatar.png"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "import time\n", "\n", "# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). 
Plus shows support for streaming text.\n", "\n", "\n", "def add_text(history, text):\n", " history = history + [(text, None)]\n", " return history, gr.Textbox(value=\"\", interactive=False)\n", "\n", "\n", "def add_file(history, file):\n", " history = history + [((file.name,), None)]\n", " return history\n", "\n", "\n", "def bot(history):\n", " response = \"**That's cool!**\"\n", " history[-1][1] = \"\"\n", " for character in response:\n", " history[-1][1] += character\n", " time.sleep(0.05)\n", " yield history\n", "\n", "\n", "with gr.Blocks() as demo:\n", " chatbot = gr.Chatbot(\n", " [],\n", " elem_id=\"chatbot\",\n", " bubble_full_width=False,\n", " avatar_images=(None, (os.path.join(os.path.abspath(''), \"avatar.png\"))),\n", " )\n", "\n", " with gr.Row():\n", " txt = gr.Textbox(\n", " scale=4,\n", " show_label=False,\n", " placeholder=\"Enter text and press enter, or upload an image\",\n", " container=False,\n", " )\n", " btn = gr.UploadButton(\"\ud83d\udcc1\", file_types=[\"image\", \"video\", \"audio\"])\n", "\n", " txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(\n", " bot, chatbot, chatbot\n", " )\n", " txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)\n", " file_msg = btn.upload(add_file, [chatbot, btn], [chatbot], queue=False).then(\n", " bot, chatbot, chatbot\n", " )\n", "\n", "demo.queue()\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +<<<<<<< HEAD +{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: chatbot_multimodal"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/chatbot_multimodal/avatar.png"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "import time\n", "\n", "# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). 
Plus shows support for streaming text.\n", "\n", "\n", "def add_text(history, text):\n", " history = history + [(text, None)]\n", " return history, gr.Textbox(value=\"\", interactive=False)\n", "\n", "\n", "def add_file(history, file):\n", " history = history + [((file.name,), None)]\n", " return history\n", "\n", "\n", "def bot(history):\n", " response = \"**That's cool!**\"\n", " history[-1][1] = \"\"\n", " for character in response:\n", " history[-1][1] += character\n", " time.sleep(0.05)\n", " yield history\n", "\n", "\n", "with gr.Blocks() as demo:\n", " chatbot = gr.Chatbot(\n", " [],\n", " elem_id=\"chatbot\",\n", " bubble_full_width=False,\n", " avatar_images=(None, (os.path.join(os.path.abspath(''), \"avatar.png\"))),\n", " )\n", "\n", " with gr.Row():\n", " txt = gr.Textbox(\n", " scale=4,\n", " show_label=False,\n", " placeholder=\"Enter text and press enter, or upload an image\",\n", " container=False,\n", " )\n", " btn = gr.UploadButton(\"\ud83d\udcc1\", file_types=[\"image\", \"video\", \"audio\"])\n", "\n", " txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(\n", " bot, chatbot, chatbot, api_name=\"bot_response\"\n", " )\n", " txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)\n", " file_msg = btn.upload(add_file, [chatbot, btn], [chatbot], queue=False).then(\n", " bot, chatbot, chatbot\n", " )\n", "\n", "demo.queue()\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +======= +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_multimodal"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/chatbot_multimodal/avatar.png"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "import time\n", "\n", "# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). 
Plus shows support for streaming text.\n", "\n", "\n", "def add_text(history, text):\n", " history = history + [(text, None)]\n", " return history, gr.Textbox(value=\"\", interactive=False)\n", "\n", "\n", "def add_file(history, file):\n", " history = history + [((file.name,), None)]\n", " return history\n", "\n", "\n", "def bot(history):\n", " response = \"**That's cool!**\"\n", " history[-1][1] = \"\"\n", " for character in response:\n", " history[-1][1] += character\n", " time.sleep(0.05)\n", " yield history\n", "\n", "\n", "with gr.Blocks() as demo:\n", " chatbot = gr.Chatbot(\n", " [],\n", " elem_id=\"chatbot\",\n", " bubble_full_width=False,\n", " avatar_images=(None, (os.path.join(os.path.abspath(''), \"avatar.png\"))),\n", " )\n", "\n", " with gr.Row():\n", " txt = gr.Textbox(\n", " scale=4,\n", " show_label=False,\n", " placeholder=\"Enter text and press enter, or upload an image\",\n", " container=False,\n", " )\n", " btn = gr.UploadButton(\"\ud83d\udcc1\", file_types=[\"image\", \"video\", \"audio\"])\n", "\n", " txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(\n", " bot, chatbot, chatbot\n", " )\n", " txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)\n", " file_msg = btn.upload(add_file, [chatbot, btn], [chatbot], queue=False).then(\n", " bot, chatbot, chatbot\n", " )\n", "\n", "demo.queue()\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +>>>>>>> main diff --git a/demo/chatbot_multimodal/run.py b/demo/chatbot_multimodal/run.py index 5d831d71fb4a..650905aa2bcf 100644 --- a/demo/chatbot_multimodal/run.py +++ b/demo/chatbot_multimodal/run.py @@ -42,7 +42,7 @@ def bot(history): btn = gr.UploadButton("📁", file_types=["image", "video", "audio"]) txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then( - bot, chatbot, chatbot + bot, chatbot, chatbot, api_name="bot_response" ) txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False) file_msg = btn.upload(add_file, [chatbot, btn], [chatbot], queue=False).then( diff --git a/demo/clear_components/run.ipynb b/demo/clear_components/run.ipynb index 66b9618ce3e7..a5b0c358523c 100644 --- a/demo/clear_components/run.ipynb +++ b/demo/clear_components/run.ipynb @@ -1 +1,5 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: clear_components"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/clear_components/__init__.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from datetime import datetime\n", "import os\n", "import random\n", "import string\n", "import pandas as pd\n", "\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", "\n", "\n", "\n", "def random_plot():\n", " start_year = 2020\n", " x = np.arange(start_year, start_year + 5)\n", " year_count = x.shape[0]\n", " plt_format = \"-\"\n", " fig = plt.figure()\n", " ax = fig.add_subplot(111)\n", " series = np.arange(0, year_count, 
dtype=float)\n", " series = series**2\n", " series += np.random.rand(year_count)\n", " ax.plot(x, series, plt_format)\n", " return fig\n", "\n", "\n", "images = [\n", " \"https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n", " \"https://images.unsplash.com/photo-1554151228-14d9def656e4?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=386&q=80\",\n", " \"https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80\",\n", "]\n", "file_dir = os.path.join(os.path.abspath(''), \"..\", \"kitchen_sink\", \"files\")\n", "model3d_dir = os.path.join(os.path.abspath(''), \"..\", \"model3D\", \"files\")\n", "highlighted_text_output_1 = [\n", " {\n", " \"entity\": \"I-LOC\",\n", " \"score\": 0.9988978,\n", " \"index\": 2,\n", " \"word\": \"Chicago\",\n", " \"start\": 5,\n", " \"end\": 12,\n", " },\n", " {\n", " \"entity\": \"I-MISC\",\n", " \"score\": 0.9958592,\n", " \"index\": 5,\n", " \"word\": \"Pakistani\",\n", " \"start\": 22,\n", " \"end\": 31,\n", " },\n", "]\n", "highlighted_text_output_2 = [\n", " {\n", " \"entity\": \"I-LOC\",\n", " \"score\": 0.9988978,\n", " \"index\": 2,\n", " \"word\": \"Chicago\",\n", " \"start\": 5,\n", " \"end\": 12,\n", " },\n", " {\n", " \"entity\": \"I-LOC\",\n", " \"score\": 0.9958592,\n", " \"index\": 5,\n", " \"word\": \"Pakistan\",\n", " \"start\": 22,\n", " \"end\": 30,\n", " },\n", "]\n", "\n", "highlighted_text = \"Does Chicago have any Pakistani restaurants\"\n", "\n", "\n", "def random_model3d():\n", " model_3d = random.choice(\n", " [os.path.join(model3d_dir, model) for model in os.listdir(model3d_dir) if model != \"source.txt\"]\n", " )\n", " return model_3d\n", "\n", "\n", "\n", "components = [\n", " gr.Textbox(value=lambda: datetime.now(), label=\"Current Time\"),\n", " gr.Number(value=lambda: random.random(), label=\"Random Percentage\"),\n", " gr.Slider(minimum=0, maximum=100, randomize=True, label=\"Slider with randomize\"),\n", " gr.Slider(\n", " minimum=0,\n", " maximum=1,\n", " value=lambda: random.random(),\n", " label=\"Slider with value func\",\n", " ),\n", " gr.Checkbox(value=lambda: random.random() > 0.5, label=\"Random Checkbox\"),\n", " gr.CheckboxGroup(\n", " choices=[\"a\", \"b\", \"c\", \"d\"],\n", " value=lambda: random.choice([\"a\", \"b\", \"c\", \"d\"]),\n", " label=\"Random CheckboxGroup\",\n", " ),\n", " gr.Radio(\n", " choices=list(string.ascii_lowercase),\n", " value=lambda: random.choice(string.ascii_lowercase),\n", " ),\n", " gr.Dropdown(\n", " choices=[\"a\", \"b\", \"c\", \"d\", \"e\"],\n", " value=lambda: random.choice([\"a\", \"b\", \"c\"]),\n", " ),\n", " gr.Image(\n", " value=lambda: random.choice(images)\n", " ),\n", " gr.Video(value=lambda: os.path.join(file_dir, \"world.mp4\")),\n", " gr.Audio(value=lambda: os.path.join(file_dir, \"cantina.wav\")),\n", " gr.File(\n", " value=lambda: random.choice(\n", " [os.path.join(file_dir, img) for img in os.listdir(file_dir)]\n", " )\n", " ),\n", " gr.Dataframe(\n", " value=lambda: pd.DataFrame({\"random_number_rows\": range(5)}, columns=[\"one\", \"two\", \"three\"])\n", " ),\n", " gr.Timeseries(value=lambda: os.path.join(file_dir, \"time.csv\")),\n", " gr.ColorPicker(value=lambda: random.choice([\"#000000\", \"#ff0000\", \"#0000FF\"])),\n", " gr.Label(value=lambda: random.choice([\"Pedestrian\", \"Car\", \"Cyclist\"])),\n", " 
gr.HighlightedText(\n", " value=lambda: random.choice(\n", " [\n", " {\"text\": highlighted_text, \"entities\": highlighted_text_output_1},\n", " {\"text\": highlighted_text, \"entities\": highlighted_text_output_2},\n", " ]\n", " ),\n", " ),\n", " gr.JSON(value=lambda: random.choice([{\"a\": 1}, {\"b\": 2}])),\n", " gr.HTML(\n", " value=lambda: random.choice(\n", " [\n", " '
I am red',\n", "                'I am blue
',\n", " ]\n", " )\n", " ),\n", " gr.Gallery(\n", " value=lambda: images\n", " ),\n", " gr.Model3D(value=random_model3d),\n", " gr.Plot(value=random_plot),\n", " gr.Markdown(value=lambda: f\"### {random.choice(['Hello', 'Hi', 'Goodbye!'])}\"),\n", "]\n", "\n", "\n", "def evaluate_values(*args):\n", " are_false = []\n", " for a in args:\n", " if isinstance(a, (pd.DataFrame, np.ndarray)):\n", " are_false.append(not a.any().any())\n", " elif isinstance(a, str) and a.startswith(\"#\"):\n", " are_false.append(a == \"#000000\")\n", " else:\n", " are_false.append(not a)\n", " return all(are_false)\n", "\n", "\n", "with gr.Blocks() as demo:\n", " for i, component in enumerate(components):\n", " component.label = f\"component_{str(i).zfill(2)}\"\n", " component.render()\n", " clear = gr.ClearButton(value=\"Clear\", components=components)\n", " result = gr.Textbox(label=\"Are all cleared?\")\n", " hide = gr.Button(value=\"Hide\")\n", " reveal = gr.Button(value=\"Reveal\")\n", " hide.click(\n", " lambda: [c.__class__(visible=False) for c in components],\n", " inputs=[],\n", " outputs=components\n", " )\n", " reveal.click(\n", " lambda: [c.__class__(visible=True) for c in components],\n", " inputs=[],\n", " outputs=components\n", " )\n", " get_value = gr.Button(value=\"Get Values\")\n", " get_value.click(evaluate_values, components, result)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +<<<<<<< HEAD +{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: clear_components"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/clear_components/__init__.py"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from datetime import datetime\n", "import os\n", "import random\n", "import string\n", "import pandas as pd\n", "\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", "\n", "\n", "\n", "def random_plot():\n", " start_year = 2020\n", " x = np.arange(start_year, start_year + 5)\n", " year_count = x.shape[0]\n", " plt_format = \"-\"\n", " fig = plt.figure()\n", " ax = fig.add_subplot(111)\n", " series = np.arange(0, year_count, dtype=float)\n", " series = series**2\n", " series += np.random.rand(year_count)\n", " ax.plot(x, series, plt_format)\n", " return fig\n", "\n", "\n", "images = [\n", " \"https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n", " \"https://images.unsplash.com/photo-1554151228-14d9def656e4?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=386&q=80\",\n", " \"https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80\",\n", "]\n", "file_dir = os.path.join(os.path.abspath(''), \"..\", \"kitchen_sink\", \"files\")\n", "model3d_dir = os.path.join(os.path.abspath(''), \"..\", \"model3D\", 
\"files\")\n", "highlighted_text_output_1 = [\n", " {\n", " \"entity\": \"I-LOC\",\n", " \"score\": 0.9988978,\n", " \"index\": 2,\n", " \"word\": \"Chicago\",\n", " \"start\": 5,\n", " \"end\": 12,\n", " },\n", " {\n", " \"entity\": \"I-MISC\",\n", " \"score\": 0.9958592,\n", " \"index\": 5,\n", " \"word\": \"Pakistani\",\n", " \"start\": 22,\n", " \"end\": 31,\n", " },\n", "]\n", "highlighted_text_output_2 = [\n", " {\n", " \"entity\": \"I-LOC\",\n", " \"score\": 0.9988978,\n", " \"index\": 2,\n", " \"word\": \"Chicago\",\n", " \"start\": 5,\n", " \"end\": 12,\n", " },\n", " {\n", " \"entity\": \"I-LOC\",\n", " \"score\": 0.9958592,\n", " \"index\": 5,\n", " \"word\": \"Pakistan\",\n", " \"start\": 22,\n", " \"end\": 30,\n", " },\n", "]\n", "\n", "highlighted_text = \"Does Chicago have any Pakistani restaurants\"\n", "\n", "\n", "def random_model3d():\n", " model_3d = random.choice(\n", " [os.path.join(model3d_dir, model) for model in os.listdir(model3d_dir) if model != \"source.txt\"]\n", " )\n", " return model_3d\n", "\n", "\n", "\n", "components = [\n", " gr.Textbox(value=lambda: datetime.now(), label=\"Current Time\"),\n", " gr.Number(value=lambda: random.random(), label=\"Random Percentage\"),\n", " gr.Slider(minimum=0, maximum=100, randomize=True, label=\"Slider with randomize\"),\n", " gr.Slider(\n", " minimum=0,\n", " maximum=1,\n", " value=lambda: random.random(),\n", " label=\"Slider with value func\",\n", " ),\n", " gr.Checkbox(value=lambda: random.random() > 0.5, label=\"Random Checkbox\"),\n", " gr.CheckboxGroup(\n", " choices=[\"a\", \"b\", \"c\", \"d\"],\n", " value=lambda: random.choice([\"a\", \"b\", \"c\", \"d\"]),\n", " label=\"Random CheckboxGroup\",\n", " ),\n", " gr.Radio(\n", " choices=list(string.ascii_lowercase),\n", " value=lambda: random.choice(string.ascii_lowercase),\n", " ),\n", " gr.Dropdown(\n", " choices=[\"a\", \"b\", \"c\", \"d\", \"e\"],\n", " value=lambda: random.choice([\"a\", \"b\", \"c\"]),\n", " ),\n", " gr.Image(\n", " value=lambda: random.choice(images)\n", " ),\n", " gr.Video(value=lambda: os.path.join(file_dir, \"world.mp4\")),\n", " gr.Audio(value=lambda: os.path.join(file_dir, \"cantina.wav\")),\n", " gr.File(\n", " value=lambda: random.choice(\n", " [os.path.join(file_dir, img) for img in os.listdir(file_dir)]\n", " )\n", " ),\n", " gr.Dataframe(\n", " value=lambda: pd.DataFrame({\"random_number_rows\": range(5)}, columns=[\"one\", \"two\", \"three\"])\n", " ),\n", " gr.ColorPicker(value=lambda: random.choice([\"#000000\", \"#ff0000\", \"#0000FF\"])),\n", " gr.Label(value=lambda: random.choice([\"Pedestrian\", \"Car\", \"Cyclist\"])),\n", " gr.HighlightedText(\n", " value=lambda: random.choice(\n", " [\n", " {\"text\": highlighted_text, \"entities\": highlighted_text_output_1},\n", " {\"text\": highlighted_text, \"entities\": highlighted_text_output_2},\n", " ]\n", " ),\n", " ),\n", " gr.JSON(value=lambda: random.choice([{\"a\": 1}, {\"b\": 2}])),\n", " gr.HTML(\n", " value=lambda: random.choice(\n", " [\n", " '
I am red',\n", "                'I am blue
',\n", " ]\n", " )\n", " ),\n", " gr.Gallery(\n", " value=lambda: images\n", " ),\n", " gr.Model3D(value=random_model3d),\n", " gr.Plot(value=random_plot),\n", " gr.Markdown(value=lambda: f\"### {random.choice(['Hello', 'Hi', 'Goodbye!'])}\"),\n", "]\n", "\n", "\n", "def evaluate_values(*args):\n", " are_false = []\n", " for a in args:\n", " if isinstance(a, (pd.DataFrame, np.ndarray)):\n", " are_false.append(not a.any().any())\n", " elif isinstance(a, str) and a.startswith(\"#\"):\n", " are_false.append(a == \"#000000\")\n", " else:\n", " are_false.append(not a)\n", " return all(are_false)\n", "\n", "\n", "with gr.Blocks() as demo:\n", " for i, component in enumerate(components):\n", " component.label = f\"component_{str(i).zfill(2)}\"\n", " component.render()\n", " clear = gr.ClearButton(value=\"Clear\", components=components)\n", " result = gr.Textbox(label=\"Are all cleared?\")\n", " hide = gr.Button(value=\"Hide\")\n", " reveal = gr.Button(value=\"Reveal\")\n", " hide.click(\n", " lambda: [c.__class__(visible=False) for c in components],\n", " inputs=[],\n", " outputs=components\n", " )\n", " reveal.click(\n", " lambda: [c.__class__(visible=True) for c in components],\n", " inputs=[],\n", " outputs=components\n", " )\n", " get_value = gr.Button(value=\"Get Values\")\n", " get_value.click(evaluate_values, components, result)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +======= +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: clear_components"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/clear_components/__init__.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from datetime import datetime\n", "import os\n", "import random\n", "import string\n", "import pandas as pd\n", "\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", "\n", "\n", "\n", "def random_plot():\n", " start_year = 2020\n", " x = np.arange(start_year, start_year + 5)\n", " year_count = x.shape[0]\n", " plt_format = \"-\"\n", " fig = plt.figure()\n", " ax = fig.add_subplot(111)\n", " series = np.arange(0, year_count, dtype=float)\n", " series = series**2\n", " series += np.random.rand(year_count)\n", " ax.plot(x, series, plt_format)\n", " return fig\n", "\n", "\n", "images = [\n", " \"https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n", " \"https://images.unsplash.com/photo-1554151228-14d9def656e4?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=386&q=80\",\n", " \"https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80\",\n", "]\n", "file_dir = os.path.join(os.path.abspath(''), \"..\", \"kitchen_sink\", \"files\")\n", "model3d_dir = os.path.join(os.path.abspath(''), \"..\", \"model3D\", \"files\")\n", 
"highlighted_text_output_1 = [\n", " {\n", " \"entity\": \"I-LOC\",\n", " \"score\": 0.9988978,\n", " \"index\": 2,\n", " \"word\": \"Chicago\",\n", " \"start\": 5,\n", " \"end\": 12,\n", " },\n", " {\n", " \"entity\": \"I-MISC\",\n", " \"score\": 0.9958592,\n", " \"index\": 5,\n", " \"word\": \"Pakistani\",\n", " \"start\": 22,\n", " \"end\": 31,\n", " },\n", "]\n", "highlighted_text_output_2 = [\n", " {\n", " \"entity\": \"I-LOC\",\n", " \"score\": 0.9988978,\n", " \"index\": 2,\n", " \"word\": \"Chicago\",\n", " \"start\": 5,\n", " \"end\": 12,\n", " },\n", " {\n", " \"entity\": \"I-LOC\",\n", " \"score\": 0.9958592,\n", " \"index\": 5,\n", " \"word\": \"Pakistan\",\n", " \"start\": 22,\n", " \"end\": 30,\n", " },\n", "]\n", "\n", "highlighted_text = \"Does Chicago have any Pakistani restaurants\"\n", "\n", "\n", "def random_model3d():\n", " model_3d = random.choice(\n", " [os.path.join(model3d_dir, model) for model in os.listdir(model3d_dir) if model != \"source.txt\"]\n", " )\n", " return model_3d\n", "\n", "\n", "\n", "components = [\n", " gr.Textbox(value=lambda: datetime.now(), label=\"Current Time\"),\n", " gr.Number(value=lambda: random.random(), label=\"Random Percentage\"),\n", " gr.Slider(minimum=0, maximum=100, randomize=True, label=\"Slider with randomize\"),\n", " gr.Slider(\n", " minimum=0,\n", " maximum=1,\n", " value=lambda: random.random(),\n", " label=\"Slider with value func\",\n", " ),\n", " gr.Checkbox(value=lambda: random.random() > 0.5, label=\"Random Checkbox\"),\n", " gr.CheckboxGroup(\n", " choices=[\"a\", \"b\", \"c\", \"d\"],\n", " value=lambda: random.choice([\"a\", \"b\", \"c\", \"d\"]),\n", " label=\"Random CheckboxGroup\",\n", " ),\n", " gr.Radio(\n", " choices=list(string.ascii_lowercase),\n", " value=lambda: random.choice(string.ascii_lowercase),\n", " ),\n", " gr.Dropdown(\n", " choices=[\"a\", \"b\", \"c\", \"d\", \"e\"],\n", " value=lambda: random.choice([\"a\", \"b\", \"c\"]),\n", " ),\n", " gr.Image(\n", " value=lambda: random.choice(images)\n", " ),\n", " gr.Video(value=lambda: os.path.join(file_dir, \"world.mp4\")),\n", " gr.Audio(value=lambda: os.path.join(file_dir, \"cantina.wav\")),\n", " gr.File(\n", " value=lambda: random.choice(\n", " [os.path.join(file_dir, img) for img in os.listdir(file_dir)]\n", " )\n", " ),\n", " gr.Dataframe(\n", " value=lambda: pd.DataFrame({\"random_number_rows\": range(5)}, columns=[\"one\", \"two\", \"three\"])\n", " ),\n", " gr.Timeseries(value=lambda: os.path.join(file_dir, \"time.csv\")),\n", " gr.ColorPicker(value=lambda: random.choice([\"#000000\", \"#ff0000\", \"#0000FF\"])),\n", " gr.Label(value=lambda: random.choice([\"Pedestrian\", \"Car\", \"Cyclist\"])),\n", " gr.HighlightedText(\n", " value=lambda: random.choice(\n", " [\n", " {\"text\": highlighted_text, \"entities\": highlighted_text_output_1},\n", " {\"text\": highlighted_text, \"entities\": highlighted_text_output_2},\n", " ]\n", " ),\n", " ),\n", " gr.JSON(value=lambda: random.choice([{\"a\": 1}, {\"b\": 2}])),\n", " gr.HTML(\n", " value=lambda: random.choice(\n", " [\n", " '
I am red',\n", "                'I am blue
',\n", " ]\n", " )\n", " ),\n", " gr.Gallery(\n", " value=lambda: images\n", " ),\n", " gr.Model3D(value=random_model3d),\n", " gr.Plot(value=random_plot),\n", " gr.Markdown(value=lambda: f\"### {random.choice(['Hello', 'Hi', 'Goodbye!'])}\"),\n", "]\n", "\n", "\n", "def evaluate_values(*args):\n", " are_false = []\n", " for a in args:\n", " if isinstance(a, (pd.DataFrame, np.ndarray)):\n", " are_false.append(not a.any().any())\n", " elif isinstance(a, str) and a.startswith(\"#\"):\n", " are_false.append(a == \"#000000\")\n", " else:\n", " are_false.append(not a)\n", " return all(are_false)\n", "\n", "\n", "with gr.Blocks() as demo:\n", " for i, component in enumerate(components):\n", " component.label = f\"component_{str(i).zfill(2)}\"\n", " component.render()\n", " clear = gr.ClearButton(value=\"Clear\", components=components)\n", " result = gr.Textbox(label=\"Are all cleared?\")\n", " hide = gr.Button(value=\"Hide\")\n", " reveal = gr.Button(value=\"Reveal\")\n", " hide.click(\n", " lambda: [c.__class__(visible=False) for c in components],\n", " inputs=[],\n", " outputs=components\n", " )\n", " reveal.click(\n", " lambda: [c.__class__(visible=True) for c in components],\n", " inputs=[],\n", " outputs=components\n", " )\n", " get_value = gr.Button(value=\"Get Values\")\n", " get_value.click(evaluate_values, components, result)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +>>>>>>> main diff --git a/demo/clear_components/run.py b/demo/clear_components/run.py index d3eb8361fec2..d60e73621d6f 100644 --- a/demo/clear_components/run.py +++ b/demo/clear_components/run.py @@ -116,7 +116,6 @@ def random_model3d(): gr.Dataframe( value=lambda: pd.DataFrame({"random_number_rows": range(5)}, columns=["one", "two", "three"]) ), - gr.Timeseries(value=lambda: os.path.join(file_dir, "time.csv")), gr.ColorPicker(value=lambda: random.choice(["#000000", "#ff0000", "#0000FF"])), gr.Label(value=lambda: random.choice(["Pedestrian", "Car", "Cyclist"])), gr.HighlightedText( diff --git a/demo/clustering/run.ipynb b/demo/clustering/run.ipynb index 46792c26f798..b21a4c5d6ff9 100644 --- a/demo/clustering/run.ipynb +++ b/demo/clustering/run.ipynb @@ -1 +1,5 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: clustering\n", "### This demo built with Blocks generates 9 plots based on the input.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio matplotlib>=3.5.2 scikit-learn>=1.0.1 "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import math\n", "from functools import partial\n", "import matplotlib.pyplot as plt\n", "import numpy as np\n", "from sklearn.cluster import (\n", " AgglomerativeClustering, Birch, DBSCAN, KMeans, MeanShift, OPTICS, SpectralClustering, estimate_bandwidth\n", ")\n", "from sklearn.datasets import make_blobs, make_circles, make_moons\n", "from sklearn.mixture import GaussianMixture\n", "from sklearn.neighbors import kneighbors_graph\n", "from sklearn.preprocessing import StandardScaler\n", "\n", "plt.style.use('seaborn-v0_8')\n", "SEED = 0\n", "MAX_CLUSTERS = 10\n", "N_SAMPLES = 1000\n", "N_COLS = 3\n", "FIGSIZE = 7, 7 # does not affect size in webpage\n", "COLORS = [\n", " 'blue', 'orange', 'green', 
'red', 'purple', 'brown', 'pink', 'gray', 'olive', 'cyan'\n", "]\n", "if len(COLORS) <= MAX_CLUSTERS:\n", " raise ValueError(\"Not enough different colors for all clusters\")\n", "np.random.seed(SEED)\n", "\n", "\n", "def normalize(X):\n", " return StandardScaler().fit_transform(X)\n", "\n", "def get_regular(n_clusters):\n", " # spiral pattern\n", " centers = [\n", " [0, 0],\n", " [1, 0],\n", " [1, 1],\n", " [0, 1],\n", " [-1, 1],\n", " [-1, 0],\n", " [-1, -1],\n", " [0, -1],\n", " [1, -1],\n", " [2, -1],\n", " ][:n_clusters]\n", " assert len(centers) == n_clusters\n", " X, labels = make_blobs(n_samples=N_SAMPLES, centers=centers, cluster_std=0.25, random_state=SEED)\n", " return normalize(X), labels\n", "\n", "\n", "def get_circles(n_clusters):\n", " X, labels = make_circles(n_samples=N_SAMPLES, factor=0.5, noise=0.05, random_state=SEED)\n", " return normalize(X), labels\n", "\n", "\n", "def get_moons(n_clusters):\n", " X, labels = make_moons(n_samples=N_SAMPLES, noise=0.05, random_state=SEED)\n", " return normalize(X), labels\n", "\n", "\n", "def get_noise(n_clusters):\n", " np.random.seed(SEED)\n", " X, labels = np.random.rand(N_SAMPLES, 2), np.random.randint(0, n_clusters, size=(N_SAMPLES,))\n", " return normalize(X), labels\n", "\n", "\n", "def get_anisotropic(n_clusters):\n", " X, labels = make_blobs(n_samples=N_SAMPLES, centers=n_clusters, random_state=170)\n", " transformation = [[0.6, -0.6], [-0.4, 0.8]]\n", " X = np.dot(X, transformation)\n", " return X, labels\n", "\n", "\n", "def get_varied(n_clusters):\n", " cluster_std = [1.0, 2.5, 0.5, 1.0, 2.5, 0.5, 1.0, 2.5, 0.5, 1.0][:n_clusters]\n", " assert len(cluster_std) == n_clusters\n", " X, labels = make_blobs(\n", " n_samples=N_SAMPLES, centers=n_clusters, cluster_std=cluster_std, random_state=SEED\n", " )\n", " return normalize(X), labels\n", "\n", "\n", "def get_spiral(n_clusters):\n", " # from https://scikit-learn.org/stable/auto_examples/cluster/plot_agglomerative_clustering.html\n", " np.random.seed(SEED)\n", " t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, N_SAMPLES))\n", " x = t * np.cos(t)\n", " y = t * np.sin(t)\n", " X = np.concatenate((x, y))\n", " X += 0.7 * np.random.randn(2, N_SAMPLES)\n", " X = np.ascontiguousarray(X.T)\n", "\n", " labels = np.zeros(N_SAMPLES, dtype=int)\n", " return normalize(X), labels\n", "\n", "\n", "DATA_MAPPING = {\n", " 'regular': get_regular,\n", " 'circles': get_circles,\n", " 'moons': get_moons,\n", " 'spiral': get_spiral,\n", " 'noise': get_noise,\n", " 'anisotropic': get_anisotropic,\n", " 'varied': get_varied,\n", "}\n", "\n", "\n", "def get_groundtruth_model(X, labels, n_clusters, **kwargs):\n", " # dummy model to show true label distribution\n", " class Dummy:\n", " def __init__(self, y):\n", " self.labels_ = labels\n", "\n", " return Dummy(labels)\n", "\n", "\n", "def get_kmeans(X, labels, n_clusters, **kwargs):\n", " model = KMeans(init=\"k-means++\", n_clusters=n_clusters, n_init=10, random_state=SEED)\n", " model.set_params(**kwargs)\n", " return model.fit(X)\n", "\n", "\n", "def get_dbscan(X, labels, n_clusters, **kwargs):\n", " model = DBSCAN(eps=0.3)\n", " model.set_params(**kwargs)\n", " return model.fit(X)\n", "\n", "\n", "def get_agglomerative(X, labels, n_clusters, **kwargs):\n", " connectivity = kneighbors_graph(\n", " X, n_neighbors=n_clusters, include_self=False\n", " )\n", " # make connectivity symmetric\n", " connectivity = 0.5 * (connectivity + connectivity.T)\n", " model = AgglomerativeClustering(\n", " n_clusters=n_clusters, linkage=\"ward\", 
connectivity=connectivity\n", " )\n", " model.set_params(**kwargs)\n", " return model.fit(X)\n", "\n", "\n", "def get_meanshift(X, labels, n_clusters, **kwargs):\n", " bandwidth = estimate_bandwidth(X, quantile=0.25)\n", " model = MeanShift(bandwidth=bandwidth, bin_seeding=True)\n", " model.set_params(**kwargs)\n", " return model.fit(X)\n", "\n", "\n", "def get_spectral(X, labels, n_clusters, **kwargs):\n", " model = SpectralClustering(\n", " n_clusters=n_clusters,\n", " eigen_solver=\"arpack\",\n", " affinity=\"nearest_neighbors\",\n", " )\n", " model.set_params(**kwargs)\n", " return model.fit(X)\n", "\n", "\n", "def get_optics(X, labels, n_clusters, **kwargs):\n", " model = OPTICS(\n", " min_samples=7,\n", " xi=0.05,\n", " min_cluster_size=0.1,\n", " )\n", " model.set_params(**kwargs)\n", " return model.fit(X)\n", "\n", "\n", "def get_birch(X, labels, n_clusters, **kwargs):\n", " model = Birch(n_clusters=n_clusters)\n", " model.set_params(**kwargs)\n", " return model.fit(X)\n", "\n", "\n", "def get_gaussianmixture(X, labels, n_clusters, **kwargs):\n", " model = GaussianMixture(\n", " n_components=n_clusters, covariance_type=\"full\", random_state=SEED,\n", " )\n", " model.set_params(**kwargs)\n", " return model.fit(X)\n", "\n", "\n", "MODEL_MAPPING = {\n", " 'True labels': get_groundtruth_model,\n", " 'KMeans': get_kmeans,\n", " 'DBSCAN': get_dbscan,\n", " 'MeanShift': get_meanshift,\n", " 'SpectralClustering': get_spectral,\n", " 'OPTICS': get_optics,\n", " 'Birch': get_birch,\n", " 'GaussianMixture': get_gaussianmixture,\n", " 'AgglomerativeClustering': get_agglomerative,\n", "}\n", "\n", "\n", "def plot_clusters(ax, X, labels):\n", " set_clusters = set(labels)\n", " set_clusters.discard(-1) # -1 signifiies outliers, which we plot separately\n", " for label, color in zip(sorted(set_clusters), COLORS):\n", " idx = labels == label\n", " if not sum(idx):\n", " continue\n", " ax.scatter(X[idx, 0], X[idx, 1], color=color)\n", "\n", " # show outliers (if any)\n", " idx = labels == -1\n", " if sum(idx):\n", " ax.scatter(X[idx, 0], X[idx, 1], c='k', marker='x')\n", "\n", " ax.grid(None)\n", " ax.set_xticks([])\n", " ax.set_yticks([])\n", " return ax\n", "\n", "\n", "def cluster(dataset: str, n_clusters: int, clustering_algorithm: str):\n", " if isinstance(n_clusters, dict):\n", " n_clusters = n_clusters['value']\n", " else:\n", " n_clusters = int(n_clusters)\n", "\n", " X, labels = DATA_MAPPING[dataset](n_clusters)\n", " model = MODEL_MAPPING[clustering_algorithm](X, labels, n_clusters=n_clusters)\n", " if hasattr(model, \"labels_\"):\n", " y_pred = model.labels_.astype(int)\n", " else:\n", " y_pred = model.predict(X)\n", "\n", " fig, ax = plt.subplots(figsize=FIGSIZE)\n", "\n", " plot_clusters(ax, X, y_pred)\n", " ax.set_title(clustering_algorithm, fontsize=16)\n", "\n", " return fig\n", "\n", "\n", "title = \"Clustering with Scikit-learn\"\n", "description = (\n", " \"This example shows how different clustering algorithms work. Simply pick \"\n", " \"the dataset and the number of clusters to see how the clustering algorithms work. 
\"\n", " \"Colored circles are (predicted) labels and black x are outliers.\"\n", ")\n", "\n", "\n", "def iter_grid(n_rows, n_cols):\n", " # create a grid using gradio Block\n", " for _ in range(n_rows):\n", " with gr.Row():\n", " for _ in range(n_cols):\n", " with gr.Column():\n", " yield\n", "\n", "with gr.Blocks(title=title) as demo:\n", " gr.HTML(f\"{title}\")\n", " gr.Markdown(description)\n", "\n", " input_models = list(MODEL_MAPPING)\n", " input_data = gr.Radio(\n", " list(DATA_MAPPING),\n", " value=\"regular\",\n", " label=\"dataset\"\n", " )\n", " input_n_clusters = gr.Slider(\n", " minimum=1,\n", " maximum=MAX_CLUSTERS,\n", " value=4,\n", " step=1,\n", " label='Number of clusters'\n", " )\n", " n_rows = int(math.ceil(len(input_models) / N_COLS))\n", " counter = 0\n", " for _ in iter_grid(n_rows, N_COLS):\n", " if counter >= len(input_models):\n", " break\n", "\n", " input_model = input_models[counter]\n", " plot = gr.Plot(label=input_model)\n", " fn = partial(cluster, clustering_algorithm=input_model)\n", " input_data.change(fn=fn, inputs=[input_data, input_n_clusters], outputs=plot)\n", " input_n_clusters.change(fn=fn, inputs=[input_data, input_n_clusters], outputs=plot)\n", " counter += 1\n", "\n", "demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +<<<<<<< HEAD +{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: clustering\n", "### This demo built with Blocks generates 9 plots based on the input.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio matplotlib>=3.5.2 scikit-learn>=1.0.1 "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import math\n", "from functools import partial\n", "import matplotlib.pyplot as plt\n", "import numpy as np\n", "from sklearn.cluster import (\n", " AgglomerativeClustering, Birch, DBSCAN, KMeans, MeanShift, OPTICS, SpectralClustering, estimate_bandwidth\n", ")\n", "from sklearn.datasets import make_blobs, make_circles, make_moons\n", "from sklearn.mixture import GaussianMixture\n", "from sklearn.neighbors import kneighbors_graph\n", "from sklearn.preprocessing import StandardScaler\n", "\n", "plt.style.use('seaborn-v0_8')\n", "SEED = 0\n", "MAX_CLUSTERS = 10\n", "N_SAMPLES = 1000\n", "N_COLS = 3\n", "FIGSIZE = 7, 7 # does not affect size in webpage\n", "COLORS = [\n", " 'blue', 'orange', 'green', 'red', 'purple', 'brown', 'pink', 'gray', 'olive', 'cyan'\n", "]\n", "if len(COLORS) <= MAX_CLUSTERS:\n", " raise ValueError(\"Not enough different colors for all clusters\")\n", "np.random.seed(SEED)\n", "\n", "\n", "def normalize(X):\n", " return StandardScaler().fit_transform(X)\n", "\n", "def get_regular(n_clusters):\n", " # spiral pattern\n", " centers = [\n", " [0, 0],\n", " [1, 0],\n", " [1, 1],\n", " [0, 1],\n", " [-1, 1],\n", " [-1, 0],\n", " [-1, -1],\n", " [0, -1],\n", " [1, -1],\n", " [2, -1],\n", " ][:n_clusters]\n", " assert len(centers) == n_clusters\n", " X, labels = make_blobs(n_samples=N_SAMPLES, centers=centers, cluster_std=0.25, random_state=SEED)\n", " return normalize(X), labels\n", "\n", "\n", "def get_circles(n_clusters):\n", " X, labels = make_circles(n_samples=N_SAMPLES, factor=0.5, noise=0.05, random_state=SEED)\n", " return normalize(X), labels\n", "\n", "\n", "def 
get_moons(n_clusters):\n", " X, labels = make_moons(n_samples=N_SAMPLES, noise=0.05, random_state=SEED)\n", " return normalize(X), labels\n", "\n", "\n", "def get_noise(n_clusters):\n", " np.random.seed(SEED)\n", " X, labels = np.random.rand(N_SAMPLES, 2), np.random.randint(0, n_clusters, size=(N_SAMPLES,))\n", " return normalize(X), labels\n", "\n", "\n", "def get_anisotropic(n_clusters):\n", " X, labels = make_blobs(n_samples=N_SAMPLES, centers=n_clusters, random_state=170)\n", " transformation = [[0.6, -0.6], [-0.4, 0.8]]\n", " X = np.dot(X, transformation)\n", " return X, labels\n", "\n", "\n", "def get_varied(n_clusters):\n", " cluster_std = [1.0, 2.5, 0.5, 1.0, 2.5, 0.5, 1.0, 2.5, 0.5, 1.0][:n_clusters]\n", " assert len(cluster_std) == n_clusters\n", " X, labels = make_blobs(\n", " n_samples=N_SAMPLES, centers=n_clusters, cluster_std=cluster_std, random_state=SEED\n", " )\n", " return normalize(X), labels\n", "\n", "\n", "def get_spiral(n_clusters):\n", " # from https://scikit-learn.org/stable/auto_examples/cluster/plot_agglomerative_clustering.html\n", " np.random.seed(SEED)\n", " t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, N_SAMPLES))\n", " x = t * np.cos(t)\n", " y = t * np.sin(t)\n", " X = np.concatenate((x, y))\n", " X += 0.7 * np.random.randn(2, N_SAMPLES)\n", " X = np.ascontiguousarray(X.T)\n", "\n", " labels = np.zeros(N_SAMPLES, dtype=int)\n", " return normalize(X), labels\n", "\n", "\n", "DATA_MAPPING = {\n", " 'regular': get_regular,\n", " 'circles': get_circles,\n", " 'moons': get_moons,\n", " 'spiral': get_spiral,\n", " 'noise': get_noise,\n", " 'anisotropic': get_anisotropic,\n", " 'varied': get_varied,\n", "}\n", "\n", "\n", "def get_groundtruth_model(X, labels, n_clusters, **kwargs):\n", " # dummy model to show true label distribution\n", " class Dummy:\n", " def __init__(self, y):\n", " self.labels_ = labels\n", "\n", " return Dummy(labels)\n", "\n", "\n", "def get_kmeans(X, labels, n_clusters, **kwargs):\n", " model = KMeans(init=\"k-means++\", n_clusters=n_clusters, n_init=10, random_state=SEED)\n", " model.set_params(**kwargs)\n", " return model.fit(X)\n", "\n", "\n", "def get_dbscan(X, labels, n_clusters, **kwargs):\n", " model = DBSCAN(eps=0.3)\n", " model.set_params(**kwargs)\n", " return model.fit(X)\n", "\n", "\n", "def get_agglomerative(X, labels, n_clusters, **kwargs):\n", " connectivity = kneighbors_graph(\n", " X, n_neighbors=n_clusters, include_self=False\n", " )\n", " # make connectivity symmetric\n", " connectivity = 0.5 * (connectivity + connectivity.T)\n", " model = AgglomerativeClustering(\n", " n_clusters=n_clusters, linkage=\"ward\", connectivity=connectivity\n", " )\n", " model.set_params(**kwargs)\n", " return model.fit(X)\n", "\n", "\n", "def get_meanshift(X, labels, n_clusters, **kwargs):\n", " bandwidth = estimate_bandwidth(X, quantile=0.25)\n", " model = MeanShift(bandwidth=bandwidth, bin_seeding=True)\n", " model.set_params(**kwargs)\n", " return model.fit(X)\n", "\n", "\n", "def get_spectral(X, labels, n_clusters, **kwargs):\n", " model = SpectralClustering(\n", " n_clusters=n_clusters,\n", " eigen_solver=\"arpack\",\n", " affinity=\"nearest_neighbors\",\n", " )\n", " model.set_params(**kwargs)\n", " return model.fit(X)\n", "\n", "\n", "def get_optics(X, labels, n_clusters, **kwargs):\n", " model = OPTICS(\n", " min_samples=7,\n", " xi=0.05,\n", " min_cluster_size=0.1,\n", " )\n", " model.set_params(**kwargs)\n", " return model.fit(X)\n", "\n", "\n", "def get_birch(X, labels, n_clusters, **kwargs):\n", " model = 
Birch(n_clusters=n_clusters)\n", " model.set_params(**kwargs)\n", " return model.fit(X)\n", "\n", "\n", "def get_gaussianmixture(X, labels, n_clusters, **kwargs):\n", " model = GaussianMixture(\n", " n_components=n_clusters, covariance_type=\"full\", random_state=SEED,\n", " )\n", " model.set_params(**kwargs)\n", " return model.fit(X)\n", "\n", "\n", "MODEL_MAPPING = {\n", " 'True labels': get_groundtruth_model,\n", " 'KMeans': get_kmeans,\n", " 'DBSCAN': get_dbscan,\n", " 'MeanShift': get_meanshift,\n", " 'SpectralClustering': get_spectral,\n", " 'OPTICS': get_optics,\n", " 'Birch': get_birch,\n", " 'GaussianMixture': get_gaussianmixture,\n", " 'AgglomerativeClustering': get_agglomerative,\n", "}\n", "\n", "\n", "def plot_clusters(ax, X, labels):\n", " set_clusters = set(labels)\n", " set_clusters.discard(-1) # -1 signifies outliers, which we plot separately\n", " for label, color in zip(sorted(set_clusters), COLORS):\n", " idx = labels == label\n", " if not sum(idx):\n", " continue\n", " ax.scatter(X[idx, 0], X[idx, 1], color=color)\n", "\n", " # show outliers (if any)\n", " idx = labels == -1\n", " if sum(idx):\n", " ax.scatter(X[idx, 0], X[idx, 1], c='k', marker='x')\n", "\n", " ax.grid(None)\n", " ax.set_xticks([])\n", " ax.set_yticks([])\n", " return ax\n", "\n", "\n", "def cluster(dataset: str, n_clusters: int, clustering_algorithm: str):\n", " if isinstance(n_clusters, dict):\n", " n_clusters = n_clusters['value']\n", " else:\n", " n_clusters = int(n_clusters)\n", "\n", " X, labels = DATA_MAPPING[dataset](n_clusters)\n", " model = MODEL_MAPPING[clustering_algorithm](X, labels, n_clusters=n_clusters)\n", " if hasattr(model, \"labels_\"):\n", " y_pred = model.labels_.astype(int)\n", " else:\n", " y_pred = model.predict(X)\n", "\n", " fig, ax = plt.subplots(figsize=FIGSIZE)\n", "\n", " plot_clusters(ax, X, y_pred)\n", " ax.set_title(clustering_algorithm, fontsize=16)\n", "\n", " return fig\n", "\n", "\n", "title = \"Clustering with Scikit-learn\"\n", "description = (\n", " \"This example shows how different clustering algorithms work. Simply pick \"\n", " \"the dataset and the number of clusters to see how the clustering algorithms work. 
\"\n", " \"Colored circles are (predicted) labels and black x are outliers.\"\n", ")\n", "\n", "\n", "def iter_grid(n_rows, n_cols):\n", " # create a grid using gradio Block\n", " for _ in range(n_rows):\n", " with gr.Row():\n", " for _ in range(n_cols):\n", " with gr.Column():\n", " yield\n", "\n", "with gr.Blocks(title=title) as demo:\n", " gr.HTML(f\"{title}\")\n", " gr.Markdown(description)\n", "\n", " input_models = list(MODEL_MAPPING)\n", " input_data = gr.Radio(\n", " list(DATA_MAPPING),\n", " value=\"regular\",\n", " label=\"dataset\"\n", " )\n", " input_n_clusters = gr.Slider(\n", " minimum=1,\n", " maximum=MAX_CLUSTERS,\n", " value=4,\n", " step=1,\n", " label='Number of clusters'\n", " )\n", " n_rows = int(math.ceil(len(input_models) / N_COLS))\n", " counter = 0\n", " for _ in iter_grid(n_rows, N_COLS):\n", " if counter >= len(input_models):\n", " break\n", "\n", " input_model = input_models[counter]\n", " plot = gr.Plot(label=input_model)\n", " fn = partial(cluster, clustering_algorithm=input_model)\n", " input_data.change(fn=fn, inputs=[input_data, input_n_clusters], outputs=plot)\n", " input_n_clusters.change(fn=fn, inputs=[input_data, input_n_clusters], outputs=plot)\n", " counter += 1\n", "\n", "demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +======= +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: clustering\n", "### This demo built with Blocks generates 9 plots based on the input.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio matplotlib>=3.5.2 scikit-learn>=1.0.1 "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import math\n", "from functools import partial\n", "import matplotlib.pyplot as plt\n", "import numpy as np\n", "from sklearn.cluster import (\n", " AgglomerativeClustering, Birch, DBSCAN, KMeans, MeanShift, OPTICS, SpectralClustering, estimate_bandwidth\n", ")\n", "from sklearn.datasets import make_blobs, make_circles, make_moons\n", "from sklearn.mixture import GaussianMixture\n", "from sklearn.neighbors import kneighbors_graph\n", "from sklearn.preprocessing import StandardScaler\n", "\n", "plt.style.use('seaborn-v0_8')\n", "SEED = 0\n", "MAX_CLUSTERS = 10\n", "N_SAMPLES = 1000\n", "N_COLS = 3\n", "FIGSIZE = 7, 7 # does not affect size in webpage\n", "COLORS = [\n", " 'blue', 'orange', 'green', 'red', 'purple', 'brown', 'pink', 'gray', 'olive', 'cyan'\n", "]\n", "if len(COLORS) <= MAX_CLUSTERS:\n", " raise ValueError(\"Not enough different colors for all clusters\")\n", "np.random.seed(SEED)\n", "\n", "\n", "def normalize(X):\n", " return StandardScaler().fit_transform(X)\n", "\n", "def get_regular(n_clusters):\n", " # spiral pattern\n", " centers = [\n", " [0, 0],\n", " [1, 0],\n", " [1, 1],\n", " [0, 1],\n", " [-1, 1],\n", " [-1, 0],\n", " [-1, -1],\n", " [0, -1],\n", " [1, -1],\n", " [2, -1],\n", " ][:n_clusters]\n", " assert len(centers) == n_clusters\n", " X, labels = make_blobs(n_samples=N_SAMPLES, centers=centers, cluster_std=0.25, random_state=SEED)\n", " return normalize(X), labels\n", "\n", "\n", "def get_circles(n_clusters):\n", " X, labels = make_circles(n_samples=N_SAMPLES, factor=0.5, noise=0.05, random_state=SEED)\n", " return normalize(X), labels\n", "\n", "\n", "def get_moons(n_clusters):\n", " X, 
labels = make_moons(n_samples=N_SAMPLES, noise=0.05, random_state=SEED)\n", " return normalize(X), labels\n", "\n", "\n", "def get_noise(n_clusters):\n", " np.random.seed(SEED)\n", " X, labels = np.random.rand(N_SAMPLES, 2), np.random.randint(0, n_clusters, size=(N_SAMPLES,))\n", " return normalize(X), labels\n", "\n", "\n", "def get_anisotropic(n_clusters):\n", " X, labels = make_blobs(n_samples=N_SAMPLES, centers=n_clusters, random_state=170)\n", " transformation = [[0.6, -0.6], [-0.4, 0.8]]\n", " X = np.dot(X, transformation)\n", " return X, labels\n", "\n", "\n", "def get_varied(n_clusters):\n", " cluster_std = [1.0, 2.5, 0.5, 1.0, 2.5, 0.5, 1.0, 2.5, 0.5, 1.0][:n_clusters]\n", " assert len(cluster_std) == n_clusters\n", " X, labels = make_blobs(\n", " n_samples=N_SAMPLES, centers=n_clusters, cluster_std=cluster_std, random_state=SEED\n", " )\n", " return normalize(X), labels\n", "\n", "\n", "def get_spiral(n_clusters):\n", " # from https://scikit-learn.org/stable/auto_examples/cluster/plot_agglomerative_clustering.html\n", " np.random.seed(SEED)\n", " t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, N_SAMPLES))\n", " x = t * np.cos(t)\n", " y = t * np.sin(t)\n", " X = np.concatenate((x, y))\n", " X += 0.7 * np.random.randn(2, N_SAMPLES)\n", " X = np.ascontiguousarray(X.T)\n", "\n", " labels = np.zeros(N_SAMPLES, dtype=int)\n", " return normalize(X), labels\n", "\n", "\n", "DATA_MAPPING = {\n", " 'regular': get_regular,\n", " 'circles': get_circles,\n", " 'moons': get_moons,\n", " 'spiral': get_spiral,\n", " 'noise': get_noise,\n", " 'anisotropic': get_anisotropic,\n", " 'varied': get_varied,\n", "}\n", "\n", "\n", "def get_groundtruth_model(X, labels, n_clusters, **kwargs):\n", " # dummy model to show true label distribution\n", " class Dummy:\n", " def __init__(self, y):\n", " self.labels_ = labels\n", "\n", " return Dummy(labels)\n", "\n", "\n", "def get_kmeans(X, labels, n_clusters, **kwargs):\n", " model = KMeans(init=\"k-means++\", n_clusters=n_clusters, n_init=10, random_state=SEED)\n", " model.set_params(**kwargs)\n", " return model.fit(X)\n", "\n", "\n", "def get_dbscan(X, labels, n_clusters, **kwargs):\n", " model = DBSCAN(eps=0.3)\n", " model.set_params(**kwargs)\n", " return model.fit(X)\n", "\n", "\n", "def get_agglomerative(X, labels, n_clusters, **kwargs):\n", " connectivity = kneighbors_graph(\n", " X, n_neighbors=n_clusters, include_self=False\n", " )\n", " # make connectivity symmetric\n", " connectivity = 0.5 * (connectivity + connectivity.T)\n", " model = AgglomerativeClustering(\n", " n_clusters=n_clusters, linkage=\"ward\", connectivity=connectivity\n", " )\n", " model.set_params(**kwargs)\n", " return model.fit(X)\n", "\n", "\n", "def get_meanshift(X, labels, n_clusters, **kwargs):\n", " bandwidth = estimate_bandwidth(X, quantile=0.25)\n", " model = MeanShift(bandwidth=bandwidth, bin_seeding=True)\n", " model.set_params(**kwargs)\n", " return model.fit(X)\n", "\n", "\n", "def get_spectral(X, labels, n_clusters, **kwargs):\n", " model = SpectralClustering(\n", " n_clusters=n_clusters,\n", " eigen_solver=\"arpack\",\n", " affinity=\"nearest_neighbors\",\n", " )\n", " model.set_params(**kwargs)\n", " return model.fit(X)\n", "\n", "\n", "def get_optics(X, labels, n_clusters, **kwargs):\n", " model = OPTICS(\n", " min_samples=7,\n", " xi=0.05,\n", " min_cluster_size=0.1,\n", " )\n", " model.set_params(**kwargs)\n", " return model.fit(X)\n", "\n", "\n", "def get_birch(X, labels, n_clusters, **kwargs):\n", " model = Birch(n_clusters=n_clusters)\n", " 
model.set_params(**kwargs)\n", " return model.fit(X)\n", "\n", "\n", "def get_gaussianmixture(X, labels, n_clusters, **kwargs):\n", " model = GaussianMixture(\n", " n_components=n_clusters, covariance_type=\"full\", random_state=SEED,\n", " )\n", " model.set_params(**kwargs)\n", " return model.fit(X)\n", "\n", "\n", "MODEL_MAPPING = {\n", " 'True labels': get_groundtruth_model,\n", " 'KMeans': get_kmeans,\n", " 'DBSCAN': get_dbscan,\n", " 'MeanShift': get_meanshift,\n", " 'SpectralClustering': get_spectral,\n", " 'OPTICS': get_optics,\n", " 'Birch': get_birch,\n", " 'GaussianMixture': get_gaussianmixture,\n", " 'AgglomerativeClustering': get_agglomerative,\n", "}\n", "\n", "\n", "def plot_clusters(ax, X, labels):\n", " set_clusters = set(labels)\n", " set_clusters.discard(-1) # -1 signifies outliers, which we plot separately\n", " for label, color in zip(sorted(set_clusters), COLORS):\n", " idx = labels == label\n", " if not sum(idx):\n", " continue\n", " ax.scatter(X[idx, 0], X[idx, 1], color=color)\n", "\n", " # show outliers (if any)\n", " idx = labels == -1\n", " if sum(idx):\n", " ax.scatter(X[idx, 0], X[idx, 1], c='k', marker='x')\n", "\n", " ax.grid(None)\n", " ax.set_xticks([])\n", " ax.set_yticks([])\n", " return ax\n", "\n", "\n", "def cluster(dataset: str, n_clusters: int, clustering_algorithm: str):\n", " if isinstance(n_clusters, dict):\n", " n_clusters = n_clusters['value']\n", " else:\n", " n_clusters = int(n_clusters)\n", "\n", " X, labels = DATA_MAPPING[dataset](n_clusters)\n", " model = MODEL_MAPPING[clustering_algorithm](X, labels, n_clusters=n_clusters)\n", " if hasattr(model, \"labels_\"):\n", " y_pred = model.labels_.astype(int)\n", " else:\n", " y_pred = model.predict(X)\n", "\n", " fig, ax = plt.subplots(figsize=FIGSIZE)\n", "\n", " plot_clusters(ax, X, y_pred)\n", " ax.set_title(clustering_algorithm, fontsize=16)\n", "\n", " return fig\n", "\n", "\n", "title = \"Clustering with Scikit-learn\"\n", "description = (\n", " \"This example shows how different clustering algorithms work. Simply pick \"\n", " \"the dataset and the number of clusters to see how the clustering algorithms work. 
\"\n", " \"Colored circles are (predicted) labels and black x are outliers.\"\n", ")\n", "\n", "\n", "def iter_grid(n_rows, n_cols):\n", " # create a grid using gradio Block\n", " for _ in range(n_rows):\n", " with gr.Row():\n", " for _ in range(n_cols):\n", " with gr.Column():\n", " yield\n", "\n", "with gr.Blocks(title=title) as demo:\n", " gr.HTML(f\"{title}\")\n", " gr.Markdown(description)\n", "\n", " input_models = list(MODEL_MAPPING)\n", " input_data = gr.Radio(\n", " list(DATA_MAPPING),\n", " value=\"regular\",\n", " label=\"dataset\"\n", " )\n", " input_n_clusters = gr.Slider(\n", " minimum=1,\n", " maximum=MAX_CLUSTERS,\n", " value=4,\n", " step=1,\n", " label='Number of clusters'\n", " )\n", " n_rows = int(math.ceil(len(input_models) / N_COLS))\n", " counter = 0\n", " for _ in iter_grid(n_rows, N_COLS):\n", " if counter >= len(input_models):\n", " break\n", "\n", " input_model = input_models[counter]\n", " plot = gr.Plot(label=input_model)\n", " fn = partial(cluster, clustering_algorithm=input_model)\n", " input_data.change(fn=fn, inputs=[input_data, input_n_clusters], outputs=plot)\n", " input_n_clusters.change(fn=fn, inputs=[input_data, input_n_clusters], outputs=plot)\n", " counter += 1\n", "\n", "demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +>>>>>>> main diff --git a/demo/dashboard/run.ipynb b/demo/dashboard/run.ipynb index ca4d20de0769..453dad0ee04a 100644 --- a/demo/dashboard/run.ipynb +++ b/demo/dashboard/run.ipynb @@ -1 +1 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: dashboard\n", "### This demo shows how you can build an interactive dashboard with gradio. Click on a python library on the left hand side and then on the right hand side click on the metric you'd like to see plot over time. 
Data is pulled from HuggingFace Hub datasets.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio plotly"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/dashboard/helpers.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import pandas as pd\n", "import plotly.express as px\n", "from helpers import *\n", "\n", "\n", "LIBRARIES = [\"accelerate\", \"datasets\", \"diffusers\", \"evaluate\", \"gradio\", \"hub_docs\",\n", " \"huggingface_hub\", \"optimum\", \"pytorch_image_models\", \"tokenizers\", \"transformers\"]\n", "\n", "\n", "def create_pip_plot(libraries, pip_choices):\n", " if \"Pip\" not in pip_choices:\n", " return gr.Plot(visible=False)\n", " output = retrieve_pip_installs(libraries, \"Cumulated\" in pip_choices)\n", " df = pd.DataFrame(output).melt(id_vars=\"day\")\n", " plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n", " title=\"Pip installs\")\n", " plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n", " return gr.Plot(value=plot, visible=True)\n", "\n", "\n", "def create_star_plot(libraries, star_choices):\n", " if \"Stars\" not in star_choices:\n", " return gr.Plot(visible=False)\n", " output = retrieve_stars(libraries, \"Week over Week\" in star_choices)\n", " df = pd.DataFrame(output).melt(id_vars=\"day\")\n", " plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n", " title=\"Number of stargazers\")\n", " plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n", " return gr.Plot(value=plot, visible=True)\n", "\n", "\n", "def create_issue_plot(libraries, issue_choices):\n", " if \"Issue\" not in issue_choices:\n", " return gr.Plot(visible=False)\n", " output = retrieve_issues(libraries,\n", " exclude_org_members=\"Exclude org members\" in issue_choices,\n", " week_over_week=\"Week over Week\" in issue_choices)\n", " df = pd.DataFrame(output).melt(id_vars=\"day\")\n", " plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n", " title=\"Cumulated number of issues, PRs, and comments\",\n", " )\n", " plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n", " return gr.Plot(value=plot, visible=True)\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " gr.Markdown(\"## Select libraries to display\")\n", " libraries = gr.CheckboxGroup(choices=LIBRARIES, show_label=False)\n", " with gr.Column():\n", " gr.Markdown(\"## Select graphs to display\")\n", " pip = gr.CheckboxGroup(choices=[\"Pip\", \"Cumulated\"], show_label=False)\n", " stars = gr.CheckboxGroup(choices=[\"Stars\", \"Week over Week\"], show_label=False)\n", " issues = gr.CheckboxGroup(choices=[\"Issue\", \"Exclude org members\", \"week over week\"], show_label=False)\n", " with gr.Row():\n", " fetch = gr.Button(value=\"Fetch\")\n", " with gr.Row():\n", " with gr.Column():\n", " pip_plot = gr.Plot(visible=False)\n", " star_plot = gr.Plot(visible=False)\n", " issue_plot = gr.Plot(visible=False)\n", "\n", " fetch.click(create_pip_plot, inputs=[libraries, pip], outputs=pip_plot)\n", " 
fetch.click(create_star_plot, inputs=[libraries, stars], outputs=star_plot)\n", " fetch.click(create_issue_plot, inputs=[libraries, issues], outputs=issue_plot)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: dashboard\n", "### This demo shows how you can build an interactive dashboard with gradio. Click on a python library on the left hand side and then on the right hand side click on the metric you'd like to see plot over time. Data is pulled from HuggingFace Hub datasets.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio plotly"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/dashboard/helpers.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import pandas as pd\n", "import plotly.express as px\n", "from helpers import *\n", "\n", "\n", "LIBRARIES = [\"accelerate\", \"datasets\", \"diffusers\", \"evaluate\", \"gradio\", \"hub_docs\",\n", " \"huggingface_hub\", \"optimum\", \"pytorch_image_models\", \"tokenizers\", \"transformers\"]\n", "\n", "\n", "def create_pip_plot(libraries, pip_choices):\n", " if \"Pip\" not in pip_choices:\n", " return gr.Plot(visible=False)\n", " output = retrieve_pip_installs(libraries, \"Cumulated\" in pip_choices)\n", " df = pd.DataFrame(output).melt(id_vars=\"day\")\n", " plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n", " title=\"Pip installs\")\n", " plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n", " return gr.Plot(value=plot, visible=True)\n", "\n", "\n", "def create_star_plot(libraries, star_choices):\n", " if \"Stars\" not in star_choices:\n", " return gr.Plot(visible=False)\n", " output = retrieve_stars(libraries, \"Week over Week\" in star_choices)\n", " df = pd.DataFrame(output).melt(id_vars=\"day\")\n", " plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n", " title=\"Number of stargazers\")\n", " plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n", " return gr.Plot(value=plot, visible=True)\n", "\n", "\n", "def create_issue_plot(libraries, issue_choices):\n", " if \"Issue\" not in issue_choices:\n", " return gr.Plot(visible=False)\n", " output = retrieve_issues(libraries,\n", " exclude_org_members=\"Exclude org members\" in issue_choices,\n", " week_over_week=\"Week over Week\" in issue_choices)\n", " df = pd.DataFrame(output).melt(id_vars=\"day\")\n", " plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n", " title=\"Cumulated number of issues, PRs, and comments\",\n", " )\n", " plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n", " return gr.Plot(value=plot, visible=True)\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " gr.Markdown(\"## Select libraries to display\")\n", " libraries = gr.CheckboxGroup(choices=LIBRARIES, show_label=False)\n", " with gr.Column():\n", " 
gr.Markdown(\"## Select graphs to display\")\n", " pip = gr.CheckboxGroup(choices=[\"Pip\", \"Cumulated\"], show_label=False)\n", " stars = gr.CheckboxGroup(choices=[\"Stars\", \"Week over Week\"], show_label=False)\n", " issues = gr.CheckboxGroup(choices=[\"Issue\", \"Exclude org members\", \"week over week\"], show_label=False)\n", " with gr.Row():\n", " fetch = gr.Button(value=\"Fetch\")\n", " with gr.Row():\n", " with gr.Column():\n", " pip_plot = gr.Plot(visible=False)\n", " star_plot = gr.Plot(visible=False)\n", " issue_plot = gr.Plot(visible=False)\n", "\n", " fetch.click(create_pip_plot, inputs=[libraries, pip], outputs=pip_plot)\n", " fetch.click(create_star_plot, inputs=[libraries, stars], outputs=star_plot)\n", " fetch.click(create_issue_plot, inputs=[libraries, issues], outputs=issue_plot)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} diff --git a/demo/dataframe_colorful/run.ipynb b/demo/dataframe_colorful/run.ipynb index 83a30371f132..e7df93c5aa22 100644 --- a/demo/dataframe_colorful/run.ipynb +++ b/demo/dataframe_colorful/run.ipynb @@ -1 +1,5 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: dataframe_colorful"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import pandas as pd \n", "import gradio as gr\n", "\n", "df = pd.DataFrame({\"A\" : [14, 4, 5, 4, 1], \n", "\t\t\t\t\"B\" : [5, 2, 54, 3, 2], \n", "\t\t\t\t\"C\" : [20, 20, 7, 3, 8], \n", "\t\t\t\t\"D\" : [14, 3, 6, 2, 6], \n", "\t\t\t\t\"E\" : [23, 45, 64, 32, 23]}) \n", "\n", "t = df.style.highlight_max(color = 'lightgreen', axis = 0)\n", "\n", "with gr.Blocks() as demo:\n", " gr.Dataframe(t)\n", " \n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +<<<<<<< HEAD +{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: dataframe_colorful"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import pandas as pd \n", "import gradio as gr\n", "\n", "df = pd.DataFrame({\"A\" : [14, 4, 5, 4, 1], \n", "\t\t\t\t\"B\" : [5, 2, 54, 3, 2], \n", "\t\t\t\t\"C\" : [20, 20, 7, 3, 8], \n", "\t\t\t\t\"D\" : [14, 3, 6, 2, 6], \n", "\t\t\t\t\"E\" : [23, 45, 64, 32, 23]}) \n", "\n", "t = df.style.highlight_max(color = 'lightgreen', axis = 0)\n", "\n", "with gr.Blocks() as demo:\n", " gr.Dataframe(t)\n", " \n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +======= +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: dataframe_colorful"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 
"288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import pandas as pd \n", "import gradio as gr\n", "\n", "df = pd.DataFrame({\"A\" : [14, 4, 5, 4, 1], \n", "\t\t\t\t\"B\" : [5, 2, 54, 3, 2], \n", "\t\t\t\t\"C\" : [20, 20, 7, 3, 8], \n", "\t\t\t\t\"D\" : [14, 3, 6, 2, 6], \n", "\t\t\t\t\"E\" : [23, 45, 64, 32, 23]}) \n", "\n", "t = df.style.highlight_max(color = 'lightgreen', axis = 0)\n", "\n", "with gr.Blocks() as demo:\n", " gr.Dataframe(t)\n", " \n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +>>>>>>> main diff --git a/demo/file_explorer/run.ipynb b/demo/file_explorer/run.ipynb index 34a79bc6e0a0..50d6410edf6f 100644 --- a/demo/file_explorer/run.ipynb +++ b/demo/file_explorer/run.ipynb @@ -1 +1,5 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: file_explorer"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from pathlib import Path\n", "\n", "current_file_path = Path(__file__).resolve()\n", "relative_path = \"path/to/file\"\n", "absolute_path = (current_file_path.parent / \"..\" / \"..\" / \"gradio\").resolve()\n", "\n", "\n", "def get_file_content(file):\n", " return (file,)\n", "\n", "\n", "with gr.Blocks() as demo:\n", " gr.Markdown('### `FileExplorer` to `FileExplorer` -- `file_count=\"multiple\"`')\n", " submit_btn = gr.Button(\"Select\")\n", " with gr.Row():\n", " file = gr.FileExplorer(\n", " glob=\"**/{components,themes}/*.py\",\n", " # value=[\"themes/utils\"],\n", " root=absolute_path,\n", " ignore_glob=\"**/__init__.py\",\n", " )\n", "\n", " file2 = gr.FileExplorer(\n", " glob=\"**/{components,themes}/**/*.py\",\n", " root=absolute_path,\n", " ignore_glob=\"**/__init__.py\",\n", " )\n", " submit_btn.click(lambda x: x, file, file2)\n", "\n", " gr.Markdown(\"---\")\n", " gr.Markdown('### `FileExplorer` to `Code` -- `file_count=\"single\"`')\n", " with gr.Group():\n", " with gr.Row():\n", " file_3 = gr.FileExplorer(\n", " scale=1,\n", " glob=\"**/{components,themes}/**/*.py\",\n", " value=[\"themes/utils\"],\n", " file_count=\"single\",\n", " root=absolute_path,\n", " ignore_glob=\"**/__init__.py\",\n", " elem_id=\"file\",\n", " )\n", "\n", " code = gr.Code(lines=30, scale=2, language=\"python\")\n", "\n", " file_3.change(get_file_content, file_3, code)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +<<<<<<< HEAD +{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: file_explorer"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from pathlib import Path\n", "\n", "current_file_path = Path(__file__).resolve()\n", "relative_path = \"path/to/file\"\n", "absolute_path = (current_file_path.parent / \"..\" / \"..\" / \"gradio\").resolve()\n", "\n", "\n", "def 
get_file_content(file):\n", " return (file,)\n", "\n", "\n", "with gr.Blocks() as demo:\n", " gr.Markdown('### `FileExplorer` to `FileExplorer` -- `file_count=\"multiple\"`')\n", " submit_btn = gr.Button(\"Select\")\n", " with gr.Row():\n", " file = gr.FileExplorer(\n", " glob=\"**/{components,themes}/*.py\",\n", " # value=[\"themes/utils\"],\n", " root=absolute_path,\n", " ignore_glob=\"**/__init__.py\",\n", " )\n", "\n", " file2 = gr.FileExplorer(\n", " glob=\"**/{components,themes}/**/*.py\",\n", " root=absolute_path,\n", " ignore_glob=\"**/__init__.py\",\n", " )\n", " submit_btn.click(lambda x: x, file, file2)\n", "\n", " gr.Markdown(\"---\")\n", " gr.Markdown('### `FileExplorer` to `Code` -- `file_count=\"single\"`')\n", " with gr.Group():\n", " with gr.Row():\n", " file_3 = gr.FileExplorer(\n", " scale=1,\n", " glob=\"**/{components,themes}/**/*.py\",\n", " value=[\"themes/utils\"],\n", " file_count=\"single\",\n", " root=absolute_path,\n", " ignore_glob=\"**/__init__.py\",\n", " elem_id=\"file\",\n", " )\n", "\n", " code = gr.Code(lines=30, scale=2, language=\"python\")\n", "\n", " file_3.change(get_file_content, file_3, code)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +======= +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: file_explorer"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from pathlib import Path\n", "\n", "current_file_path = Path(__file__).resolve()\n", "relative_path = \"path/to/file\"\n", "absolute_path = (current_file_path.parent / \"..\" / \"..\" / \"gradio\").resolve()\n", "\n", "\n", "def get_file_content(file):\n", " return (file,)\n", "\n", "\n", "with gr.Blocks() as demo:\n", " gr.Markdown('### `FileExplorer` to `FileExplorer` -- `file_count=\"multiple\"`')\n", " submit_btn = gr.Button(\"Select\")\n", " with gr.Row():\n", " file = gr.FileExplorer(\n", " glob=\"**/{components,themes}/*.py\",\n", " # value=[\"themes/utils\"],\n", " root=absolute_path,\n", " ignore_glob=\"**/__init__.py\",\n", " )\n", "\n", " file2 = gr.FileExplorer(\n", " glob=\"**/{components,themes}/**/*.py\",\n", " root=absolute_path,\n", " ignore_glob=\"**/__init__.py\",\n", " )\n", " submit_btn.click(lambda x: x, file, file2)\n", "\n", " gr.Markdown(\"---\")\n", " gr.Markdown('### `FileExplorer` to `Code` -- `file_count=\"single\"`')\n", " with gr.Group():\n", " with gr.Row():\n", " file_3 = gr.FileExplorer(\n", " scale=1,\n", " glob=\"**/{components,themes}/**/*.py\",\n", " value=[\"themes/utils\"],\n", " file_count=\"single\",\n", " root=absolute_path,\n", " ignore_glob=\"**/__init__.py\",\n", " elem_id=\"file\",\n", " )\n", "\n", " code = gr.Code(lines=30, scale=2, language=\"python\")\n", "\n", " file_3.change(get_file_content, file_3, code)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +>>>>>>> main diff --git a/demo/file_explorer_component/run.ipynb b/demo/file_explorer_component/run.ipynb index e57e663503a4..3502445a74f1 100644 --- a/demo/file_explorer_component/run.ipynb +++ b/demo/file_explorer_component/run.ipynb @@ -1 +1,5 @@ -{"cells": [{"cell_type": 
"markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: file_explorer_component"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr \n", "\n", "with gr.Blocks() as demo:\n", " gr.FileExplorer()\n", "\n", "demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +<<<<<<< HEAD +{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: file_explorer_component"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr \n", "\n", "with gr.Blocks() as demo:\n", " gr.FileExplorer()\n", "\n", "demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +======= +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: file_explorer_component"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr \n", "\n", "with gr.Blocks() as demo:\n", " gr.FileExplorer()\n", "\n", "demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +>>>>>>> main diff --git a/demo/fraud_detector/run.ipynb b/demo/fraud_detector/run.ipynb index 8d3e7611981f..8b4a2b35dfc5 100644 --- a/demo/fraud_detector/run.ipynb +++ b/demo/fraud_detector/run.ipynb @@ -1 +1,5 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: fraud_detector"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio pandas"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/fraud_detector/fraud.csv"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import random\n", "import os\n", "import gradio as gr\n", "\n", "\n", "def fraud_detector(card_activity, categories, sensitivity):\n", " activity_range = random.randint(0, 100)\n", " drop_columns = [\n", " column for column in [\"retail\", \"food\", \"other\"] if column not in categories\n", " ]\n", " if len(drop_columns):\n", " card_activity.drop(columns=drop_columns, inplace=True)\n", " return (\n", " card_activity,\n", " card_activity,\n", " {\"fraud\": activity_range / 100.0, \"not fraud\": 1 - activity_range / 100.0},\n", " )\n", "\n", "\n", "demo = gr.Interface(\n", " fraud_detector,\n", " [\n", " gr.Timeseries(x=\"time\", y=[\"retail\", \"food\", \"other\"]),\n", " 
gr.CheckboxGroup(\n", " [\"retail\", \"food\", \"other\"], value=[\"retail\", \"food\", \"other\"]\n", " ),\n", " gr.Slider(1, 3),\n", " ],\n", " [\n", " \"dataframe\",\n", " gr.Timeseries(x=\"time\", y=[\"retail\", \"food\", \"other\"]),\n", " gr.Label(label=\"Fraud Level\"),\n", " ],\n", " examples=[\n", " [os.path.join(os.path.abspath(''), \"fraud.csv\"), [\"retail\", \"food\", \"other\"], 1.0],\n", " ],\n", ")\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +<<<<<<< HEAD +{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: fraud_detector"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio pandas"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/fraud_detector/fraud.csv"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["import random\n", "import os\n", "import gradio as gr\n", "\n", "\n", "def fraud_detector(card_activity, categories, sensitivity):\n", " activity_range = random.randint(0, 100)\n", " drop_columns = [\n", " column for column in [\"retail\", \"food\", \"other\"] if column not in categories\n", " ]\n", " if len(drop_columns):\n", " card_activity.drop(columns=drop_columns, inplace=True)\n", " return (\n", " card_activity,\n", " card_activity,\n", " {\"fraud\": activity_range / 100.0, \"not fraud\": 1 - activity_range / 100.0},\n", " )\n", "\n", "\n", "demo = gr.Interface(\n", " fraud_detector,\n", " [\n", " gr.CheckboxGroup(\n", " [\"retail\", \"food\", \"other\"], value=[\"retail\", \"food\", \"other\"]\n", " ),\n", " gr.Slider(1, 3),\n", " ],\n", " [\n", " \"dataframe\",\n", " gr.Label(label=\"Fraud Level\"),\n", " ],\n", " examples=[\n", " [os.path.join(os.path.abspath(''), \"fraud.csv\"), [\"retail\", \"food\", \"other\"], 1.0],\n", " ],\n", ")\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +======= +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: fraud_detector"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio pandas"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/fraud_detector/fraud.csv"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import random\n", "import os\n", "import gradio as gr\n", "\n", "\n", "def fraud_detector(card_activity, categories, sensitivity):\n", " activity_range = random.randint(0, 100)\n", " drop_columns = [\n", " column for column in [\"retail\", \"food\", \"other\"] if column not in categories\n", " ]\n", " if len(drop_columns):\n", " card_activity.drop(columns=drop_columns, inplace=True)\n", " return (\n", " 
card_activity,\n", " card_activity,\n", " {\"fraud\": activity_range / 100.0, \"not fraud\": 1 - activity_range / 100.0},\n", " )\n", "\n", "\n", "demo = gr.Interface(\n", " fraud_detector,\n", " [\n", " gr.Timeseries(x=\"time\", y=[\"retail\", \"food\", \"other\"]),\n", " gr.CheckboxGroup(\n", " [\"retail\", \"food\", \"other\"], value=[\"retail\", \"food\", \"other\"]\n", " ),\n", " gr.Slider(1, 3),\n", " ],\n", " [\n", " \"dataframe\",\n", " gr.Timeseries(x=\"time\", y=[\"retail\", \"food\", \"other\"]),\n", " gr.Label(label=\"Fraud Level\"),\n", " ],\n", " examples=[\n", " [os.path.join(os.path.abspath(''), \"fraud.csv\"), [\"retail\", \"food\", \"other\"], 1.0],\n", " ],\n", ")\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +>>>>>>> main diff --git a/demo/fraud_detector/run.py b/demo/fraud_detector/run.py index d5384b8ed05e..3425cb8f1872 100644 --- a/demo/fraud_detector/run.py +++ b/demo/fraud_detector/run.py @@ -20,7 +20,6 @@ def fraud_detector(card_activity, categories, sensitivity): demo = gr.Interface( fraud_detector, [ - gr.Timeseries(x="time", y=["retail", "food", "other"]), gr.CheckboxGroup( ["retail", "food", "other"], value=["retail", "food", "other"] ), @@ -28,7 +27,6 @@ def fraud_detector(card_activity, categories, sensitivity): ], [ "dataframe", - gr.Timeseries(x="time", y=["retail", "food", "other"]), gr.Label(label="Fraud Level"), ], examples=[ diff --git a/demo/gender_sentence_custom_interpretation/run.ipynb b/demo/gender_sentence_custom_interpretation/run.ipynb index 18d613c06a26..2dc98cc29013 100644 --- a/demo/gender_sentence_custom_interpretation/run.ipynb +++ b/demo/gender_sentence_custom_interpretation/run.ipynb @@ -1 +1,5 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: gender_sentence_custom_interpretation"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import re\n", "\n", "import gradio as gr\n", "\n", "male_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n", "\n", "\n", "def gender_of_sentence(sentence):\n", " male_count = len([word for word in sentence.split() if word.lower() in male_words])\n", " female_count = len(\n", " [word for word in sentence.split() if word.lower() in female_words]\n", " )\n", " total = max(male_count + female_count, 1)\n", " return {\"male\": male_count / total, \"female\": female_count / total}\n", "\n", "\n", "# Number of arguments to interpretation function must\n", "# match number of inputs to prediction function\n", "def interpret_gender(sentence):\n", " result = gender_of_sentence(sentence)\n", " is_male = result[\"male\"] > result[\"female\"]\n", " interpretation = []\n", " for word in re.split(\"( )\", sentence):\n", " score = 0\n", " token = word.lower()\n", " if (is_male and token in male_words) or (not is_male and token in female_words):\n", " score = 1\n", " elif (is_male and token in female_words) or (\n", " not is_male and token in male_words\n", " ):\n", " score = -1\n", " interpretation.append((word, score))\n", " # Output must be a list of lists containing the same number of elements as inputs\n", " # Each element corresponds to the interpretation scores for the given input\n", " 
return [interpretation]\n", "\n", "\n", "demo = gr.Interface(\n", " fn=gender_of_sentence,\n", " inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n", " outputs=\"label\",\n", " interpretation=interpret_gender,\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +<<<<<<< HEAD +{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: gender_sentence_custom_interpretation"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import re\n", "\n", "import gradio as gr\n", "\n", "male_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n", "\n", "\n", "def gender_of_sentence(sentence):\n", " male_count = len([word for word in sentence.split() if word.lower() in male_words])\n", " female_count = len(\n", " [word for word in sentence.split() if word.lower() in female_words]\n", " )\n", " total = max(male_count + female_count, 1)\n", " return {\"male\": male_count / total, \"female\": female_count / total}\n", "\n", "\n", "# Number of arguments to interpretation function must\n", "# match number of inputs to prediction function\n", "def interpret_gender(sentence):\n", " result = gender_of_sentence(sentence)\n", " is_male = result[\"male\"] > result[\"female\"]\n", " interpretation = []\n", " for word in re.split(\"( )\", sentence):\n", " score = 0\n", " token = word.lower()\n", " if (is_male and token in male_words) or (not is_male and token in female_words):\n", " score = 1\n", " elif (is_male and token in female_words) or (\n", " not is_male and token in male_words\n", " ):\n", " score = -1\n", " interpretation.append((word, score))\n", " # Output must be a list of lists containing the same number of elements as inputs\n", " # Each element corresponds to the interpretation scores for the given input\n", " return [interpretation]\n", "\n", "\n", "demo = gr.Interface(\n", " fn=gender_of_sentence,\n", " inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n", " outputs=\"label\",\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +======= +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: gender_sentence_custom_interpretation"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import re\n", "\n", "import gradio as gr\n", "\n", "male_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n", "\n", "\n", "def gender_of_sentence(sentence):\n", " male_count = len([word for word in sentence.split() if word.lower() in male_words])\n", " female_count = len(\n", " [word for word in sentence.split() if word.lower() in female_words]\n", " )\n", " total = max(male_count + female_count, 1)\n", " return {\"male\": male_count / total, \"female\": female_count / total}\n", "\n", "\n", "# Number of arguments to 
interpretation function must\n", "# match number of inputs to prediction function\n", "def interpret_gender(sentence):\n", " result = gender_of_sentence(sentence)\n", " is_male = result[\"male\"] > result[\"female\"]\n", " interpretation = []\n", " for word in re.split(\"( )\", sentence):\n", " score = 0\n", " token = word.lower()\n", " if (is_male and token in male_words) or (not is_male and token in female_words):\n", " score = 1\n", " elif (is_male and token in female_words) or (\n", " not is_male and token in male_words\n", " ):\n", " score = -1\n", " interpretation.append((word, score))\n", " # Output must be a list of lists containing the same number of elements as inputs\n", " # Each element corresponds to the interpretation scores for the given input\n", " return [interpretation]\n", "\n", "\n", "demo = gr.Interface(\n", " fn=gender_of_sentence,\n", " inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n", " outputs=\"label\",\n", " interpretation=interpret_gender,\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +>>>>>>> main diff --git a/demo/gender_sentence_custom_interpretation/run.py b/demo/gender_sentence_custom_interpretation/run.py index 93a8f6c6cf48..0b57effcc702 100644 --- a/demo/gender_sentence_custom_interpretation/run.py +++ b/demo/gender_sentence_custom_interpretation/run.py @@ -39,7 +39,6 @@ def interpret_gender(sentence): fn=gender_of_sentence, inputs=gr.Textbox(value="She went to his house to get her keys."), outputs="label", - interpretation=interpret_gender, ) if __name__ == "__main__": diff --git a/demo/gender_sentence_default_interpretation/run.ipynb b/demo/gender_sentence_default_interpretation/run.ipynb index 8c441b18a762..d9b9fa5ff614 100644 --- a/demo/gender_sentence_default_interpretation/run.ipynb +++ b/demo/gender_sentence_default_interpretation/run.ipynb @@ -1 +1,5 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: gender_sentence_default_interpretation"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "male_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n", "\n", "\n", "def gender_of_sentence(sentence):\n", " male_count = len([word for word in sentence.split() if word.lower() in male_words])\n", " female_count = len(\n", " [word for word in sentence.split() if word.lower() in female_words]\n", " )\n", " total = max(male_count + female_count, 1)\n", " return {\"male\": male_count / total, \"female\": female_count / total}\n", "\n", "\n", "demo = gr.Interface(\n", " fn=gender_of_sentence,\n", " inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n", " outputs=\"label\",\n", " interpretation=\"default\",\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +<<<<<<< HEAD +{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: gender_sentence_default_interpretation"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, 
"outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "male_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n", "\n", "\n", "def gender_of_sentence(sentence):\n", " male_count = len([word for word in sentence.split() if word.lower() in male_words])\n", " female_count = len(\n", " [word for word in sentence.split() if word.lower() in female_words]\n", " )\n", " total = max(male_count + female_count, 1)\n", " return {\"male\": male_count / total, \"female\": female_count / total}\n", "\n", "\n", "demo = gr.Interface(\n", " fn=gender_of_sentence,\n", " inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n", " outputs=\"label\",\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +======= +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: gender_sentence_default_interpretation"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "male_words, female_words = [\"he\", \"his\", \"him\"], [\"she\", \"hers\", \"her\"]\n", "\n", "\n", "def gender_of_sentence(sentence):\n", " male_count = len([word for word in sentence.split() if word.lower() in male_words])\n", " female_count = len(\n", " [word for word in sentence.split() if word.lower() in female_words]\n", " )\n", " total = max(male_count + female_count, 1)\n", " return {\"male\": male_count / total, \"female\": female_count / total}\n", "\n", "\n", "demo = gr.Interface(\n", " fn=gender_of_sentence,\n", " inputs=gr.Textbox(value=\"She went to his house to get her keys.\"),\n", " outputs=\"label\",\n", " interpretation=\"default\",\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +>>>>>>> main diff --git a/demo/gender_sentence_default_interpretation/run.py b/demo/gender_sentence_default_interpretation/run.py index 99312fda6c52..c37567f38cb7 100644 --- a/demo/gender_sentence_default_interpretation/run.py +++ b/demo/gender_sentence_default_interpretation/run.py @@ -16,7 +16,6 @@ def gender_of_sentence(sentence): fn=gender_of_sentence, inputs=gr.Textbox(value="She went to his house to get her keys."), outputs="label", - interpretation="default", ) if __name__ == "__main__": diff --git a/demo/hello_blocks/run.ipynb b/demo/hello_blocks/run.ipynb index 9d509b14150e..3aed150583cb 100644 --- a/demo/hello_blocks/run.ipynb +++ b/demo/hello_blocks/run.ipynb @@ -1 +1,5 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: hello_blocks"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "def greet(name):\n", " return \"Hello \" + name + \"!\"\n", "\n", "with gr.Blocks() as demo:\n", 
" name = gr.Textbox(label=\"Name\")\n", " output = gr.Textbox(label=\"Output Box\")\n", " greet_btn = gr.Button(\"Greet\")\n", " greet_btn.click(fn=greet, inputs=name, outputs=output, api_name=\"greet\")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +<<<<<<< HEAD +{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: hello_blocks"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "\n", "def greet(name):\n", " return \"Hello \" + name + \"!\"\n", "\n", "\n", "with gr.Blocks() as demo:\n", " name = gr.Textbox(label=\"Name\")\n", " output = gr.Textbox(label=\"Output Box\")\n", " greet_btn = gr.Button(\"Greet\")\n", " greet_btn.click(fn=greet, inputs=name, outputs=output, api_name=\"greet\")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +======= +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: hello_blocks"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "def greet(name):\n", " return \"Hello \" + name + \"!\"\n", "\n", "with gr.Blocks() as demo:\n", " name = gr.Textbox(label=\"Name\")\n", " output = gr.Textbox(label=\"Output Box\")\n", " greet_btn = gr.Button(\"Greet\")\n", " greet_btn.click(fn=greet, inputs=name, outputs=output, api_name=\"greet\")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +>>>>>>> main diff --git a/demo/hello_blocks/run.py b/demo/hello_blocks/run.py index bfb65715686a..f11bca19f42a 100644 --- a/demo/hello_blocks/run.py +++ b/demo/hello_blocks/run.py @@ -1,8 +1,10 @@ import gradio as gr + def greet(name): return "Hello " + name + "!" 
+ with gr.Blocks() as demo: name = gr.Textbox(label="Name") output = gr.Textbox(label="Output Box") diff --git a/demo/hello_login/run.ipynb b/demo/hello_login/run.ipynb index 785decc44924..6739331e37d2 100644 --- a/demo/hello_login/run.ipynb +++ b/demo/hello_login/run.ipynb @@ -1 +1,5 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: hello_login"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "user_db = {\"admin\": \"admin\", \"foo\": \"bar\"}\n", "\n", "\n", "def greet(name):\n", " return \"Hello \" + name + \"!!\"\n", "\n", "\n", "demo = gr.Interface(fn=greet, inputs=\"text\", outputs=\"text\")\n", "if __name__ == \"__main__\":\n", " demo.launch(enable_queue=False,\n", " auth=lambda u, p: user_db.get(u) == p,\n", " auth_message=\"This is a welcome message\")\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +<<<<<<< HEAD +{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: hello_login"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import argparse\n", "import sys\n", "\n", "parser = argparse.ArgumentParser()\n", "parser.add_argument(\"--name\", type=str, default=\"User\")\n", "args, unknown = parser.parse_known_args()\n", "print(sys.argv)\n", "\n", "with gr.Blocks() as demo:\n", " gr.Markdown(f\"# Greetings {args.name}!\")\n", " inp = gr.Textbox()\n", " out = gr.Textbox()\n", "\n", " inp.change(fn=lambda x: x, inputs=inp, outputs=out)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +======= +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: hello_login"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "user_db = {\"admin\": \"admin\", \"foo\": \"bar\"}\n", "\n", "\n", "def greet(name):\n", " return \"Hello \" + name + \"!!\"\n", "\n", "\n", "demo = gr.Interface(fn=greet, inputs=\"text\", outputs=\"text\")\n", "if __name__ == \"__main__\":\n", " demo.launch(enable_queue=False,\n", " auth=lambda u, p: user_db.get(u) == p,\n", " auth_message=\"This is a welcome message\")\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +>>>>>>> main diff --git a/demo/hello_login/run.py b/demo/hello_login/run.py index c3e4d2ecd79b..0acefb17bc5c 100644 --- a/demo/hello_login/run.py +++ b/demo/hello_login/run.py @@ -1,14 +1,18 @@ import gradio as gr +import argparse +import sys -user_db = {"admin": "admin", "foo": "bar"} +parser = argparse.ArgumentParser() +parser.add_argument("--name", type=str, default="User") 
+args, unknown = parser.parse_known_args() +print(sys.argv) +with gr.Blocks() as demo: + gr.Markdown(f"# Greetings {args.name}!") + inp = gr.Textbox() + out = gr.Textbox() -def greet(name): - return "Hello " + name + "!!" + inp.change(fn=lambda x: x, inputs=inp, outputs=out) - -demo = gr.Interface(fn=greet, inputs="text", outputs="text") if __name__ == "__main__": - demo.launch(enable_queue=False, - auth=lambda u, p: user_db.get(u) == p, - auth_message="This is a welcome message") + demo.launch() \ No newline at end of file diff --git a/demo/hello_world/run.ipynb b/demo/hello_world/run.ipynb index cd37f6a7abc9..2f9bffd2afd2 100644 --- a/demo/hello_world/run.ipynb +++ b/demo/hello_world/run.ipynb @@ -1 +1,5 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: hello_world\n", "### The simplest possible Gradio demo. It wraps a 'Hello {name}!' function in an Interface that accepts and returns text.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "def greet(name):\n", " return \"Hello \" + name + \"!\"\n", "\n", "demo = gr.Interface(fn=greet, inputs=\"text\", outputs=\"text\")\n", " \n", "if __name__ == \"__main__\":\n", " demo.launch() "]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +<<<<<<< HEAD +{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: hello_world\n", "### The simplest possible Gradio demo. It wraps a 'Hello {name}!' function in an Interface that accepts and returns text.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "def greet(name):\n", " return \"Hello \" + name + \"!\"\n", "\n", "demo = gr.Interface(fn=greet, inputs=\"text\", outputs=\"text\")\n", " \n", "if __name__ == \"__main__\":\n", " demo.launch(show_api=False) "]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +======= +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: hello_world\n", "### The simplest possible Gradio demo. It wraps a 'Hello {name}!' 
function in an Interface that accepts and returns text.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "def greet(name):\n", " return \"Hello \" + name + \"!\"\n", "\n", "demo = gr.Interface(fn=greet, inputs=\"text\", outputs=\"text\")\n", " \n", "if __name__ == \"__main__\":\n", " demo.launch() "]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +>>>>>>> main diff --git a/demo/hello_world/run.py b/demo/hello_world/run.py index 4e5dbfbceec5..0642e39f1fb6 100644 --- a/demo/hello_world/run.py +++ b/demo/hello_world/run.py @@ -6,4 +6,4 @@ def greet(name): demo = gr.Interface(fn=greet, inputs="text", outputs="text") if __name__ == "__main__": - demo.launch() \ No newline at end of file + demo.launch(show_api=False) \ No newline at end of file diff --git a/demo/highlightedtext_component/run.ipynb b/demo/highlightedtext_component/run.ipynb index 812cabbbb5f0..ff22ebe451d3 100644 --- a/demo/highlightedtext_component/run.ipynb +++ b/demo/highlightedtext_component/run.ipynb @@ -1 +1,5 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: highlightedtext_component"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr \n", "\n", "with gr.Blocks() as demo:\n", " gr.HighlightedText(value=[(\"Text\",\"Label 1\"),(\"to be\",\"Label 2\"),(\"highlighted\",\"Label 3\")])\n", "\n", "demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +<<<<<<< HEAD +{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: highlightedtext_component"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks() as demo:\n", " gr.HighlightedText(\n", " combine_adjacent=True,\n", " )\n", "\n", "demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +======= +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: highlightedtext_component"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr \n", "\n", "with gr.Blocks() as demo:\n", " gr.HighlightedText(value=[(\"Text\",\"Label 1\"),(\"to be\",\"Label 2\"),(\"highlighted\",\"Label 3\")])\n", "\n", "demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +>>>>>>> main diff --git a/demo/highlightedtext_component/run.py b/demo/highlightedtext_component/run.py 
index 4bd90ef5f610..dd3fda76d4ef 100644 --- a/demo/highlightedtext_component/run.py +++ b/demo/highlightedtext_component/run.py @@ -1,6 +1,8 @@ -import gradio as gr +import gradio as gr with gr.Blocks() as demo: - gr.HighlightedText(value=[("Text","Label 1"),("to be","Label 2"),("highlighted","Label 3")]) + gr.HighlightedText( + combine_adjacent=True, + ) -demo.launch() \ No newline at end of file +demo.launch() diff --git a/demo/interface_random_slider/run.ipynb b/demo/interface_random_slider/run.ipynb index ec405b219438..ef67d768a250 100644 --- a/demo/interface_random_slider/run.ipynb +++ b/demo/interface_random_slider/run.ipynb @@ -1 +1,5 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: interface_random_slider"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "\n", "def func(slider_1, slider_2, *args):\n", " return slider_1 + slider_2 * 5\n", "\n", "\n", "demo = gr.Interface(\n", " func,\n", " [\n", " gr.Slider(minimum=1.5, maximum=250000.89, randomize=True, label=\"Random Big Range\"),\n", " gr.Slider(minimum=-1, maximum=1, randomize=True, step=0.05, label=\"Random only multiple of 0.05 allowed\"),\n", " gr.Slider(minimum=0, maximum=1, randomize=True, step=0.25, label=\"Random only multiples of 0.25 allowed\"),\n", " gr.Slider(minimum=-100, maximum=100, randomize=True, step=3, label=\"Random between -100 and 100 step 3\"),\n", " gr.Slider(minimum=-100, maximum=100, randomize=True, label=\"Random between -100 and 100\"),\n", " gr.Slider(value=0.25, minimum=5, maximum=30, step=-1),\n", " ],\n", " \"number\",\n", " interpretation=\"default\"\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +<<<<<<< HEAD +{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: interface_random_slider"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "\n", "def func(slider_1, slider_2, *args):\n", " return slider_1 + slider_2 * 5\n", "\n", "\n", "demo = gr.Interface(\n", " func,\n", " [\n", " gr.Slider(minimum=1.5, maximum=250000.89, randomize=True, label=\"Random Big Range\"),\n", " gr.Slider(minimum=-1, maximum=1, randomize=True, step=0.05, label=\"Random only multiple of 0.05 allowed\"),\n", " gr.Slider(minimum=0, maximum=1, randomize=True, step=0.25, label=\"Random only multiples of 0.25 allowed\"),\n", " gr.Slider(minimum=-100, maximum=100, randomize=True, step=3, label=\"Random between -100 and 100 step 3\"),\n", " gr.Slider(minimum=-100, maximum=100, randomize=True, label=\"Random between -100 and 100\"),\n", " gr.Slider(value=0.25, minimum=5, maximum=30, step=-1),\n", " ],\n", " \"number\",\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +======= +{"cells": [{"cell_type": "markdown", "id": 
"302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: interface_random_slider"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "\n", "def func(slider_1, slider_2, *args):\n", " return slider_1 + slider_2 * 5\n", "\n", "\n", "demo = gr.Interface(\n", " func,\n", " [\n", " gr.Slider(minimum=1.5, maximum=250000.89, randomize=True, label=\"Random Big Range\"),\n", " gr.Slider(minimum=-1, maximum=1, randomize=True, step=0.05, label=\"Random only multiple of 0.05 allowed\"),\n", " gr.Slider(minimum=0, maximum=1, randomize=True, step=0.25, label=\"Random only multiples of 0.25 allowed\"),\n", " gr.Slider(minimum=-100, maximum=100, randomize=True, step=3, label=\"Random between -100 and 100 step 3\"),\n", " gr.Slider(minimum=-100, maximum=100, randomize=True, label=\"Random between -100 and 100\"),\n", " gr.Slider(value=0.25, minimum=5, maximum=30, step=-1),\n", " ],\n", " \"number\",\n", " interpretation=\"default\"\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +>>>>>>> main diff --git a/demo/interface_random_slider/run.py b/demo/interface_random_slider/run.py index df5a0f21bec6..5d199f706615 100644 --- a/demo/interface_random_slider/run.py +++ b/demo/interface_random_slider/run.py @@ -16,7 +16,6 @@ def func(slider_1, slider_2, *args): gr.Slider(value=0.25, minimum=5, maximum=30, step=-1), ], "number", - interpretation="default" ) if __name__ == "__main__": diff --git a/demo/interface_series/run.ipynb b/demo/interface_series/run.ipynb index 4b9e0d8789f6..0ed5fe48fcde 100644 --- a/demo/interface_series/run.ipynb +++ b/demo/interface_series/run.ipynb @@ -1 +1,5 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: interface_series"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "get_name = gr.Interface(lambda name: name, inputs=\"textbox\", outputs=\"textbox\")\n", "prepend_hello = gr.Interface(lambda name: f\"Hello {name}!\", inputs=\"textbox\", outputs=\"textbox\")\n", "append_nice = gr.Interface(lambda greeting: f\"{greeting} Nice to meet you!\",\n", " inputs=\"textbox\", outputs=gr.Textbox(label=\"Greeting\"))\n", "demo = gr.Series(get_name, prepend_hello, append_nice)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +<<<<<<< HEAD +{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: interface_series"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "get_name = gr.Interface(lambda name: 
name, inputs=\"textbox\", outputs=\"textbox\")\n", "prepend_hello = gr.Interface(lambda name: f\"Hello {name}!\", inputs=\"textbox\", outputs=\"textbox\")\n", "append_nice = gr.Interface(lambda greeting: f\"Nice to meet you!\",\n", " inputs=\"textbox\", outputs=gr.Textbox(label=\"Greeting\"))\n", "translator = gr.Interface(lambda s: \"https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg\", gr.Textbox(), gr.Image())\n", "demo = gr.Series(get_name, translator, append_nice)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +======= +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: interface_series"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "get_name = gr.Interface(lambda name: name, inputs=\"textbox\", outputs=\"textbox\")\n", "prepend_hello = gr.Interface(lambda name: f\"Hello {name}!\", inputs=\"textbox\", outputs=\"textbox\")\n", "append_nice = gr.Interface(lambda greeting: f\"{greeting} Nice to meet you!\",\n", " inputs=\"textbox\", outputs=gr.Textbox(label=\"Greeting\"))\n", "demo = gr.Series(get_name, prepend_hello, append_nice)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +>>>>>>> main diff --git a/demo/interface_series/run.py b/demo/interface_series/run.py index ac942ff94b23..dbad6acd6115 100644 --- a/demo/interface_series/run.py +++ b/demo/interface_series/run.py @@ -2,9 +2,10 @@ get_name = gr.Interface(lambda name: name, inputs="textbox", outputs="textbox") prepend_hello = gr.Interface(lambda name: f"Hello {name}!", inputs="textbox", outputs="textbox") -append_nice = gr.Interface(lambda greeting: f"{greeting} Nice to meet you!", +append_nice = gr.Interface(lambda greeting: f"Nice to meet you!", inputs="textbox", outputs=gr.Textbox(label="Greeting")) -demo = gr.Series(get_name, prepend_hello, append_nice) +translator = gr.Interface(lambda s: "https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg", gr.Textbox(), gr.Image()) +demo = gr.Series(get_name, translator, append_nice) if __name__ == "__main__": demo.launch() \ No newline at end of file diff --git a/demo/interface_series_load/run.ipynb b/demo/interface_series_load/run.ipynb index 1d80f3ff3843..9d8f3cd13537 100644 --- a/demo/interface_series_load/run.ipynb +++ b/demo/interface_series_load/run.ipynb @@ -1 +1,5 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: interface_series_load"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "generator = gr.load(\"huggingface/gpt2\")\n", "translator = gr.load(\"huggingface/t5-small\")\n", "\n", "demo = gr.Series(generator, translator, description=\"This demo combines two Spaces: a text generator (`huggingface/gpt2`) and a text translator (`huggingface/t5-small`). 
The first Space takes a prompt as input and generates a text. The second Space takes the generated text as input and translates it into another language.\")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +<<<<<<< HEAD +{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: interface_series_load"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "\n", "generator = gr.load(\"huggingface/gpt2\")\n", "\n", "\n", "translator = gr.Interface(lambda s: \"https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg\", gr.Textbox(), gr.Image())\n", "\n", "demo = gr.Series(generator, translator, description=\"This demo combines two Spaces: a text generator (`huggingface/gpt2`) and a text translator (`huggingface/t5-small`). The first Space takes a prompt as input and generates a text. The second Space takes the generated text as input and translates it into another language.\")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +======= +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: interface_series_load"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "generator = gr.load(\"huggingface/gpt2\")\n", "translator = gr.load(\"huggingface/t5-small\")\n", "\n", "demo = gr.Series(generator, translator, description=\"This demo combines two Spaces: a text generator (`huggingface/gpt2`) and a text translator (`huggingface/t5-small`). The first Space takes a prompt as input and generates a text. The second Space takes the generated text as input and translates it into another language.\")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +>>>>>>> main diff --git a/demo/interface_series_load/run.py b/demo/interface_series_load/run.py index 13703ccadbf8..e7131fd5b8de 100644 --- a/demo/interface_series_load/run.py +++ b/demo/interface_series_load/run.py @@ -1,7 +1,10 @@ import gradio as gr +import numpy as np generator = gr.load("huggingface/gpt2") -translator = gr.load("huggingface/t5-small") + + +translator = gr.Interface(lambda s: "https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg", gr.Textbox(), gr.Image()) demo = gr.Series(generator, translator, description="This demo combines two Spaces: a text generator (`huggingface/gpt2`) and a text translator (`huggingface/t5-small`). The first Space takes a prompt as input and generates a text. 
The second Space takes the generated text as input and translates it into another language.") diff --git a/demo/kitchen_sink/run.ipynb b/demo/kitchen_sink/run.ipynb index 86eff63c2897..5e7685ae595d 100644 --- a/demo/kitchen_sink/run.ipynb +++ b/demo/kitchen_sink/run.ipynb @@ -1 +1,5 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: kitchen_sink"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/cantina.wav https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/cantina.wav\n", "!wget -q -O files/cheetah1.jpg https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/cheetah1.jpg\n", "!wget -q -O files/lion.jpg https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/lion.jpg\n", "!wget -q -O files/logo.png https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/logo.png\n", "!wget -q -O files/time.csv https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/time.csv\n", "!wget -q -O files/titanic.csv https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/titanic.csv\n", "!wget -q -O files/tower.jpg https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/tower.jpg\n", "!wget -q -O files/world.mp4 https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/world.mp4"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import os\n", "import json\n", "\n", "import numpy as np\n", "\n", "import gradio as gr\n", "\n", "CHOICES = [\"foo\", \"bar\", \"baz\"]\n", "JSONOBJ = \"\"\"{\"items\":{\"item\":[{\"id\": \"0001\",\"type\": null,\"is_good\": false,\"ppu\": 0.55,\"batters\":{\"batter\":[{ \"id\": \"1001\", \"type\": \"Regular\" },{ \"id\": \"1002\", \"type\": \"Chocolate\" },{ \"id\": \"1003\", \"type\": \"Blueberry\" },{ \"id\": \"1004\", \"type\": \"Devil's Food\" }]},\"topping\":[{ \"id\": \"5001\", \"type\": \"None\" },{ \"id\": \"5002\", \"type\": \"Glazed\" },{ \"id\": \"5005\", \"type\": \"Sugar\" },{ \"id\": \"5007\", \"type\": \"Powdered Sugar\" },{ \"id\": \"5006\", \"type\": \"Chocolate with Sprinkles\" },{ \"id\": \"5003\", \"type\": \"Chocolate\" },{ \"id\": \"5004\", \"type\": \"Maple\" }]}]}}\"\"\"\n", "\n", "\n", "def fn(\n", " text1,\n", " text2,\n", " num,\n", " slider1,\n", " slider2,\n", " single_checkbox,\n", " checkboxes,\n", " radio,\n", " dropdown,\n", " multi_dropdown,\n", " im1,\n", " im2,\n", " im3,\n", " im4,\n", " video,\n", " audio1,\n", " audio2,\n", " file,\n", " df1,\n", " df2,\n", "):\n", " return (\n", " (text1 if single_checkbox else text2)\n", " + \", selected:\"\n", " + \", \".join(checkboxes), # Text\n", " {\n", " \"positive\": num / (num + slider1 + slider2),\n", " \"negative\": slider1 / (num + slider1 + slider2),\n", " \"neutral\": slider2 / (num + slider1 + slider2),\n", " }, # Label\n", " (audio1[0], np.flipud(audio1[1]))\n", " if audio1 is not None\n", " else os.path.join(os.path.abspath(''), \"files/cantina.wav\"), # Audio\n", " np.flipud(im1)\n", " if im1 is not None\n", " else os.path.join(os.path.abspath(''), 
\"files/cheetah1.jpg\"), # Image\n", " video\n", " if video is not None\n", " else os.path.join(os.path.abspath(''), \"files/world.mp4\"), # Video\n", " [\n", " (\"The\", \"art\"),\n", " (\"quick brown\", \"adj\"),\n", " (\"fox\", \"nn\"),\n", " (\"jumped\", \"vrb\"),\n", " (\"testing testing testing\", None),\n", " (\"over\", \"prp\"),\n", " (\"the\", \"art\"),\n", " (\"testing\", None),\n", " (\"lazy\", \"adj\"),\n", " (\"dogs\", \"nn\"),\n", " (\".\", \"punc\"),\n", " ]\n", " + [(f\"test {x}\", f\"test {x}\") for x in range(10)], # HighlightedText\n", " # [(\"The testing testing testing\", None), (\"quick brown\", 0.2), (\"fox\", 1), (\"jumped\", -1), (\"testing testing testing\", 0), (\"over\", 0), (\"the\", 0), (\"testing\", 0), (\"lazy\", 1), (\"dogs\", 0), (\".\", 1)] + [(f\"test {x}\", x/10) for x in range(-10, 10)], # HighlightedText\n", " [\n", " (\"The testing testing testing\", None),\n", " (\"over\", 0.6),\n", " (\"the\", 0.2),\n", " (\"testing\", None),\n", " (\"lazy\", -0.1),\n", " (\"dogs\", 0.4),\n", " (\".\", 0),\n", " ]\n", " + [(f\"test\", x / 10) for x in range(-10, 10)], # HighlightedText\n", " json.loads(JSONOBJ), # JSON\n", " \"\", # HTML\n", " os.path.join(os.path.abspath(''), \"files/titanic.csv\"),\n", " df1, # Dataframe\n", " np.random.randint(0, 10, (4, 4)), # Dataframe\n", " df2, # Timeseries\n", " )\n", "\n", "\n", "demo = gr.Interface(\n", " fn,\n", " inputs=[\n", " gr.Textbox(value=\"Lorem ipsum\", label=\"Textbox\"),\n", " gr.Textbox(lines=3, placeholder=\"Type here..\", label=\"Textbox 2\"),\n", " gr.Number(label=\"Number\", value=42),\n", " gr.Slider(10, 20, value=15, label=\"Slider: 10 - 20\"),\n", " gr.Slider(maximum=20, step=0.04, label=\"Slider: step @ 0.04\"),\n", " gr.Checkbox(label=\"Checkbox\"),\n", " gr.CheckboxGroup(label=\"CheckboxGroup\", choices=CHOICES, value=CHOICES[0:2]),\n", " gr.Radio(label=\"Radio\", choices=CHOICES, value=CHOICES[2]),\n", " gr.Dropdown(label=\"Dropdown\", choices=CHOICES),\n", " gr.Dropdown(label=\"Multiselect Dropdown (Max choice: 2)\", choices=CHOICES, multiselect=True, max_choices=2),\n", " gr.Image(label=\"Image\"),\n", " gr.Image(label=\"Image w/ Cropper\", tool=\"select\"),\n", " gr.Image(label=\"Sketchpad\", source=\"canvas\"),\n", " gr.Image(label=\"Webcam\", source=\"webcam\"),\n", " gr.Video(label=\"Video\"),\n", " gr.Audio(label=\"Audio\"),\n", " gr.Audio(label=\"Microphone\", source=\"microphone\"),\n", " gr.File(label=\"File\"),\n", " gr.Dataframe(label=\"Dataframe\", headers=[\"Name\", \"Age\", \"Gender\"]),\n", " gr.Timeseries(x=\"time\", y=[\"price\", \"value\"], colors=[\"pink\", \"purple\"]),\n", " ],\n", " outputs=[\n", " gr.Textbox(label=\"Textbox\"),\n", " gr.Label(label=\"Label\"),\n", " gr.Audio(label=\"Audio\"),\n", " gr.Image(label=\"Image\"),\n", " gr.Video(label=\"Video\"),\n", " gr.HighlightedText(label=\"HighlightedText\", \n", " color_map={\"punc\": \"pink\", \"test 0\": \"blue\"}\n", " ),\n", " gr.HighlightedText(label=\"HighlightedText\", show_legend=True),\n", " gr.JSON(label=\"JSON\"),\n", " gr.HTML(label=\"HTML\"),\n", " gr.File(label=\"File\"),\n", " gr.Dataframe(label=\"Dataframe\"),\n", " gr.Dataframe(label=\"Numpy\"),\n", " gr.Timeseries(x=\"time\", y=[\"price\", \"value\"], label=\"Timeseries\"),\n", " ],\n", " examples=[\n", " [\n", " \"the quick brown fox\",\n", " \"jumps over the lazy dog\",\n", " 10,\n", " 12,\n", " 4,\n", " True,\n", " [\"foo\", \"baz\"],\n", " \"baz\",\n", " \"bar\",\n", " [\"foo\", \"bar\"],\n", " os.path.join(os.path.abspath(''), 
\"files/cheetah1.jpg\"),\n", " os.path.join(os.path.abspath(''), \"files/cheetah1.jpg\"),\n", " os.path.join(os.path.abspath(''), \"files/cheetah1.jpg\"),\n", " os.path.join(os.path.abspath(''), \"files/cheetah1.jpg\"),\n", " os.path.join(os.path.abspath(''), \"files/world.mp4\"),\n", " os.path.join(os.path.abspath(''), \"files/cantina.wav\"),\n", " os.path.join(os.path.abspath(''), \"files/cantina.wav\"),\n", " os.path.join(os.path.abspath(''), \"files/titanic.csv\"),\n", " [[1, 2, 3, 4], [4, 5, 6, 7], [8, 9, 1, 2], [3, 4, 5, 6]],\n", " os.path.join(os.path.abspath(''), \"files/time.csv\"),\n", " ]\n", " ]\n", " * 3,\n", " title=\"Kitchen Sink\",\n", " description=\"Try out all the components!\",\n", " article=\"Learn more about [Gradio](http://gradio.app)\",\n", " cache_examples=True,\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +<<<<<<< HEAD +{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: kitchen_sink"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/cantina.wav https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/cantina.wav\n", "!wget -q -O files/cheetah1.jpg https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/cheetah1.jpg\n", "!wget -q -O files/lion.jpg https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/lion.jpg\n", "!wget -q -O files/logo.png https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/logo.png\n", "!wget -q -O files/time.csv https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/time.csv\n", "!wget -q -O files/titanic.csv https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/titanic.csv\n", "!wget -q -O files/tower.jpg https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/tower.jpg\n", "!wget -q -O files/world.mp4 https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/world.mp4"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["import os\n", "import json\n", "\n", "import numpy as np\n", "\n", "import gradio as gr\n", "\n", "CHOICES = [\"foo\", \"bar\", \"baz\"]\n", "JSONOBJ = \"\"\"{\"items\":{\"item\":[{\"id\": \"0001\",\"type\": null,\"is_good\": false,\"ppu\": 0.55,\"batters\":{\"batter\":[{ \"id\": \"1001\", \"type\": \"Regular\" },{ \"id\": \"1002\", \"type\": \"Chocolate\" },{ \"id\": \"1003\", \"type\": \"Blueberry\" },{ \"id\": \"1004\", \"type\": \"Devil's Food\" }]},\"topping\":[{ \"id\": \"5001\", \"type\": \"None\" },{ \"id\": \"5002\", \"type\": \"Glazed\" },{ \"id\": \"5005\", \"type\": \"Sugar\" },{ \"id\": \"5007\", \"type\": \"Powdered Sugar\" },{ \"id\": \"5006\", \"type\": \"Chocolate with Sprinkles\" },{ \"id\": \"5003\", \"type\": \"Chocolate\" },{ \"id\": \"5004\", \"type\": \"Maple\" }]}]}}\"\"\"\n", "\n", "\n", "def fn(\n", " text1,\n", " text2,\n", " num,\n", " slider1,\n", " slider2,\n", " single_checkbox,\n", " checkboxes,\n", " radio,\n", " dropdown,\n", " multi_dropdown,\n", " im1,\n", " 
im2,\n", " im3,\n", " im4,\n", " video,\n", " audio1,\n", " audio2,\n", " file,\n", " df1,\n", "):\n", " return (\n", " (text1 if single_checkbox else text2)\n", " + \", selected:\"\n", " + \", \".join(checkboxes), # Text\n", " {\n", " \"positive\": num / (num + slider1 + slider2),\n", " \"negative\": slider1 / (num + slider1 + slider2),\n", " \"neutral\": slider2 / (num + slider1 + slider2),\n", " }, # Label\n", " (audio1[0], np.flipud(audio1[1]))\n", " if audio1 is not None\n", " else os.path.join(os.path.abspath(''), \"files/cantina.wav\"), # Audio\n", " np.flipud(im1)\n", " if im1 is not None\n", " else os.path.join(os.path.abspath(''), \"files/cheetah1.jpg\"), # Image\n", " video\n", " if video is not None\n", " else os.path.join(os.path.abspath(''), \"files/world.mp4\"), # Video\n", " [\n", " (\"The\", \"art\"),\n", " (\"quick brown\", \"adj\"),\n", " (\"fox\", \"nn\"),\n", " (\"jumped\", \"vrb\"),\n", " (\"testing testing testing\", None),\n", " (\"over\", \"prp\"),\n", " (\"the\", \"art\"),\n", " (\"testing\", None),\n", " (\"lazy\", \"adj\"),\n", " (\"dogs\", \"nn\"),\n", " (\".\", \"punc\"),\n", " ]\n", " + [(f\"test {x}\", f\"test {x}\") for x in range(10)], # HighlightedText\n", " # [(\"The testing testing testing\", None), (\"quick brown\", 0.2), (\"fox\", 1), (\"jumped\", -1), (\"testing testing testing\", 0), (\"over\", 0), (\"the\", 0), (\"testing\", 0), (\"lazy\", 1), (\"dogs\", 0), (\".\", 1)] + [(f\"test {x}\", x/10) for x in range(-10, 10)], # HighlightedText\n", " [\n", " (\"The testing testing testing\", None),\n", " (\"over\", 0.6),\n", " (\"the\", 0.2),\n", " (\"testing\", None),\n", " (\"lazy\", -0.1),\n", " (\"dogs\", 0.4),\n", " (\".\", 0),\n", " ]\n", " + [(f\"test\", x / 10) for x in range(-10, 10)], # HighlightedText\n", " json.loads(JSONOBJ), # JSON\n", " \"\", # HTML\n", " os.path.join(os.path.abspath(''), \"files/titanic.csv\"),\n", " df1, # Dataframe\n", " np.random.randint(0, 10, (4, 4)), # Dataframe\n", " )\n", "\n", "\n", "demo = gr.Interface(\n", " fn,\n", " inputs=[\n", " gr.Textbox(value=\"Lorem ipsum\", label=\"Textbox\"),\n", " gr.Textbox(lines=3, placeholder=\"Type here..\", label=\"Textbox 2\"),\n", " gr.Number(label=\"Number\", value=42),\n", " gr.Slider(10, 20, value=15, label=\"Slider: 10 - 20\"),\n", " gr.Slider(maximum=20, step=0.04, label=\"Slider: step @ 0.04\"),\n", " gr.Checkbox(label=\"Checkbox\"),\n", " gr.CheckboxGroup(label=\"CheckboxGroup\", choices=CHOICES, value=CHOICES[0:2]),\n", " gr.Radio(label=\"Radio\", choices=CHOICES, value=CHOICES[2]),\n", " gr.Dropdown(label=\"Dropdown\", choices=CHOICES),\n", " gr.Dropdown(\n", " label=\"Multiselect Dropdown (Max choice: 2)\",\n", " choices=CHOICES,\n", " multiselect=True,\n", " max_choices=2,\n", " ),\n", " gr.Image(label=\"Image\"),\n", " gr.Image(label=\"Image w/ Cropper\", tool=\"select\"),\n", " gr.Image(label=\"Sketchpad\", source=\"canvas\"),\n", " gr.Image(label=\"Webcam\", source=\"webcam\"),\n", " gr.Video(label=\"Video\"),\n", " gr.Audio(label=\"Audio\"),\n", " gr.Audio(label=\"Microphone\", source=\"microphone\"),\n", " gr.File(label=\"File\"),\n", " gr.Dataframe(label=\"Dataframe\", headers=[\"Name\", \"Age\", \"Gender\"]),\n", " ],\n", " outputs=[\n", " gr.Textbox(label=\"Textbox\"),\n", " gr.Label(label=\"Label\"),\n", " gr.Audio(label=\"Audio\"),\n", " gr.Image(label=\"Image\"),\n", " gr.Video(label=\"Video\"),\n", " gr.HighlightedText(\n", " label=\"HighlightedText\", color_map={\"punc\": \"pink\", \"test 0\": \"blue\"}\n", " ),\n", " 
gr.HighlightedText(label=\"HighlightedText\", show_legend=True),\n", " gr.JSON(label=\"JSON\"),\n", " gr.HTML(label=\"HTML\"),\n", " gr.File(label=\"File\"),\n", " gr.Dataframe(label=\"Dataframe\"),\n", " gr.Dataframe(label=\"Numpy\"),\n", " ],\n", " examples=[\n", " [\n", " \"the quick brown fox\",\n", " \"jumps over the lazy dog\",\n", " 10,\n", " 12,\n", " 4,\n", " True,\n", " [\"foo\", \"baz\"],\n", " \"baz\",\n", " \"bar\",\n", " [\"foo\", \"bar\"],\n", " os.path.join(os.path.abspath(''), \"files/cheetah1.jpg\"),\n", " os.path.join(os.path.abspath(''), \"files/cheetah1.jpg\"),\n", " os.path.join(os.path.abspath(''), \"files/cheetah1.jpg\"),\n", " os.path.join(os.path.abspath(''), \"files/cheetah1.jpg\"),\n", " os.path.join(os.path.abspath(''), \"files/world.mp4\"),\n", " os.path.join(os.path.abspath(''), \"files/cantina.wav\"),\n", " os.path.join(os.path.abspath(''), \"files/cantina.wav\"),\n", " os.path.join(os.path.abspath(''), \"files/titanic.csv\"),\n", " [[1, 2, 3, 4], [4, 5, 6, 7], [8, 9, 1, 2], [3, 4, 5, 6]],\n", " ]\n", " ]\n", " * 3,\n", " title=\"Kitchen Sink\",\n", " description=\"Try out all the components!\",\n", " article=\"Learn more about [Gradio](http://gradio.app)\",\n", " cache_examples=True,\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +======= +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: kitchen_sink"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/cantina.wav https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/cantina.wav\n", "!wget -q -O files/cheetah1.jpg https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/cheetah1.jpg\n", "!wget -q -O files/lion.jpg https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/lion.jpg\n", "!wget -q -O files/logo.png https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/logo.png\n", "!wget -q -O files/time.csv https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/time.csv\n", "!wget -q -O files/titanic.csv https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/titanic.csv\n", "!wget -q -O files/tower.jpg https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/tower.jpg\n", "!wget -q -O files/world.mp4 https://github.com/gradio-app/gradio/raw/main/demo/kitchen_sink/files/world.mp4"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import os\n", "import json\n", "\n", "import numpy as np\n", "\n", "import gradio as gr\n", "\n", "CHOICES = [\"foo\", \"bar\", \"baz\"]\n", "JSONOBJ = \"\"\"{\"items\":{\"item\":[{\"id\": \"0001\",\"type\": null,\"is_good\": false,\"ppu\": 0.55,\"batters\":{\"batter\":[{ \"id\": \"1001\", \"type\": \"Regular\" },{ \"id\": \"1002\", \"type\": \"Chocolate\" },{ \"id\": \"1003\", \"type\": \"Blueberry\" },{ \"id\": \"1004\", \"type\": \"Devil's Food\" }]},\"topping\":[{ \"id\": \"5001\", \"type\": \"None\" },{ \"id\": \"5002\", \"type\": \"Glazed\" },{ \"id\": \"5005\", \"type\": \"Sugar\" },{ 
\"id\": \"5007\", \"type\": \"Powdered Sugar\" },{ \"id\": \"5006\", \"type\": \"Chocolate with Sprinkles\" },{ \"id\": \"5003\", \"type\": \"Chocolate\" },{ \"id\": \"5004\", \"type\": \"Maple\" }]}]}}\"\"\"\n", "\n", "\n", "def fn(\n", " text1,\n", " text2,\n", " num,\n", " slider1,\n", " slider2,\n", " single_checkbox,\n", " checkboxes,\n", " radio,\n", " dropdown,\n", " multi_dropdown,\n", " im1,\n", " im2,\n", " im3,\n", " im4,\n", " video,\n", " audio1,\n", " audio2,\n", " file,\n", " df1,\n", " df2,\n", "):\n", " return (\n", " (text1 if single_checkbox else text2)\n", " + \", selected:\"\n", " + \", \".join(checkboxes), # Text\n", " {\n", " \"positive\": num / (num + slider1 + slider2),\n", " \"negative\": slider1 / (num + slider1 + slider2),\n", " \"neutral\": slider2 / (num + slider1 + slider2),\n", " }, # Label\n", " (audio1[0], np.flipud(audio1[1]))\n", " if audio1 is not None\n", " else os.path.join(os.path.abspath(''), \"files/cantina.wav\"), # Audio\n", " np.flipud(im1)\n", " if im1 is not None\n", " else os.path.join(os.path.abspath(''), \"files/cheetah1.jpg\"), # Image\n", " video\n", " if video is not None\n", " else os.path.join(os.path.abspath(''), \"files/world.mp4\"), # Video\n", " [\n", " (\"The\", \"art\"),\n", " (\"quick brown\", \"adj\"),\n", " (\"fox\", \"nn\"),\n", " (\"jumped\", \"vrb\"),\n", " (\"testing testing testing\", None),\n", " (\"over\", \"prp\"),\n", " (\"the\", \"art\"),\n", " (\"testing\", None),\n", " (\"lazy\", \"adj\"),\n", " (\"dogs\", \"nn\"),\n", " (\".\", \"punc\"),\n", " ]\n", " + [(f\"test {x}\", f\"test {x}\") for x in range(10)], # HighlightedText\n", " # [(\"The testing testing testing\", None), (\"quick brown\", 0.2), (\"fox\", 1), (\"jumped\", -1), (\"testing testing testing\", 0), (\"over\", 0), (\"the\", 0), (\"testing\", 0), (\"lazy\", 1), (\"dogs\", 0), (\".\", 1)] + [(f\"test {x}\", x/10) for x in range(-10, 10)], # HighlightedText\n", " [\n", " (\"The testing testing testing\", None),\n", " (\"over\", 0.6),\n", " (\"the\", 0.2),\n", " (\"testing\", None),\n", " (\"lazy\", -0.1),\n", " (\"dogs\", 0.4),\n", " (\".\", 0),\n", " ]\n", " + [(f\"test\", x / 10) for x in range(-10, 10)], # HighlightedText\n", " json.loads(JSONOBJ), # JSON\n", " \"\", # HTML\n", " os.path.join(os.path.abspath(''), \"files/titanic.csv\"),\n", " df1, # Dataframe\n", " np.random.randint(0, 10, (4, 4)), # Dataframe\n", " df2, # Timeseries\n", " )\n", "\n", "\n", "demo = gr.Interface(\n", " fn,\n", " inputs=[\n", " gr.Textbox(value=\"Lorem ipsum\", label=\"Textbox\"),\n", " gr.Textbox(lines=3, placeholder=\"Type here..\", label=\"Textbox 2\"),\n", " gr.Number(label=\"Number\", value=42),\n", " gr.Slider(10, 20, value=15, label=\"Slider: 10 - 20\"),\n", " gr.Slider(maximum=20, step=0.04, label=\"Slider: step @ 0.04\"),\n", " gr.Checkbox(label=\"Checkbox\"),\n", " gr.CheckboxGroup(label=\"CheckboxGroup\", choices=CHOICES, value=CHOICES[0:2]),\n", " gr.Radio(label=\"Radio\", choices=CHOICES, value=CHOICES[2]),\n", " gr.Dropdown(label=\"Dropdown\", choices=CHOICES),\n", " gr.Dropdown(label=\"Multiselect Dropdown (Max choice: 2)\", choices=CHOICES, multiselect=True, max_choices=2),\n", " gr.Image(label=\"Image\"),\n", " gr.Image(label=\"Image w/ Cropper\", tool=\"select\"),\n", " gr.Image(label=\"Sketchpad\", source=\"canvas\"),\n", " gr.Image(label=\"Webcam\", source=\"webcam\"),\n", " gr.Video(label=\"Video\"),\n", " gr.Audio(label=\"Audio\"),\n", " gr.Audio(label=\"Microphone\", source=\"microphone\"),\n", " gr.File(label=\"File\"),\n", " 
gr.Dataframe(label=\"Dataframe\", headers=[\"Name\", \"Age\", \"Gender\"]),\n", " gr.Timeseries(x=\"time\", y=[\"price\", \"value\"], colors=[\"pink\", \"purple\"]),\n", " ],\n", " outputs=[\n", " gr.Textbox(label=\"Textbox\"),\n", " gr.Label(label=\"Label\"),\n", " gr.Audio(label=\"Audio\"),\n", " gr.Image(label=\"Image\"),\n", " gr.Video(label=\"Video\"),\n", " gr.HighlightedText(label=\"HighlightedText\", \n", " color_map={\"punc\": \"pink\", \"test 0\": \"blue\"}\n", " ),\n", " gr.HighlightedText(label=\"HighlightedText\", show_legend=True),\n", " gr.JSON(label=\"JSON\"),\n", " gr.HTML(label=\"HTML\"),\n", " gr.File(label=\"File\"),\n", " gr.Dataframe(label=\"Dataframe\"),\n", " gr.Dataframe(label=\"Numpy\"),\n", " gr.Timeseries(x=\"time\", y=[\"price\", \"value\"], label=\"Timeseries\"),\n", " ],\n", " examples=[\n", " [\n", " \"the quick brown fox\",\n", " \"jumps over the lazy dog\",\n", " 10,\n", " 12,\n", " 4,\n", " True,\n", " [\"foo\", \"baz\"],\n", " \"baz\",\n", " \"bar\",\n", " [\"foo\", \"bar\"],\n", " os.path.join(os.path.abspath(''), \"files/cheetah1.jpg\"),\n", " os.path.join(os.path.abspath(''), \"files/cheetah1.jpg\"),\n", " os.path.join(os.path.abspath(''), \"files/cheetah1.jpg\"),\n", " os.path.join(os.path.abspath(''), \"files/cheetah1.jpg\"),\n", " os.path.join(os.path.abspath(''), \"files/world.mp4\"),\n", " os.path.join(os.path.abspath(''), \"files/cantina.wav\"),\n", " os.path.join(os.path.abspath(''), \"files/cantina.wav\"),\n", " os.path.join(os.path.abspath(''), \"files/titanic.csv\"),\n", " [[1, 2, 3, 4], [4, 5, 6, 7], [8, 9, 1, 2], [3, 4, 5, 6]],\n", " os.path.join(os.path.abspath(''), \"files/time.csv\"),\n", " ]\n", " ]\n", " * 3,\n", " title=\"Kitchen Sink\",\n", " description=\"Try out all the components!\",\n", " article=\"Learn more about [Gradio](http://gradio.app)\",\n", " cache_examples=True,\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +>>>>>>> main diff --git a/demo/kitchen_sink/run.py b/demo/kitchen_sink/run.py index 70a27150060f..bc5343244c8c 100755 --- a/demo/kitchen_sink/run.py +++ b/demo/kitchen_sink/run.py @@ -29,7 +29,6 @@ def fn( audio2, file, df1, - df2, ): return ( (text1 if single_checkbox else text2) @@ -81,7 +80,6 @@ def fn( os.path.join(os.path.dirname(__file__), "files/titanic.csv"), df1, # Dataframe np.random.randint(0, 10, (4, 4)), # Dataframe - df2, # Timeseries ) @@ -97,7 +95,12 @@ def fn( gr.CheckboxGroup(label="CheckboxGroup", choices=CHOICES, value=CHOICES[0:2]), gr.Radio(label="Radio", choices=CHOICES, value=CHOICES[2]), gr.Dropdown(label="Dropdown", choices=CHOICES), - gr.Dropdown(label="Multiselect Dropdown (Max choice: 2)", choices=CHOICES, multiselect=True, max_choices=2), + gr.Dropdown( + label="Multiselect Dropdown (Max choice: 2)", + choices=CHOICES, + multiselect=True, + max_choices=2, + ), gr.Image(label="Image"), gr.Image(label="Image w/ Cropper", tool="select"), gr.Image(label="Sketchpad", source="canvas"), @@ -107,7 +110,6 @@ def fn( gr.Audio(label="Microphone", source="microphone"), gr.File(label="File"), gr.Dataframe(label="Dataframe", headers=["Name", "Age", "Gender"]), - gr.Timeseries(x="time", y=["price", "value"], colors=["pink", "purple"]), ], outputs=[ gr.Textbox(label="Textbox"), @@ -115,8 +117,8 @@ def fn( gr.Audio(label="Audio"), gr.Image(label="Image"), gr.Video(label="Video"), - gr.HighlightedText(label="HighlightedText", - color_map={"punc": "pink", "test 0": "blue"} + gr.HighlightedText( + label="HighlightedText", 
color_map={"punc": "pink", "test 0": "blue"} ), gr.HighlightedText(label="HighlightedText", show_legend=True), gr.JSON(label="JSON"), @@ -124,7 +126,6 @@ def fn( gr.File(label="File"), gr.Dataframe(label="Dataframe"), gr.Dataframe(label="Numpy"), - gr.Timeseries(x="time", y=["price", "value"], label="Timeseries"), ], examples=[ [ @@ -147,7 +148,6 @@ def fn( os.path.join(os.path.dirname(__file__), "files/cantina.wav"), os.path.join(os.path.dirname(__file__), "files/titanic.csv"), [[1, 2, 3, 4], [4, 5, 6, 7], [8, 9, 1, 2], [3, 4, 5, 6]], - os.path.join(os.path.dirname(__file__), "files/time.csv"), ] ] * 3, @@ -158,4 +158,4 @@ def fn( ) if __name__ == "__main__": - demo.launch() \ No newline at end of file + demo.launch() diff --git a/demo/kitchen_sink_random/run.py b/demo/kitchen_sink_random/run.py index 41c92d8a8db7..5b4440e94984 100644 --- a/demo/kitchen_sink_random/run.py +++ b/demo/kitchen_sink_random/run.py @@ -59,9 +59,7 @@ {"random_number_rows": range(random.randint(0, 10))} ) ), - gr.Timeseries(value=lambda: os.path.join(file_dir, "time.csv")), gr.State(value=lambda: random.choice(string.ascii_lowercase)), - gr.Button(value=lambda: random.choice(["Run", "Go", "predict"])), gr.ColorPicker(value=lambda: random.choice(["#000000", "#ff0000", "#0000FF"])), gr.Label(value=lambda: random.choice(["Pedestrian", "Car", "Cyclist"])), gr.HighlightedText( diff --git a/demo/model3D/run.ipynb b/demo/model3D/run.ipynb index 7ea65d26b593..53e4ceea0a1a 100644 --- a/demo/model3D/run.ipynb +++ b/demo/model3D/run.ipynb @@ -1 +1,5 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: model3D"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/Bunny.obj https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/Bunny.obj\n", "!wget -q -O files/Duck.glb https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/Duck.glb\n", "!wget -q -O files/Fox.gltf https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/Fox.gltf\n", "!wget -q -O files/face.obj https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/face.obj\n", "!wget -q -O files/source.txt https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/source.txt"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "\n", "\n", "def load_mesh(mesh_file_name):\n", " return mesh_file_name\n", "\n", "\n", "demo = gr.Interface(\n", " fn=load_mesh,\n", " inputs=gr.Model3D(),\n", " outputs=gr.Model3D(\n", " clear_color=[0.0, 0.0, 0.0, 0.0], label=\"3D Model\"),\n", " examples=[\n", " [os.path.join(os.path.abspath(''), \"files/Bunny.obj\")],\n", " [os.path.join(os.path.abspath(''), \"files/Duck.glb\")],\n", " [os.path.join(os.path.abspath(''), \"files/Fox.gltf\")],\n", " [os.path.join(os.path.abspath(''), \"files/face.obj\")],\n", " ],\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +<<<<<<< HEAD +{"cells": [{"cell_type": "markdown", "id": 
302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: model3D"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/Bunny.obj https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/Bunny.obj\n", "!wget -q -O files/Duck.glb https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/Duck.glb\n", "!wget -q -O files/Fox.gltf https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/Fox.gltf\n", "!wget -q -O files/face.obj https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/face.obj\n", "!wget -q -O files/source.txt https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/source.txt"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "\n", "\n", "def load_mesh(mesh_file_name):\n", " return mesh_file_name\n", "\n", "\n", "demo = gr.Interface(\n", " fn=load_mesh,\n", " inputs=gr.Model3D(),\n", " outputs=gr.Model3D(\n", " clear_color=[0.0, 0.0, 0.0, 0.0], label=\"3D Model\"),\n", " examples=[\n", " [os.path.join(os.path.abspath(''), \"files/Bunny.obj\")],\n", " [os.path.join(os.path.abspath(''), \"files/Duck.glb\")],\n", " [os.path.join(os.path.abspath(''), \"files/Fox.gltf\")],\n", " [os.path.join(os.path.abspath(''), \"files/face.obj\")],\n", " ],\n", " cache_examples=True\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +======= +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: model3D"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/Bunny.obj https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/Bunny.obj\n", "!wget -q -O files/Duck.glb https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/Duck.glb\n", "!wget -q -O files/Fox.gltf https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/Fox.gltf\n", "!wget -q -O files/face.obj https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/face.obj\n", "!wget -q -O files/source.txt https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/source.txt"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "\n", "\n", "def load_mesh(mesh_file_name):\n", " return mesh_file_name\n", "\n", "\n", "demo = gr.Interface(\n", " fn=load_mesh,\n", " inputs=gr.Model3D(),\n", " outputs=gr.Model3D(\n", " clear_color=[0.0, 0.0, 0.0, 0.0], label=\"3D Model\"),\n", " examples=[\n", " [os.path.join(os.path.abspath(''), \"files/Bunny.obj\")],\n", " [os.path.join(os.path.abspath(''), \"files/Duck.glb\")],\n", " [os.path.join(os.path.abspath(''), 
\"files/Fox.gltf\")],\n", " [os.path.join(os.path.abspath(''), \"files/face.obj\")],\n", " ],\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +>>>>>>> main diff --git a/demo/model3D/run.py b/demo/model3D/run.py index 5a765cb19078..e26752f8d5dd 100644 --- a/demo/model3D/run.py +++ b/demo/model3D/run.py @@ -17,6 +17,7 @@ def load_mesh(mesh_file_name): [os.path.join(os.path.dirname(__file__), "files/Fox.gltf")], [os.path.join(os.path.dirname(__file__), "files/face.obj")], ], + cache_examples=True ) if __name__ == "__main__": diff --git a/demo/on_listener_basic/run.ipynb b/demo/on_listener_basic/run.ipynb index d3cc658c5bb7..8f78c318b40a 100644 --- a/demo/on_listener_basic/run.ipynb +++ b/demo/on_listener_basic/run.ipynb @@ -1 +1,5 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: on_listener_basic"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks() as demo:\n", " name = gr.Textbox(label=\"Name\")\n", " output = gr.Textbox(label=\"Output Box\")\n", " greet_btn = gr.Button(\"Greet\")\n", "\n", " def greet(name):\n", " return \"Hello \" + name + \"!\"\n", "\n", " gr.on(\n", " triggers=[name.submit, greet_btn.click],\n", " fn=greet,\n", " inputs=name,\n", " outputs=output,\n", " )\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +<<<<<<< HEAD +{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: on_listener_basic"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks() as demo:\n", " name = gr.Textbox(label=\"Name\")\n", " output = gr.Textbox(label=\"Output Box\")\n", " greet_btn = gr.Button(\"Greet\")\n", "\n", " def greet(name):\n", " return \"Hello \" + name + \"!\"\n", "\n", " gr.on(\n", " triggers=[name.submit, greet_btn.click],\n", " fn=greet,\n", " inputs=name,\n", " outputs=output,\n", " )\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +======= +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: on_listener_basic"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks() as demo:\n", " name = gr.Textbox(label=\"Name\")\n", " output = gr.Textbox(label=\"Output Box\")\n", " greet_btn = gr.Button(\"Greet\")\n", "\n", " def greet(name):\n", " return \"Hello \" + name + \"!\"\n", "\n", " gr.on(\n", " triggers=[name.submit, 
greet_btn.click],\n", " fn=greet,\n", " inputs=name,\n", " outputs=output,\n", " )\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +>>>>>>> main diff --git a/demo/on_listener_decorator/run.ipynb b/demo/on_listener_decorator/run.ipynb index 1483324520b2..7091592bf529 100644 --- a/demo/on_listener_decorator/run.ipynb +++ b/demo/on_listener_decorator/run.ipynb @@ -1 +1,5 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: on_listener_decorator"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks() as demo:\n", " name = gr.Textbox(label=\"Name\")\n", " output = gr.Textbox(label=\"Output Box\")\n", " greet_btn = gr.Button(\"Greet\")\n", "\n", " @gr.on(triggers=[name.submit, greet_btn.click], inputs=name, outputs=output)\n", " def greet(name):\n", " return \"Hello \" + name + \"!\"\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +<<<<<<< HEAD +{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: on_listener_decorator"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks() as demo:\n", " name = gr.Textbox(label=\"Name\")\n", " output = gr.Textbox(label=\"Output Box\")\n", " greet_btn = gr.Button(\"Greet\")\n", "\n", " @gr.on(triggers=[name.submit, greet_btn.click], inputs=name, outputs=output)\n", " def greet(name):\n", " return \"Hello \" + name + \"!\"\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +======= +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: on_listener_decorator"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks() as demo:\n", " name = gr.Textbox(label=\"Name\")\n", " output = gr.Textbox(label=\"Output Box\")\n", " greet_btn = gr.Button(\"Greet\")\n", "\n", " @gr.on(triggers=[name.submit, greet_btn.click], inputs=name, outputs=output)\n", " def greet(name):\n", " return \"Hello \" + name + \"!\"\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +>>>>>>> main diff --git a/demo/on_listener_live/run.ipynb b/demo/on_listener_live/run.ipynb index e6a97f9293e9..c119a21a61fd 100644 --- a/demo/on_listener_live/run.ipynb +++ b/demo/on_listener_live/run.ipynb @@ -1 +1,5 @@ -{"cells": [{"cell_type": "markdown", "id": 
"302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: on_listener_live"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " num1 = gr.Slider(1, 10)\n", " num2 = gr.Slider(1, 10)\n", " num3 = gr.Slider(1, 10)\n", " output = gr.Number(label=\"Sum\")\n", "\n", " @gr.on(inputs=[num1, num2, num3], outputs=output)\n", " def sum(a, b, c):\n", " return a + b + c\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +<<<<<<< HEAD +{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: on_listener_live"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " num1 = gr.Slider(1, 10)\n", " num2 = gr.Slider(1, 10)\n", " num3 = gr.Slider(1, 10)\n", " output = gr.Number(label=\"Sum\")\n", "\n", " @gr.on(inputs=[num1, num2, num3], outputs=output)\n", " def sum(a, b, c):\n", " return a + b + c\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +======= +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: on_listener_live"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " num1 = gr.Slider(1, 10)\n", " num2 = gr.Slider(1, 10)\n", " num3 = gr.Slider(1, 10)\n", " output = gr.Number(label=\"Sum\")\n", "\n", " @gr.on(inputs=[num1, num2, num3], outputs=output)\n", " def sum(a, b, c):\n", " return a + b + c\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +>>>>>>> main diff --git a/demo/stable-diffusion/run.ipynb b/demo/stable-diffusion/run.ipynb index 95d61e179903..a059ed9ddf3e 100644 --- a/demo/stable-diffusion/run.ipynb +++ b/demo/stable-diffusion/run.ipynb @@ -1 +1,5 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stable-diffusion\n", "### Note: This is a simplified version of the code needed to create the Stable Diffusion demo. 
See full code here: https://hf.co/spaces/stabilityai/stable-diffusion/tree/main\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio diffusers transformers nvidia-ml-py3 ftfy torch"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import torch\n", "from diffusers import StableDiffusionPipeline\n", "from PIL import Image\n", "import os\n", "\n", "auth_token = os.getenv(\"auth_token\")\n", "model_id = \"CompVis/stable-diffusion-v1-4\"\n", "device = \"cpu\"\n", "pipe = StableDiffusionPipeline.from_pretrained(\n", " model_id, use_auth_token=auth_token, revision=\"fp16\", torch_dtype=torch.float16\n", ")\n", "pipe = pipe.to(device)\n", "\n", "\n", "def infer(prompt, samples, steps, scale, seed):\n", " generator = torch.Generator(device=device).manual_seed(seed)\n", " images_list = pipe(\n", " [prompt] * samples,\n", " num_inference_steps=steps,\n", " guidance_scale=scale,\n", " generator=generator,\n", " )\n", " images = []\n", " safe_image = Image.open(r\"unsafe.png\")\n", " for i, image in enumerate(images_list[\"sample\"]):\n", " if images_list[\"nsfw_content_detected\"][i]:\n", " images.append(safe_image)\n", " else:\n", " images.append(image)\n", " return images\n", "\n", "\n", "block = gr.Blocks()\n", "\n", "with block:\n", " with gr.Group():\n", " with gr.Row():\n", " text = gr.Textbox(\n", " label=\"Enter your prompt\",\n", " max_lines=1,\n", " placeholder=\"Enter your prompt\",\n", " container=False,\n", " )\n", " btn = gr.Button(\"Generate image\")\n", " gallery = gr.Gallery(\n", " label=\"Generated images\",\n", " show_label=False,\n", " elem_id=\"gallery\",\n", " columns=[2],\n", " height=\"auto\",\n", " )\n", "\n", " advanced_button = gr.Button(\"Advanced options\", elem_id=\"advanced-btn\")\n", "\n", " with gr.Row(elem_id=\"advanced-options\"):\n", " samples = gr.Slider(label=\"Images\", minimum=1, maximum=4, value=4, step=1)\n", " steps = gr.Slider(label=\"Steps\", minimum=1, maximum=50, value=45, step=1)\n", " scale = gr.Slider(\n", " label=\"Guidance Scale\", minimum=0, maximum=50, value=7.5, step=0.1\n", " )\n", " seed = gr.Slider(\n", " label=\"Seed\",\n", " minimum=0,\n", " maximum=2147483647,\n", " step=1,\n", " randomize=True,\n", " )\n", " gr.on([text.submit, btn.click], infer, inputs=[text, samples, steps, scale, seed], outputs=gallery)\n", " advanced_button.click(\n", " None,\n", " [],\n", " text,\n", " )\n", "\n", "block.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +<<<<<<< HEAD +{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: stable-diffusion\n", "### Note: This is a simplified version of the code needed to create the Stable Diffusion demo. 
See full code here: https://hf.co/spaces/stabilityai/stable-diffusion/tree/main\n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio diffusers transformers nvidia-ml-py3 ftfy torch"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import torch\n", "from diffusers import StableDiffusionPipeline\n", "from PIL import Image\n", "import os\n", "\n", "auth_token = os.getenv(\"auth_token\")\n", "model_id = \"CompVis/stable-diffusion-v1-4\"\n", "device = \"cpu\"\n", "pipe = StableDiffusionPipeline.from_pretrained(\n", " model_id, use_auth_token=auth_token, revision=\"fp16\", torch_dtype=torch.float16\n", ")\n", "pipe = pipe.to(device)\n", "\n", "\n", "def infer(prompt, samples, steps, scale, seed):\n", " generator = torch.Generator(device=device).manual_seed(seed)\n", " images_list = pipe(\n", " [prompt] * samples,\n", " num_inference_steps=steps,\n", " guidance_scale=scale,\n", " generator=generator,\n", " )\n", " images = []\n", " safe_image = Image.open(r\"unsafe.png\")\n", " for i, image in enumerate(images_list[\"sample\"]):\n", " if images_list[\"nsfw_content_detected\"][i]:\n", " images.append(safe_image)\n", " else:\n", " images.append(image)\n", " return images\n", "\n", "\n", "block = gr.Blocks()\n", "\n", "with block:\n", " with gr.Group():\n", " with gr.Row():\n", " text = gr.Textbox(\n", " label=\"Enter your prompt\",\n", " max_lines=1,\n", " placeholder=\"Enter your prompt\",\n", " container=False,\n", " )\n", " btn = gr.Button(\"Generate image\")\n", " gallery = gr.Gallery(\n", " label=\"Generated images\",\n", " show_label=False,\n", " elem_id=\"gallery\",\n", " columns=[2],\n", " height=\"auto\",\n", " )\n", "\n", " advanced_button = gr.Button(\"Advanced options\", elem_id=\"advanced-btn\")\n", "\n", " with gr.Row(elem_id=\"advanced-options\"):\n", " samples = gr.Slider(label=\"Images\", minimum=1, maximum=4, value=4, step=1)\n", " steps = gr.Slider(label=\"Steps\", minimum=1, maximum=50, value=45, step=1)\n", " scale = gr.Slider(\n", " label=\"Guidance Scale\", minimum=0, maximum=50, value=7.5, step=0.1\n", " )\n", " seed = gr.Slider(\n", " label=\"Seed\",\n", " minimum=0,\n", " maximum=2147483647,\n", " step=1,\n", " randomize=True,\n", " )\n", " gr.on([text.submit, btn.click], infer, inputs=[text, samples, steps, scale, seed], outputs=gallery)\n", " advanced_button.click(\n", " None,\n", " [],\n", " text,\n", " )\n", "\n", "block.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +======= +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stable-diffusion\n", "### Note: This is a simplified version of the code needed to create the Stable Diffusion demo. 
See full code here: https://hf.co/spaces/stabilityai/stable-diffusion/tree/main\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio diffusers transformers nvidia-ml-py3 ftfy torch"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import torch\n", "from diffusers import StableDiffusionPipeline\n", "from PIL import Image\n", "import os\n", "\n", "auth_token = os.getenv(\"auth_token\")\n", "model_id = \"CompVis/stable-diffusion-v1-4\"\n", "device = \"cpu\"\n", "pipe = StableDiffusionPipeline.from_pretrained(\n", " model_id, use_auth_token=auth_token, revision=\"fp16\", torch_dtype=torch.float16\n", ")\n", "pipe = pipe.to(device)\n", "\n", "\n", "def infer(prompt, samples, steps, scale, seed):\n", " generator = torch.Generator(device=device).manual_seed(seed)\n", " images_list = pipe(\n", " [prompt] * samples,\n", " num_inference_steps=steps,\n", " guidance_scale=scale,\n", " generator=generator,\n", " )\n", " images = []\n", " safe_image = Image.open(r\"unsafe.png\")\n", " for i, image in enumerate(images_list[\"sample\"]):\n", " if images_list[\"nsfw_content_detected\"][i]:\n", " images.append(safe_image)\n", " else:\n", " images.append(image)\n", " return images\n", "\n", "\n", "block = gr.Blocks()\n", "\n", "with block:\n", " with gr.Group():\n", " with gr.Row():\n", " text = gr.Textbox(\n", " label=\"Enter your prompt\",\n", " max_lines=1,\n", " placeholder=\"Enter your prompt\",\n", " container=False,\n", " )\n", " btn = gr.Button(\"Generate image\")\n", " gallery = gr.Gallery(\n", " label=\"Generated images\",\n", " show_label=False,\n", " elem_id=\"gallery\",\n", " columns=[2],\n", " height=\"auto\",\n", " )\n", "\n", " advanced_button = gr.Button(\"Advanced options\", elem_id=\"advanced-btn\")\n", "\n", " with gr.Row(elem_id=\"advanced-options\"):\n", " samples = gr.Slider(label=\"Images\", minimum=1, maximum=4, value=4, step=1)\n", " steps = gr.Slider(label=\"Steps\", minimum=1, maximum=50, value=45, step=1)\n", " scale = gr.Slider(\n", " label=\"Guidance Scale\", minimum=0, maximum=50, value=7.5, step=0.1\n", " )\n", " seed = gr.Slider(\n", " label=\"Seed\",\n", " minimum=0,\n", " maximum=2147483647,\n", " step=1,\n", " randomize=True,\n", " )\n", " gr.on([text.submit, btn.click], infer, inputs=[text, samples, steps, scale, seed], outputs=gallery)\n", " advanced_button.click(\n", " None,\n", " [],\n", " text,\n", " )\n", "\n", "block.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +>>>>>>> main diff --git a/demo/stt_or_tts/run.ipynb b/demo/stt_or_tts/run.ipynb index ae4ef31d6ae0..ad53035e0e6f 100644 --- a/demo/stt_or_tts/run.ipynb +++ b/demo/stt_or_tts/run.ipynb @@ -1 +1,5 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stt_or_tts"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "tts_examples = [\n", " \"I love learning machine learning\",\n", " \"How do you do?\",\n", "]\n", "\n", "tts_demo = gr.load(\n", " 
\"huggingface/facebook/fastspeech2-en-ljspeech\",\n", " title=None,\n", " examples=tts_examples,\n", " description=\"Give me something to say!\",\n", ")\n", "\n", "stt_demo = gr.load(\n", " \"huggingface/facebook/wav2vec2-base-960h\",\n", " title=None,\n", " inputs=\"mic\",\n", " description=\"Let me try to guess what you're saying!\",\n", ")\n", "\n", "demo = gr.TabbedInterface([tts_demo, stt_demo], [\"Text-to-speech\", \"Speech-to-text\"])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +<<<<<<< HEAD +{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: stt_or_tts"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "tts_examples = [\n", " \"I love learning machine learning\",\n", " \"How do you do?\",\n", "]\n", "\n", "tts_demo = gr.load(\n", " \"huggingface/facebook/fastspeech2-en-ljspeech\",\n", " title=None,\n", " examples=tts_examples,\n", " description=\"Give me something to say!\",\n", " cache_examples=False\n", ")\n", "\n", "stt_demo = gr.load(\n", " \"huggingface/facebook/wav2vec2-base-960h\",\n", " title=None,\n", " inputs=\"mic\",\n", " description=\"Let me try to guess what you're saying!\",\n", ")\n", "\n", "demo = gr.TabbedInterface([tts_demo, stt_demo], [\"Text-to-speech\", \"Speech-to-text\"])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +======= +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stt_or_tts"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "tts_examples = [\n", " \"I love learning machine learning\",\n", " \"How do you do?\",\n", "]\n", "\n", "tts_demo = gr.load(\n", " \"huggingface/facebook/fastspeech2-en-ljspeech\",\n", " title=None,\n", " examples=tts_examples,\n", " description=\"Give me something to say!\",\n", ")\n", "\n", "stt_demo = gr.load(\n", " \"huggingface/facebook/wav2vec2-base-960h\",\n", " title=None,\n", " inputs=\"mic\",\n", " description=\"Let me try to guess what you're saying!\",\n", ")\n", "\n", "demo = gr.TabbedInterface([tts_demo, stt_demo], [\"Text-to-speech\", \"Speech-to-text\"])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +>>>>>>> main diff --git a/demo/stt_or_tts/run.py b/demo/stt_or_tts/run.py index ed6a1adc32cf..98c97dd37611 100644 --- a/demo/stt_or_tts/run.py +++ b/demo/stt_or_tts/run.py @@ -10,6 +10,7 @@ title=None, examples=tts_examples, description="Give me something to say!", + cache_examples=False ) stt_demo = gr.load( diff --git a/demo/timeseries_component/run.ipynb b/demo/timeseries_component/run.ipynb deleted file mode 100644 index 5cff7ead96b5..000000000000 --- a/demo/timeseries_component/run.ipynb +++ /dev/null @@ -1 +0,0 @@ -{"cells": [{"cell_type": 
"markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: timeseries_component"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr \n", "\n", "with gr.Blocks() as demo:\n", " gr.Timeseries()\n", "\n", "demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/timeseries_component/run.py b/demo/timeseries_component/run.py deleted file mode 100644 index 0e65d8907fed..000000000000 --- a/demo/timeseries_component/run.py +++ /dev/null @@ -1,6 +0,0 @@ -import gradio as gr - -with gr.Blocks() as demo: - gr.Timeseries() - -demo.launch() \ No newline at end of file diff --git a/demo/video_component/run.ipynb b/demo/video_component/run.ipynb index 290e9a4d2121..4e4bccc3eaac 100644 --- a/demo/video_component/run.ipynb +++ b/demo/video_component/run.ipynb @@ -1 +1,5 @@ -{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: video_component"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/a.mp4 https://github.com/gradio-app/gradio/raw/main/demo/video_component/files/a.mp4\n", "!wget -q -O files/b.mp4 https://github.com/gradio-app/gradio/raw/main/demo/video_component/files/b.mp4\n", "!wget -q -O files/world.mp4 https://github.com/gradio-app/gradio/raw/main/demo/video_component/files/world.mp4"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "\n", "\n", "a = os.path.join(os.path.abspath(''), \"files/world.mp4\") # Video\n", "b = os.path.join(os.path.abspath(''), \"files/a.mp4\") # Video\n", "c = os.path.join(os.path.abspath(''), \"files/b.mp4\") # Video\n", "\n", "\n", "demo = gr.Interface(\n", " fn=lambda x: x,\n", " inputs=gr.Video(type=\"file\"),\n", " outputs=gr.Video(),\n", " examples=[\n", " [a],\n", " [b],\n", " [c],\n", " ],\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file +<<<<<<< HEAD +{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: video_component"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/a.mp4 https://github.com/gradio-app/gradio/raw/main/demo/video_component/files/a.mp4\n", "!wget -q -O files/b.mp4 https://github.com/gradio-app/gradio/raw/main/demo/video_component/files/b.mp4\n", "!wget -q -O files/world.mp4 
https://github.com/gradio-app/gradio/raw/main/demo/video_component/files/world.mp4"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "\n", "\n", "a = os.path.join(os.path.abspath(''), \"files/world.mp4\") # Video\n", "b = os.path.join(os.path.abspath(''), \"files/a.mp4\") # Video\n", "c = os.path.join(os.path.abspath(''), \"files/b.mp4\") # Video\n", "\n", "\n", "demo = gr.Interface(\n", " fn=lambda x: x,\n", " inputs=gr.Video(type=\"file\"),\n", " outputs=gr.Video(),\n", " examples=[\n", " [a],\n", " [b],\n", " [c],\n", " ],\n", " cache_examples=True\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +======= +{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: video_component"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/a.mp4 https://github.com/gradio-app/gradio/raw/main/demo/video_component/files/a.mp4\n", "!wget -q -O files/b.mp4 https://github.com/gradio-app/gradio/raw/main/demo/video_component/files/b.mp4\n", "!wget -q -O files/world.mp4 https://github.com/gradio-app/gradio/raw/main/demo/video_component/files/world.mp4"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "\n", "\n", "a = os.path.join(os.path.abspath(''), \"files/world.mp4\") # Video\n", "b = os.path.join(os.path.abspath(''), \"files/a.mp4\") # Video\n", "c = os.path.join(os.path.abspath(''), \"files/b.mp4\") # Video\n", "\n", "\n", "demo = gr.Interface(\n", " fn=lambda x: x,\n", " inputs=gr.Video(type=\"file\"),\n", " outputs=gr.Video(),\n", " examples=[\n", " [a],\n", " [b],\n", " [c],\n", " ],\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} +>>>>>>> main diff --git a/demo/video_component/run.py b/demo/video_component/run.py index 254a1fcfbb27..5b3971ebc75f 100644 --- a/demo/video_component/run.py +++ b/demo/video_component/run.py @@ -16,6 +16,7 @@ [b], [c], ], + cache_examples=True ) if __name__ == "__main__": diff --git a/gradio/CHANGELOG.md b/gradio/CHANGELOG.md index 04b46b565b24..78bc3a913fbf 100644 --- a/gradio/CHANGELOG.md +++ b/gradio/CHANGELOG.md @@ -113,6 +113,72 @@ For more information check the [`FileExplorer` documentation](https://gradio.app ### Fixes - [#5625](https://github.com/gradio-app/gradio/pull/5625) [`9ccc4794a`](https://github.com/gradio-app/gradio/commit/9ccc4794a72ce8319417119f6c370e7af3ffca6d) - Use ContextVar instead of threading.local(). Thanks [@cbensimon](https://github.com/cbensimon)! +- [#5636](https://github.com/gradio-app/gradio/pull/5636) [`fb5964fb8`](https://github.com/gradio-app/gradio/commit/fb5964fb88082e7b956853b543c468116811cab9) - Fix bug in example cache loading event. Thanks [@freddyaboulton](https://github.com/freddyaboulton)! 
+- [#5633](https://github.com/gradio-app/gradio/pull/5633) [`341402337`](https://github.com/gradio-app/gradio/commit/34140233794c29d4722020e13c2d045da642dfae) - Allow Gradio apps containing `gr.Radio()`, `gr.Checkboxgroup()`, or `gr.Dropdown()` to be loaded with `gr.load()`. Thanks [@abidlabs](https://github.com/abidlabs)! +- [#5593](https://github.com/gradio-app/gradio/pull/5593) [`88d43bd12`](https://github.com/gradio-app/gradio/commit/88d43bd124792d216da445adef932a2b02f5f416) - Fixes avatar image in chatbot being squashed. Thanks [@dawoodkhan82](https://github.com/dawoodkhan82)! + +## 3.45.0-beta.8 + +### Features + +- [#5649](https://github.com/gradio-app/gradio/pull/5649) [`d56b355c1`](https://github.com/gradio-app/gradio/commit/d56b355c12ccdeeb8406a3520fecc15ae69d9141) - Fix front-end imports + other misc fixes. Thanks [@freddyaboulton](https://github.com/freddyaboulton)! +- [#5651](https://github.com/gradio-app/gradio/pull/5651) [`0ab84bf80`](https://github.com/gradio-app/gradio/commit/0ab84bf80f66c866327473d08fe5bdc8d32f155a) - Add overwrite flag to create command. Thanks [@freddyaboulton](https://github.com/freddyaboulton)! + +## 3.45.0-beta.7 + +### Features + +- [#5648](https://github.com/gradio-app/gradio/pull/5648) [`c573e2339`](https://github.com/gradio-app/gradio/commit/c573e2339b86c85b378dc349de5e9223a3c3b04a) - Publish all components to npm. Thanks [@freddyaboulton](https://github.com/freddyaboulton)! +- [#5637](https://github.com/gradio-app/gradio/pull/5637) [`670cfb75b`](https://github.com/gradio-app/gradio/commit/670cfb75b7cfd5a25a22c5aa307cd29c8879889e) - Some minor v4 fixes. Thanks [@freddyaboulton](https://github.com/freddyaboulton)! + +## 3.45.0-beta.6 + +### Features + +- [#5630](https://github.com/gradio-app/gradio/pull/5630) [`0b4fd5b6d`](https://github.com/gradio-app/gradio/commit/0b4fd5b6db96fc95a155e5e935e17e1ab11d1161) - Fix esbuild. Thanks [@pngwn](https://github.com/pngwn)! + +## 3.45.0-beta.5 + +### Features + +- [#5624](https://github.com/gradio-app/gradio/pull/5624) [`14fc612d8`](https://github.com/gradio-app/gradio/commit/14fc612d84bf6b1408eccd3a40fab41f25477571) - Fix esbuild. Thanks [@pngwn](https://github.com/pngwn)! + +## 3.45.0-beta.4 + +### Features + +- [#5620](https://github.com/gradio-app/gradio/pull/5620) [`c4c25ecdf`](https://github.com/gradio-app/gradio/commit/c4c25ecdf8c2fab5e3c41b519564e3b6a9ebfce3) - fix build and broken imports. Thanks [@pngwn](https://github.com/pngwn)! + +## 3.45.0-beta.3 + +### Features + +- [#5618](https://github.com/gradio-app/gradio/pull/5618) [`327cc4a6c`](https://github.com/gradio-app/gradio/commit/327cc4a6c1a213238cecd21f2b6c9cedc64bde5b) - Add docstring to trigger release. Thanks [@freddyaboulton](https://github.com/freddyaboulton)! + +## 3.45.0-beta.2 + +### Features + +- [#5615](https://github.com/gradio-app/gradio/pull/5615) [`142880ba5`](https://github.com/gradio-app/gradio/commit/142880ba589126d98da3d6a38866828864cc6b81) - Publish js theme. Thanks [@freddyaboulton](https://github.com/freddyaboulton)! +- [#5613](https://github.com/gradio-app/gradio/pull/5613) [`d0b22b6cf`](https://github.com/gradio-app/gradio/commit/d0b22b6cf4345ce9954b166f8b4278f8d3e24472) - backend linting. Thanks [@freddyaboulton](https://github.com/freddyaboulton)! + +## 3.45.0-beta.1 + +### Features + +- [#5610](https://github.com/gradio-app/gradio/pull/5610) [`73f2e8e7e`](https://github.com/gradio-app/gradio/commit/73f2e8e7e426e80e397b5bf23b3a64b0dd6f4e09) - Fix js deps in cli and add gradio-preview artifacts to build. 
Thanks [@freddyaboulton](https://github.com/freddyaboulton)! + +## 3.45.0-beta.0 + +### Features + +- [#5507](https://github.com/gradio-app/gradio/pull/5507) [`1385dc688`](https://github.com/gradio-app/gradio/commit/1385dc6881f2d8ae7a41106ec21d33e2ef04d6a9) - Custom components. Thanks [@pngwn](https://github.com/pngwn)! +- [#5498](https://github.com/gradio-app/gradio/pull/5498) [`681f10c31`](https://github.com/gradio-app/gradio/commit/681f10c315a75cc8cd0473c9a0167961af7696db) - release first version. Thanks [@pngwn](https://github.com/pngwn)! +- [#5589](https://github.com/gradio-app/gradio/pull/5589) [`af1b2f9ba`](https://github.com/gradio-app/gradio/commit/af1b2f9bafbacf2804fcfe68af6bb4b921442aca) - image fixes. Thanks [@pngwn](https://github.com/pngwn)! +- [#5240](https://github.com/gradio-app/gradio/pull/5240) [`da05e59a5`](https://github.com/gradio-app/gradio/commit/da05e59a53bbad15e5755a47f46685da18e1031e) - Cleanup of `.update` and `.get_config` per component. Thanks [@aliabid94](https://github.com/aliabid94)! `get_config` is removed; the config is simply any attribute of the Block that shares a name with one of the constructor parameters. `update` is kept for backwards compatibility but deprecated; instead, return the component itself. Created an `updateable` decorator that simply checks whether we're in an update and, if so, skips the constructor and wraps the args and kwargs in an update dictionary. easy peasy. + +### Fixes + - [#5602](https://github.com/gradio-app/gradio/pull/5602) [`54d21d3f1`](https://github.com/gradio-app/gradio/commit/54d21d3f18f2ddd4e796d149a0b41461f49c711b) - Ensure `HighlightedText` with `merge_elements` loads without a value. Thanks [@hannahblair](https://github.com/hannahblair)! - [#5636](https://github.com/gradio-app/gradio/pull/5636) [`fb5964fb8`](https://github.com/gradio-app/gradio/commit/fb5964fb88082e7b956853b543c468116811cab9) - Fix bug in example cache loading event. Thanks [@freddyaboulton](https://github.com/freddyaboulton)! - [#5633](https://github.com/gradio-app/gradio/pull/5633) [`341402337`](https://github.com/gradio-app/gradio/commit/34140233794c29d4722020e13c2d045da642dfae) - Allow Gradio apps containing `gr.Radio()`, `gr.Checkboxgroup()`, or `gr.Dropdown()` to be loaded with `gr.load()`. Thanks [@abidlabs](https://github.com/abidlabs)!
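To make the `.update()` deprecation described in the changelog entry above concrete, here is a minimal sketch of the pattern it refers to: inside an event handler, return the component itself, and only the constructor arguments you pass are sent to the frontend as an update. The component and handler names below are illustrative and not part of this diff.

```python
import gradio as gr

with gr.Blocks() as demo:
    show = gr.Checkbox(label="Show greeting")
    greeting = gr.Textbox(label="Greeting", visible=False)

    def toggle(value):
        # Deprecated style: return gr.Textbox.update(visible=value)
        # Pattern described in the changelog: return the component itself; the
        # updateable decorator detects that we're inside an event listener,
        # skips the constructor, and turns the kwargs into an update dictionary.
        return gr.Textbox(visible=value)

    show.change(toggle, inputs=show, outputs=greeting)

if __name__ == "__main__":
    demo.launch()
```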
diff --git a/gradio/__init__.py b/gradio/__init__.py index e615c9578207..7bda540a39db 100644 --- a/gradio/__init__.py +++ b/gradio/__init__.py @@ -1,13 +1,13 @@ import json import gradio.components as components -import gradio.inputs as inputs -import gradio.outputs as outputs +import gradio.layouts as layouts import gradio.processing_utils import gradio.templates import gradio.themes as themes from gradio.blocks import Blocks from gradio.chat_interface import ChatInterface +from gradio.cli import deploy from gradio.components import ( HTML, JSON, @@ -36,7 +36,6 @@ HighlightedText, Highlightedtext, Image, - Interpretation, Json, Label, LinePlot, @@ -53,15 +52,13 @@ StatusTracker, Text, Textbox, - TimeSeries, - Timeseries, UploadButton, Variable, Video, component, ) -from gradio.deploy_space import deploy -from gradio.events import LikeData, SelectData, on +from gradio.data_classes import FileData +from gradio.events import EventData, LikeData, SelectData, on from gradio.exceptions import Error from gradio.external import load from gradio.flagging import ( @@ -72,7 +69,6 @@ SimpleCSVLogger, ) from gradio.helpers import ( - EventData, Info, Progress, Warning, diff --git a/gradio/blocks.py b/gradio/blocks.py index 682485d9ed4a..2759dd604179 100644 --- a/gradio/blocks.py +++ b/gradio/blocks.py @@ -1,6 +1,7 @@ from __future__ import annotations import copy +import hashlib import inspect import json import os @@ -11,9 +12,7 @@ import time import warnings import webbrowser -from abc import abstractmethod from collections import defaultdict -from functools import wraps from pathlib import Path from types import ModuleType from typing import TYPE_CHECKING, Any, AsyncIterator, Callable, Literal, Sequence, cast @@ -21,16 +20,15 @@ import anyio import requests from anyio import CapacityLimiter -from gradio_client import serializing from gradio_client import utils as client_utils from gradio_client.documentation import document, set_documentation_group -from packaging import version from gradio import ( analytics, components, external, networking, + processing_utils, queueing, routes, strings, @@ -39,14 +37,16 @@ wasm_utils, ) from gradio.context import Context +from gradio.data_classes import FileData from gradio.deprecation import check_deprecated_parameters, warn_deprecation +from gradio.events import EventData, EventListener, EventListenerMethod from gradio.exceptions import ( DuplicateBlockError, InvalidApiNameError, InvalidBlockError, InvalidComponentError, ) -from gradio.helpers import EventData, create_tracker, skip, special_args +from gradio.helpers import create_tracker, skip, special_args from gradio.state_holder import SessionState from gradio.themes import Default as DefaultTheme from gradio.themes import ThemeClass as Theme @@ -77,8 +77,7 @@ if TYPE_CHECKING: # Only import for type checking (is False at runtime). 
from fastapi.applications import FastAPI - from gradio.components import Component - from gradio.events import EventListenerMethod + from gradio.components.base import Component BUILT_IN_THEMES: dict[str, Theme] = { t.name: t @@ -92,42 +91,6 @@ } -def in_event_listener(): - from gradio.context import LocalContext - - return LocalContext.in_event_listener.get() - - -def updateable(fn): - @wraps(fn) - def wrapper(*args, **kwargs): - fn_args = inspect.getfullargspec(fn).args - self = args[0] - for i, arg in enumerate(args): - if i == 0 or i >= len(fn_args): # skip self, *args - continue - arg_name = fn_args[i] - kwargs[arg_name] = arg - self.constructor_args = kwargs - if in_event_listener(): - return None - else: - return fn(self, **kwargs) - - return wrapper - - -updated_cls_set = set() - - -class Updateable: - def __new__(cls, *args, **kwargs): - if cls not in updated_cls_set: - cls.__init__ = updateable(cls.__init__) - updated_cls_set.add(cls) - return super().__new__(cls) - - class Block: def __init__( self, @@ -159,6 +122,16 @@ def __init__( self.render() check_deprecated_parameters(self.__class__.__name__, kwargs=kwargs) + @property + def skip_api(self): + return False + + @property + def events( + self, + ) -> list[EventListener]: + return getattr(self, "EVENTS", []) + def render(self): """ Adds self into appropriate BlockContext @@ -172,7 +145,7 @@ def render(self): if Context.root_block is not None: Context.root_block.blocks[self._id] = self self.is_rendered = True - if isinstance(self, components.IOComponent): + if isinstance(self, components.Component): Context.root_block.temp_file_sets.append(self.temp_files) return self @@ -218,10 +191,13 @@ def get_config(self): if hasattr(self, parameter.name): value = getattr(self, parameter.name) config[parameter.name] = value + for e in self.events: + to_add = e.config_data() + if to_add: + config = {**config, **to_add} return {**config, "root_url": self.root_url, "name": self.get_block_name()} @staticmethod - @abstractmethod def update(**kwargs) -> dict: return {} @@ -241,6 +217,24 @@ def __init__( self.children: list[Block] = [] Block.__init__(self, visible=visible, render=render, **kwargs) + TEMPLATE_DIR = "./templates/" + FRONTEND_DIR = "../../frontend/" + + @property + def skip_api(self): + return True + + @classmethod + def get_component_class_id(cls) -> str: + module_name = cls.__module__ + module_path = sys.modules[module_name].__file__ + module_hash = hashlib.md5(f"{cls.__name__}_{module_path}".encode()).hexdigest() + return module_hash + + @property + def component_class_id(self): + return self.get_component_class_id() + def add_child(self, child: Block): self.children.append(child) @@ -333,7 +327,9 @@ def __repr__(self): return str(self) -def postprocess_update_dict(block: Block, update_dict: dict, postprocess: bool = True): +def postprocess_update_dict( + block: Component | BlockContext, update_dict: dict, postprocess: bool = True +): """ Converts a dictionary of updates into a format that can be sent to the frontend. E.g. 
{"__type__": "update", "value": "2", "interactive": False} @@ -354,7 +350,7 @@ def postprocess_update_dict(block: Block, update_dict: dict, postprocess: bool = attr_dict["__type__"] = "update" attr_dict.pop("value", None) if "value" in update_dict: - if not isinstance(block, components.IOComponent): + if not isinstance(block, components.Component): raise InvalidComponentError( f"Component {block.__class__} does not support value" ) @@ -392,147 +388,6 @@ def convert_component_dict_to_list( return predictions -def get_api_info(config: dict, serialize: bool = True): - """ - Gets the information needed to generate the API docs from a Blocks config. - Parameters: - config: a Blocks config dictionary - serialize: If True, returns the serialized version of the typed information. If False, returns the raw version. - """ - api_info = {"named_endpoints": {}, "unnamed_endpoints": {}} - mode = config.get("mode", None) - after_new_format = version.parse(config.get("version", "2.0")) > version.Version( - "3.28.3" - ) - - for d, dependency in enumerate(config["dependencies"]): - dependency_info = {"parameters": [], "returns": []} - skip_endpoint = False - - inputs = dependency["inputs"] - for i in inputs: - for component in config["components"]: - if component["id"] == i: - break - else: - skip_endpoint = True # if component not found, skip endpoint - break - type = component["type"] - if type in client_utils.SKIP_COMPONENTS: - continue - if ( - not component.get("serializer") - and type not in serializing.COMPONENT_MAPPING - ): - skip_endpoint = True # if component not serializable, skip endpoint - break - if type in client_utils.SKIP_COMPONENTS: - continue - label = component["props"].get("label", f"parameter_{i}") - # The config has the most specific API info (taking into account the parameters - # of the component), so we use that if it exists. Otherwise, we fallback to the - # Serializer's API info. 
- serializer = serializing.COMPONENT_MAPPING[type]() - if component.get("api_info") and after_new_format: - info = component["api_info"] - example = component["example_inputs"]["serialized"] - else: - assert isinstance(serializer, serializing.Serializable) - info = serializer.api_info() - example = serializer.example_inputs()["raw"] - python_info = info["info"] - if serialize and info["serialized_info"]: - python_info = serializer.serialized_info() - if ( - isinstance(serializer, serializing.FileSerializable) - and component["props"].get("file_count", "single") != "single" - ): - python_info = serializer._multiple_file_serialized_info() - - python_type = client_utils.json_schema_to_python_type(python_info) - serializer_name = serializing.COMPONENT_MAPPING[type].__name__ - dependency_info["parameters"].append( - { - "label": label, - "type": info["info"], - "python_type": { - "type": python_type, - "description": python_info.get("description", ""), - }, - "component": type.capitalize(), - "example_input": example, - "serializer": serializer_name, - } - ) - - outputs = dependency["outputs"] - for o in outputs: - for component in config["components"]: - if component["id"] == o: - break - else: - skip_endpoint = True # if component not found, skip endpoint - break - type = component["type"] - if type in client_utils.SKIP_COMPONENTS: - continue - if ( - not component.get("serializer") - and type not in serializing.COMPONENT_MAPPING - ): - skip_endpoint = True # if component not serializable, skip endpoint - break - label = component["props"].get("label", f"value_{o}") - serializer = serializing.COMPONENT_MAPPING[type]() - if component.get("api_info") and after_new_format: - info = component["api_info"] - example = component["example_inputs"]["serialized"] - else: - assert isinstance(serializer, serializing.Serializable) - info = serializer.api_info() - example = serializer.example_inputs()["raw"] - python_info = info["info"] - if serialize and info["serialized_info"]: - python_info = serializer.serialized_info() - if ( - isinstance(serializer, serializing.FileSerializable) - and component["props"].get("file_count", "single") != "single" - ): - python_info = serializer._multiple_file_serialized_info() - python_type = client_utils.json_schema_to_python_type(python_info) - serializer_name = serializing.COMPONENT_MAPPING[type].__name__ - dependency_info["returns"].append( - { - "label": label, - "type": info["info"], - "python_type": { - "type": python_type, - "description": python_info.get("description", ""), - }, - "component": type.capitalize(), - "serializer": serializer_name, - } - ) - - if not dependency["backend_fn"]: - skip_endpoint = True - - if skip_endpoint: - continue - if dependency["api_name"] is not None and dependency["api_name"] is not False: - api_info["named_endpoints"][f"/{dependency['api_name']}"] = dependency_info - elif ( - dependency["api_name"] is False - or mode == "interface" - or mode == "tabbed_interface" - ): - pass # Skip unnamed endpoints in interface mode - else: - api_info["unnamed_endpoints"][str(d)] = dependency_info - - return api_info - - @document("launch", "queue", "integrate", "load") class Blocks(BlockContext): """ @@ -630,7 +485,7 @@ def __init__( else: os.environ["HF_HUB_DISABLE_TELEMETRY"] = "True" super().__init__(render=False, **kwargs) - self.blocks: dict[int, Block] = {} + self.blocks: dict[int, Component | Block] = {} self.fns: list[BlockFunction] = [] self.dependencies = [] self.mode = mode @@ -679,9 +534,14 @@ def __init__( } 
analytics.initiated_analytics(data) + def get_component(self, id: int) -> Component: + comp = self.blocks[id] + assert isinstance(comp, components.Component), f"{comp}" + return comp + @property def _is_running_in_reload_thread(self): - from gradio.reload import reload_thread + from gradio.cli.commands.reload import reload_thread return getattr(reload_thread, "running_reload", False) @@ -810,7 +670,12 @@ def iterate_over_children(children_list): dependency.pop("status_tracker", None) dependency["preprocess"] = False dependency["postprocess"] = False - + targets = [ + EventListenerMethod( + t.__self__ if t.has_trigger else None, t.event_name + ) + for t in targets + ] dependency = blocks.set_event_trigger( targets=targets, fn=fn, **dependency )[0] @@ -869,7 +734,7 @@ def set_event_trigger( preprocess: bool = True, postprocess: bool = True, scroll_to_output: bool = False, - show_progress: str = "full", + show_progress: Literal["full", "minimal", "hidden"] | None = "full", api_name: str | None | Literal[False] = None, js: str | None = None, no_target: bool = False, @@ -909,7 +774,7 @@ def set_event_trigger( # Support for singular parameter _targets = [ ( - target.trigger._id if target.trigger and not no_target else None, + target.block._id if target.block and not no_target else None, target.event_name, ) for target in targets @@ -1124,8 +989,8 @@ def __call__(self, *inputs, fn_index: int = 0, api_name: str | None = None): if batch: outputs = [out[0] for out in outputs] - processed_outputs = self.deserialize_data(fn_index, outputs) - processed_outputs = utils.resolve_singleton(processed_outputs) + outputs = self.deserialize_data(fn_index, outputs) + processed_outputs = utils.resolve_singleton(outputs) return processed_outputs @@ -1220,6 +1085,9 @@ def serialize_data(self, fn_index: int, inputs: list[Any]) -> list[Any]: dependency = self.dependencies[fn_index] processed_input = [] + def format_file(s): + return FileData(name=s, is_file=True).model_dump() + for i, input_id in enumerate(dependency["inputs"]): try: block = self.blocks[input_id] @@ -1227,11 +1095,19 @@ def serialize_data(self, fn_index: int, inputs: list[Any]) -> list[Any]: raise InvalidBlockError( f"Input component with id {input_id} used in {dependency['trigger']}() event is not defined in this gr.Blocks context. You are allowed to nest gr.Blocks contexts, but there must be a gr.Blocks context that contains all components and events." ) from e - if not isinstance(block, components.IOComponent): + if not isinstance(block, components.Component): raise InvalidComponentError( f"{block.__class__} Component with id {input_id} not a valid input component." ) - serialized_input = block.serialize(inputs[i]) + api_info = block.api_info() + if client_utils.value_is_file(api_info): + serialized_input = client_utils.traverse( + inputs[i], + format_file, + lambda s: client_utils.is_filepath(s) or client_utils.is_url(s), + ) + else: + serialized_input = inputs[i] processed_input.append(serialized_input) return processed_input @@ -1247,15 +1123,13 @@ def deserialize_data(self, fn_index: int, outputs: list[Any]) -> list[Any]: raise InvalidBlockError( f"Output component with id {output_id} used in {dependency['trigger']}() event not found in this gr.Blocks context. You are allowed to nest gr.Blocks contexts, but there must be a gr.Blocks context that contains all components and events." 
) from e - if not isinstance(block, components.IOComponent): + if not isinstance(block, components.Component): raise InvalidComponentError( f"{block.__class__} Component with id {output_id} not a valid output component." ) - deserialized = block.deserialize( - outputs[o], - save_dir=block.DEFAULT_TEMP_DIR, - root_url=block.root_url, - hf_token=Context.hf_token, + + deserialized = client_utils.traverse( + outputs[o], lambda s: s["name"], client_utils.is_file_obj ) predictions.append(deserialized) @@ -1326,7 +1200,10 @@ def preprocess_data( else: if input_id in state: block = state[input_id] - processed_input.append(block.preprocess(inputs[i])) + inputs_cached = processing_utils.move_files_to_cache( + inputs[i], block + ) + processed_input.append(block.preprocess(inputs_cached)) else: processed_input = inputs return processed_input @@ -1445,7 +1322,8 @@ def postprocess_data( f"{block.__class__} Component with id {output_id} not a valid output component." ) prediction_value = block.postprocess(prediction_value) - output.append(prediction_value) + outputs_cached = processing_utils.move_files_to_cache(prediction_value, block) # type: ignore + output.append(outputs_cached) return output @@ -1462,11 +1340,9 @@ def handle_streaming_outputs( self.pending_streams[session_hash][run] = {} stream_run = self.pending_streams[session_hash][run] - from gradio.events import StreamableOutput - for i, output_id in enumerate(self.dependencies[fn_index]["outputs"]): block = self.blocks[output_id] - if isinstance(block, StreamableOutput) and block.streaming: + if isinstance(block, components.StreamingOutput) and block.streaming: first_chunk = output_id not in stream_run binary_data, output_data = block.stream_output( data[i], f"{session_hash}/{run}/{output_id}", first_chunk @@ -1622,10 +1498,12 @@ def get_layout(block): "type": block.get_block_name(), "props": utils.delete_none(props), } - serializer = utils.get_serializer_name(block) - if serializer: - assert isinstance(block, serializing.Serializable) - block_config["serializer"] = serializer + block_config["skip_api"] = block.skip_api + block_config["component_class_id"] = getattr( + block, "component_class_id", None + ) + + if not block.skip_api: block_config["api_info"] = block.api_info() # type: ignore block_config["example_inputs"] = block.example_inputs() # type: ignore config["components"].append(block_config) @@ -1665,7 +1543,7 @@ def load( outputs: Component | list[Component] | None = None, api_name: str | None | Literal[False] = None, scroll_to_output: bool = False, - show_progress: str = "full", + show_progress: Literal["full", "hidden", "minimal"] | None = "full", queue=None, batch: bool = False, max_batch_size: int = 4, @@ -1750,7 +1628,7 @@ def get_time(): every=every, no_target=True, ) - return Dependency(dep, dep_index, fn) + return Dependency(None, dep, dep_index, fn) def clear(self): """Resets the layout of the Blocks object.""" @@ -2265,7 +2143,7 @@ def reverse(text): ): self.block_thread() - return TupleNoPrint((self.server_app, self.local_url, self.share_url)) + return TupleNoPrint((self.server_app, self.local_url, self.share_url)) # type: ignore def integrate( self, @@ -2371,12 +2249,11 @@ def attach_load_events(self): if Context.root_block: for component in Context.root_block.blocks.values(): if ( - isinstance(component, components.IOComponent) + isinstance(component, components.Component) and component.load_event_to_attach ): load_fn, every = component.load_event_to_attach # Use set_event_trigger to avoid ambiguity between load 
class/instance method - from gradio.events import EventListenerMethod dep = self.set_event_trigger( [EventListenerMethod(self, "load")], @@ -2406,3 +2283,96 @@ def queue_enabled_for_fn(self, fn_index: int): if self.dependencies[fn_index]["queue"] is None: return self.enable_queue return self.dependencies[fn_index]["queue"] + + def get_api_info(self): + """ + Gets the information needed to generate the API docs from a Blocks. + """ + config = self.config + api_info = {"named_endpoints": {}, "unnamed_endpoints": {}} + mode = config.get("mode", None) + + for d, dependency in enumerate(config["dependencies"]): + dependency_info = {"parameters": [], "returns": []} + skip_endpoint = False + + inputs = dependency["inputs"] + for i in inputs: + for component in config["components"]: + if component["id"] == i: + break + else: + skip_endpoint = True # if component not found, skip endpoint + break + type = component["type"] + if self.blocks[component["id"]].skip_api: + continue + label = component["props"].get("label", f"parameter_{i}") + # The config has the most specific API info (taking into account the parameters + # of the component), so we use that if it exists. Otherwise, we fallback to the + # Serializer's API info. + info = self.get_component(component["id"]).api_info() + example = self.get_component(component["id"]).example_inputs() + python_type = client_utils.json_schema_to_python_type(info) + dependency_info["parameters"].append( + { + "label": label, + "type": info, + "python_type": { + "type": python_type, + "description": info.get("description", ""), + }, + "component": type.capitalize(), + "example_input": example, + } + ) + + outputs = dependency["outputs"] + for o in outputs: + for component in config["components"]: + if component["id"] == o: + break + else: + skip_endpoint = True # if component not found, skip endpoint + break + type = component["type"] + if self.blocks[component["id"]].skip_api: + continue + label = component["props"].get("label", f"value_{o}") + info = self.get_component(component["id"]).api_info() + example = self.get_component(component["id"]).example_inputs() + python_type = client_utils.json_schema_to_python_type(info) + dependency_info["returns"].append( + { + "label": label, + "type": info, + "python_type": { + "type": python_type, + "description": info.get("description", ""), + }, + "component": type.capitalize(), + } + ) + + if not dependency["backend_fn"]: + skip_endpoint = True + + if skip_endpoint: + continue + if ( + dependency["api_name"] is not None + and dependency["api_name"] is not False + ): + api_info["named_endpoints"][ + f"/{dependency['api_name']}" + ] = dependency_info + elif ( + dependency["api_name"] is False + or mode == "interface" + or mode == "tabbed_interface" + ): + pass # Skip unnamed endpoints in interface mode + else: + api_info["unnamed_endpoints"][str(d)] = dependency_info + + return api_info diff --git a/gradio/chat_interface.py b/gradio/chat_interface.py index 740d7f50e5d1..f3a581c17732 100644 --- a/gradio/chat_interface.py +++ b/gradio/chat_interface.py @@ -16,13 +16,13 @@ from gradio.components import ( Button, Chatbot, - IOComponent, + Component, Markdown, State, Textbox, get_component_instance, ) -from gradio.events import Dependency, EventListenerMethod, on +from gradio.events import Dependency, on from gradio.helpers import create_examples as Examples # noqa: N812 from gradio.helpers import special_args from gradio.layouts import Accordion, Column, Group, Row @@ -59,7 +59,7 @@ def __init__( *, chatbot: Chatbot | None = 
None, textbox: Textbox | None = None, - additional_inputs: str | IOComponent | list[str | IOComponent] | None = None, + additional_inputs: str | Component | list[str | Component] | None = None, additional_inputs_accordion_name: str = "Additional Inputs", examples: list[str] | None = None, cache_examples: bool | None = None, @@ -115,7 +115,7 @@ def __init__( self.cache_examples = True else: self.cache_examples = cache_examples or False - self.buttons: list[Button] = [] + self.buttons: list[Button | None] = [] if additional_inputs: if not isinstance(additional_inputs, list): @@ -146,7 +146,9 @@ def __init__( if textbox: textbox.container = False textbox.show_label = False - self.textbox = textbox.render() + textbox_ = textbox.render() + assert isinstance(textbox_, Textbox) + self.textbox = textbox_ else: self.textbox = Textbox( container=False, @@ -186,7 +188,7 @@ def __init__( raise ValueError( f"The stop_btn parameter must be a gr.Button, string, or None, not {type(stop_btn)}" ) - self.buttons.extend([submit_btn, stop_btn]) + self.buttons.extend([submit_btn, stop_btn]) # type: ignore with Row(): for btn in [retry_btn, undo_btn, clear_btn]: @@ -199,7 +201,7 @@ def __init__( raise ValueError( f"All the _btn parameters must be a gr.Button, string, or None, not {type(btn)}" ) - self.buttons.append(btn) + self.buttons.append(btn) # type: ignore self.fake_api_btn = Button("Fake API", visible=False) self.fake_response_textbox = Textbox( @@ -327,7 +329,7 @@ def _setup_events(self) -> None: ) def _setup_stop_events( - self, event_triggers: list[EventListenerMethod], event_to_cancel: Dependency + self, event_triggers: list[Callable], event_to_cancel: Dependency ) -> None: if self.stop_btn and self.is_generator: if self.submit_btn: diff --git a/gradio/cli.py b/gradio/cli.py deleted file mode 100644 index b521bbf0a3cb..000000000000 --- a/gradio/cli.py +++ /dev/null @@ -1,21 +0,0 @@ -import sys - -from gradio_client.cli import deploy_discord # type: ignore - -import gradio.cli_env_info -import gradio.deploy_space -import gradio.reload - - -def cli(): - args = sys.argv[1:] - if len(args) == 0: - raise ValueError("No file specified.") - elif args[0] == "deploy": - gradio.deploy_space.deploy() - elif args[0] == "environment": - gradio.cli_env_info.print_environment_info() - elif args[0] == "deploy-discord": - deploy_discord.main() - else: - gradio.reload.main() diff --git a/gradio/cli/__init__.py b/gradio/cli/__init__.py new file mode 100644 index 000000000000..1fca2de36be2 --- /dev/null +++ b/gradio/cli/__init__.py @@ -0,0 +1,4 @@ +from .cli import cli, deploy +from .commands import custom_component + +__all__ = ["cli", "deploy", "custom_component"] diff --git a/gradio/cli/cli.py b/gradio/cli/cli.py new file mode 100644 index 000000000000..d20fd573c3cf --- /dev/null +++ b/gradio/cli/cli.py @@ -0,0 +1,31 @@ +import sys + +import typer +from gradio_client.cli import deploy_discord # type: ignore + +from .commands import custom_component, deploy, print_environment_info, reload + +app = typer.Typer() +app.command("environment", help="Print Gradio environment information.")( + print_environment_info +) +app.command( + "deploy", + help="Deploy a Gradio app to Spaces. 
Must be called within the directory you would like to deploy.", +)(deploy) +app.command("deploy-discord", help="Deploy a Gradio app to Discord.")( + deploy_discord.main +) + + +def cli(): + args = sys.argv[1:] + if len(args) == 0: + raise ValueError("No file specified.") + if args[0] in {"deploy", "environment", "deploy-discord"}: + app() + elif args[0] in {"cc", "component"}: + sys.argv = sys.argv[1:] + custom_component() + else: + typer.run(reload) diff --git a/gradio/cli/commands/__init__.py b/gradio/cli/commands/__init__.py new file mode 100644 index 000000000000..e4b20dbfc7bb --- /dev/null +++ b/gradio/cli/commands/__init__.py @@ -0,0 +1,6 @@ +from .cli_env_info import print_environment_info +from .components import app as custom_component +from .deploy_space import deploy +from .reload import main as reload + +__all__ = ["deploy", "reload", "print_environment_info", "custom_component"] diff --git a/gradio/cli_env_info.py b/gradio/cli/commands/cli_env_info.py similarity index 95% rename from gradio/cli_env_info.py rename to gradio/cli/commands/cli_env_info.py index 29df6e344144..5156f3a44940 100644 --- a/gradio/cli_env_info.py +++ b/gradio/cli/commands/cli_env_info.py @@ -4,8 +4,11 @@ import platform from importlib import metadata +from rich import print + def print_environment_info(): + """Print Gradio environment information.""" print("Gradio Environment Information:\n------------------------------") print("Operating System:", platform.system()) diff --git a/gradio/cli/commands/components/__init__.py b/gradio/cli/commands/components/__init__.py new file mode 100644 index 000000000000..34f275ed3c9d --- /dev/null +++ b/gradio/cli/commands/components/__init__.py @@ -0,0 +1,3 @@ +from .app import app + +__all__ = ["app"] diff --git a/gradio/cli/commands/components/_create_utils.py b/gradio/cli/commands/components/_create_utils.py new file mode 100644 index 000000000000..6039aaa29262 --- /dev/null +++ b/gradio/cli/commands/components/_create_utils.py @@ -0,0 +1,289 @@ +from __future__ import annotations + +import dataclasses +import inspect +import json +import re +import shutil +import textwrap +from pathlib import Path +from typing import Literal + +import gradio + + +def _in_test_dir(): + """Check if the current working directory ends with gradio/js/gradio-preview/test.""" + return Path.cwd().parts[-4:] == ("gradio", "js", "gradio-preview", "test") + + +default_demo_code = """ +example = {name}().example_inputs() + +with gr.Blocks() as demo: + {name}(value=example, interactive=True) + {name}(value=example, interactive=False) +""" + + +@dataclasses.dataclass +class ComponentFiles: + template: str + demo_code: str = default_demo_code + python_file_name: str = "" + js_dir: str = "" + + def __post_init__(self): + self.js_dir = self.js_dir or self.template.lower() + self.python_file_name = self.python_file_name or f"{self.template.lower()}.py" + + +OVERRIDES = { + "AnnotatedImage": ComponentFiles( + template="AnnotatedImage", python_file_name="annotated_image.py" + ), + "HighlightedText": ComponentFiles( + template="HighlightedText", python_file_name="highlighted_text.py" + ), + "BarPlot": ComponentFiles( + template="BarPlot", python_file_name="bar_plot.py", js_dir="plot" + ), + "ClearButton": ComponentFiles( + template="ClearButton", python_file_name="clear_button.py", js_dir="button" + ), + "ColorPicker": ComponentFiles( + template="ColorPicker", python_file_name="color_picker.py" + ), + "DuplicateButton": ComponentFiles( + template="DuplicateButton", + 
python_file_name="duplicate_button.py", + js_dir="button", + ), + "LinePlot": ComponentFiles( + template="LinePlot", python_file_name="line_plot.py", js_dir="plot" + ), + "LogoutButton": ComponentFiles( + template="LogoutButton", python_file_name="logout_button.py", js_dir="button" + ), + "LoginButton": ComponentFiles( + template="LoginButton", python_file_name="login_button.py", js_dir="button" + ), + "ScatterPlot": ComponentFiles( + template="ScatterPlot", python_file_name="scatter_plot.py", js_dir="plot" + ), + "UploadButton": ComponentFiles( + template="UploadButton", python_file_name="upload_button.py" + ), + "JSON": ComponentFiles(template="JSON", python_file_name="json_component.py"), + "Row": ComponentFiles( + template="Row", + demo_code=textwrap.dedent( + """ + with gr.Blocks() as demo: + with {name}(): + gr.Textbox(value="foo", interactive=True) + gr.Number(value=10, interactive=True) + """ + ), + ), + "Column": ComponentFiles( + template="Column", + demo_code=textwrap.dedent( + """ + with gr.Blocks() as demo: + with {name}(): + gr.Textbox(value="foo", interactive=True) + gr.Number(value=10, interactive=True) + """ + ), + ), + "Tabs": ComponentFiles( + template="Tabs", + demo_code=textwrap.dedent( + """ + with gr.Blocks() as demo: + with {name}(): + with gr.Tab("Tab 1"): + gr.Textbox(value="foo", interactive=True) + with gr.Tab("Tab 2"): + gr.Number(value=10, interactive=True) + """ + ), + ), + "Group": ComponentFiles( + template="Group", + demo_code=textwrap.dedent( + """ + with gr.Blocks() as demo: + with {name}(): + gr.Textbox(value="foo", interactive=True) + gr.Number(value=10, interactive=True) + """ + ), + ), + "Accordion": ComponentFiles( + template="Accordion", + demo_code=textwrap.dedent( + """ + with gr.Blocks() as demo: + with {name}(label="Accordion"): + gr.Textbox(value="foo", interactive=True) + gr.Number(value=10, interactive=True) + """ + ), + ), +} + + +def _get_component_code(template: str | None) -> ComponentFiles: + template = template or "Fallback" + if template in OVERRIDES: + return OVERRIDES[template] + else: + return ComponentFiles( + python_file_name=f"{template.lower()}.py", + js_dir=template.lower(), + template=template, + ) + + +def _get_js_dependency_version(name: str, local_js_dir: Path) -> str: + package_json = json.loads( + Path(local_js_dir / name.split("/")[1] / "package.json").read_text() + ) + return package_json["version"] + + +def _modify_js_deps( + package_json: dict, + key: Literal["dependencies", "devDependencies"], + gradio_dir: Path, +): + for dep in package_json.get(key, []): + # if curent working directory is the gradio repo, use the local version of the dependency' + if not _in_test_dir() and dep.startswith("@gradio/"): + package_json[key][dep] = _get_js_dependency_version( + dep, gradio_dir / "_frontend_code" + ) + return package_json + + +def delete_contents(directory: str | Path) -> None: + """Delete all contents of a directory, but not the directory itself.""" + path = Path(directory) + for child in path.glob("*"): + if child.is_file(): + child.unlink() + elif child.is_dir(): + delete_contents(child) + child.rmdir() + + +def _create_frontend(name: str, component: ComponentFiles, directory: Path): + frontend = directory / "frontend" + frontend.mkdir(exist_ok=True) + + p = Path(inspect.getfile(gradio)).parent + + def ignore(s, names): + ignored = [] + for n in names: + if ( + n.startswith("CHANGELOG") + or n.startswith("README.md") + or ".test." in n + or ".stories." in n + or ".spec." 
in n + ): + ignored.append(n) + return ignored + + shutil.copytree( + str(p / "_frontend_code" / component.js_dir), + frontend, + dirs_exist_ok=True, + ignore=ignore, + ) + source_package_json = json.loads(Path(frontend / "package.json").read_text()) + source_package_json["name"] = name.lower() + source_package_json = _modify_js_deps(source_package_json, "dependencies", p) + source_package_json = _modify_js_deps(source_package_json, "devDependencies", p) + (frontend / "package.json").write_text(json.dumps(source_package_json, indent=2)) + + +def _replace_old_class_name(old_class_name: str, new_class_name: str, content: str): + pattern = rf"(?<=\b)(?>", package_name)) + + demo_dir = directory / "demo" + demo_dir.mkdir(exist_ok=True, parents=True) + + (demo_dir / "app.py").write_text( + f""" +import gradio as gr +from {package_name} import {name} + +{component.demo_code.format(name=name)} + +demo.launch() +""" + ) + (demo_dir / "__init__.py").touch() + + init = backend / "__init__.py" + init.write_text( + f""" +from .{name.lower()} import {name} + +__all__ = ['{name}'] +""" + ) + + p = Path(inspect.getfile(gradio)).parent + python_file = backend / f"{name.lower()}.py" + + shutil.copy( + str(p / module / component.python_file_name), + str(python_file), + ) + + source_pyi_file = p / module / component.python_file_name.replace(".py", ".pyi") + pyi_file = backend / f"{name.lower()}.pyi" + if source_pyi_file.exists(): + shutil.copy(str(source_pyi_file), str(pyi_file)) + + content = python_file.read_text() + python_file.write_text(_replace_old_class_name(component.template, name, content)) + if pyi_file.exists(): + pyi_content = pyi_file.read_text() + pyi_file.write_text( + _replace_old_class_name(component.template, name, pyi_content) + ) diff --git a/gradio/cli/commands/components/app.py b/gradio/cli/commands/components/app.py new file mode 100644 index 000000000000..7c2c391ebe5d --- /dev/null +++ b/gradio/cli/commands/components/app.py @@ -0,0 +1,14 @@ +from typer import Typer + +from .build import _build +from .create import _create +from .dev import _dev + +app = Typer(help="Create and publish a new Gradio component") + +app.command("create", help="Create a new component.")(_create) +app.command( + "build", + help="Build the component for distribution. 
Must be called from the component directory.", +)(_build) +app.command("dev", help="Launch the custom component demo in development mode.")(_dev) diff --git a/gradio/cli/commands/components/build.py b/gradio/cli/commands/components/build.py new file mode 100644 index 000000000000..87487e011660 --- /dev/null +++ b/gradio/cli/commands/components/build.py @@ -0,0 +1,69 @@ +import shutil +import subprocess +import sys +from pathlib import Path + +import typer +from typing_extensions import Annotated + +import gradio +from gradio.cli.commands.display import LivePanelDisplay + +gradio_template_path = Path(gradio.__file__).parent / "templates" / "frontend" +gradio_node_path = Path(gradio.__file__).parent / "node" / "dev" / "files" / "index.js" + + +def _build( + path: Annotated[ + Path, typer.Argument(help="The directory of the custom component.") + ] = Path("."), + build_frontend: Annotated[ + bool, typer.Option(help="Whether to build the frontend as well.") + ] = True, +): + name = Path(path).resolve() + if not (name / "pyproject.toml").exists(): + raise ValueError(f"Cannot find pyproject.toml file in {name}") + + with LivePanelDisplay() as live: + live.update( + f":package: Building package in [orange3]{str(name.name)}[/]", add_sleep=0.2 + ) + if build_frontend: + live.update(":art: Building frontend") + component_directory = path.resolve() + + node = shutil.which("node") + if not node: + raise ValueError("node must be installed in order to run dev mode.") + + node_cmds = [ + node, + gradio_node_path, + "--component-directory", + component_directory, + "--root", + gradio_template_path, + "--mode", + "build", + ] + + pipe = subprocess.run(node_cmds, capture_output=True, text=True) + if pipe.returncode != 0: + live.update(":red_square: Build failed!") + live.update(pipe.stderr) + return + else: + live.update(":white_check_mark: Build succeeded!") + + cmds = [sys.executable, "-m", "build", str(name)] + live.update(f":construction_worker: Building... [grey37]({' '.join(cmds)})[/]") + pipe = subprocess.run(cmds, capture_output=True, text=True) + if pipe.returncode != 0: + live.update(":red_square: Build failed!") + live.update(pipe.stderr) + else: + live.update(":white_check_mark: Build succeeded!") + live.update( + f":ferris_wheel: Wheel located in [orange3]{str(name / 'dist')}[/]" + ) diff --git a/gradio/cli/commands/components/create.py b/gradio/cli/commands/components/create.py new file mode 100644 index 000000000000..88a1e0fb94ae --- /dev/null +++ b/gradio/cli/commands/components/create.py @@ -0,0 +1,127 @@ +import shutil +import subprocess +from pathlib import Path +from typing import Optional + +import typer +from rich.markup import escape +from typing_extensions import Annotated + +from gradio.cli.commands.display import LivePanelDisplay +from gradio.utils import set_directory + +from . import _create_utils + + +def _create( + name: Annotated[ + str, + typer.Argument( + help="Name of the component. Preferably in camel case, i.e. MyTextBox." + ), + ], + directory: Annotated[ + Optional[Path], + typer.Option( + help="Directory to create the component in. Default is None. If None, will be created in directory in the current directory." + ), + ] = None, + package_name: Annotated[ + Optional[str], + typer.Option(help="Name of the package. Default is gradio_{name.lower()}"), + ] = None, + template: Annotated[ + str, + typer.Option( + help="Component to use as a template. Should use exact name of python class." 
+ ), + ] = "", + install: Annotated[ + bool, + typer.Option( + help="Whether to install the component in your current environment as a development install. Recommended for development." + ), + ] = False, + npm_install: Annotated[ + str, + typer.Option(help="NPM install command to use. Default is 'npm install'."), + ] = "npm install", + overwrite: Annotated[ + bool, + typer.Option(help="Whether to overwrite the existing component if it exists."), + ] = False, +): + if not directory: + directory = Path(name.lower()) + if not package_name: + package_name = f"gradio_{name.lower()}" + + if directory.exists() and not overwrite: + raise ValueError( + f"The directory {directory.resolve()} already exists. " + "Please set --overwrite flag or pass in the name " + "of a directory that does not already exist via the --directory option." + ) + elif directory.exists() and overwrite: + _create_utils.delete_contents(directory) + + directory.mkdir(exist_ok=overwrite) + + if _create_utils._in_test_dir(): + npm_install = f"{shutil.which('pnpm')} i --ignore-scripts" + + npm_install = npm_install.strip() + if npm_install == "npm install": + npm = shutil.which("npm") + if not npm: + raise ValueError( + "By default, the install command uses npm to install " + "the frontend dependencies. Please install npm or pass your own install command " + "via the --npm-install option." + ) + npm_install = f"{npm} install" + + with LivePanelDisplay() as live: + live.update( + f":building_construction: Creating component [orange3]{name}[/] in directory [orange3]{directory}[/]", + add_sleep=0.2, + ) + if template: + live.update(f":fax: Starting from template [orange3]{template}[/]") + else: + live.update(":page_facing_up: Creating a new component from scratch.") + + component = _create_utils._get_component_code(template) + + _create_utils._create_backend(name, component, directory, package_name) + live.update(":snake: Created backend code", add_sleep=0.2) + + _create_utils._create_frontend(name.lower(), component, directory=directory) + live.update(":art: Created frontend code", add_sleep=0.2) + + if install: + cmds = [shutil.which("pip"), "install", "-e", f"{str(directory)}[dev]"] + live.update( + f":construction_worker: Installing python... [grey37]({escape(' '.join(cmds))})[/]" + ) + pipe = subprocess.run(cmds, capture_output=True, text=True) + + if pipe.returncode != 0: + live.update(":red_square: Python installation [bold][red]failed[/][/]") + live.update(pipe.stderr) + else: + live.update(":white_check_mark: Python install succeeded!") + + live.update( + f":construction_worker: Installing javascript... 
[grey37]({npm_install})[/]" + ) + with set_directory(directory / "frontend"): + pipe = subprocess.run( + npm_install.split(), capture_output=True, text=True + ) + if pipe.returncode != 0: + live.update(":red_square: NPM install [bold][red]failed[/][/]") + live.update(pipe.stdout) + live.update(pipe.stderr) + else: + live.update(":white_check_mark: NPM install succeeded!") diff --git a/gradio/cli/commands/components/dev.py b/gradio/cli/commands/components/dev.py new file mode 100644 index 000000000000..30b8e6140be6 --- /dev/null +++ b/gradio/cli/commands/components/dev.py @@ -0,0 +1,83 @@ +import shutil +import subprocess +from pathlib import Path + +import typer +from rich import print +from typing_extensions import Annotated + +import gradio + +gradio_template_path = Path(gradio.__file__).parent / "templates" / "frontend" +gradio_node_path = Path(gradio.__file__).parent / "node" / "dev" / "files" / "index.js" + + +def _dev( + app: Annotated[ + Path, + typer.Argument( + help="The path to the app. By default, looks for demo/app.py in the current directory." + ), + ] = Path("demo") + / "app.py", + component_directory: Annotated[ + Path, + typer.Option( + help="The directory with the custom component source code. By default, uses the current directory." + ), + ] = Path("."), + host: Annotated[ + str, + typer.Option( + help="The host to run the front end server on. Defaults to localhost.", + ), + ] = "localhost", +): + component_directory = component_directory.resolve() + + print(f":recycle: [green]Launching[/] {app} in reload mode\n") + + node = shutil.which("node") + if not node: + raise ValueError("node must be installed in order to run dev mode.") + + proc = subprocess.Popen( + [ + node, + gradio_node_path, + "--component-directory", + component_directory, + "--root", + gradio_template_path, + "--app", + str(app), + "--mode", + "dev", + "--host", + host, + ], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) + while True: + proc.poll() + text = proc.stdout.readline() # type: ignore + err = proc.stderr.readline() # type: ignore + + text = ( + text.decode("utf-8") + .replace("Changes detected in:", "[orange3]Changed detected in:[/]") + .replace("Watching:", "[orange3]Watching:[/]") + .replace("Running on local URL", "[orange3]Backend Server[/]") + ) + + if "[orange3]Watching:[/]" in text: + text += f"'{str(component_directory / 'frontend').strip()}'," + if "To create a public link" in text: + continue + print(text) + print(err.decode("utf-8")) + + if proc.returncode is not None: + print("Backend server failed to launch. 
Exiting.") + return diff --git a/gradio/cli/commands/components/files/gitignore b/gradio/cli/commands/components/files/gitignore new file mode 100644 index 000000000000..60188eefb61f --- /dev/null +++ b/gradio/cli/commands/components/files/gitignore @@ -0,0 +1,9 @@ +.eggs/ +dist/ +*.pyc +__pycache__/ +*.py[cod] +*$py.class +__tmp/* +*.pyi +node_modules \ No newline at end of file diff --git a/gradio/cli/commands/components/files/pyproject_.toml b/gradio/cli/commands/components/files/pyproject_.toml new file mode 100644 index 000000000000..ceaf3013f265 --- /dev/null +++ b/gradio/cli/commands/components/files/pyproject_.toml @@ -0,0 +1,47 @@ +[build-system] +requires = [ + "hatchling", + "hatch-requirements-txt", + "hatch-fancy-pypi-readme>=22.5.0", +] +build-backend = "hatchling.build" + +[project] +name = "<>" +version = "0.0.1" +description = "Python library for easily interacting with trained machine learning models" +license = "Apache-2.0" +requires-python = ">=3.8" +authors = [{ name = "YOUR NAME", email = "YOUREMAIL@domain.com" }] +keywords = [ + "machine learning", + "reproducibility", + "visualization", + "gradio", + "gradio custom component", +] +# Add dependencies here +dependencies = ["gradio"] +classifiers = [ + 'Development Status :: 3 - Alpha', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3 :: Only', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: 3.11', + 'Topic :: Scientific/Engineering', + 'Topic :: Scientific/Engineering :: Artificial Intelligence', + 'Topic :: Scientific/Engineering :: Visualization', +] + +[project.optional-dependencies] +dev = ["build", "twine"] + +[tool.hatch.build] +artifacts = ["/backend/<>/templates", "*.pyi"] + +[tool.hatch.build.targets.wheel] +packages = ["/backend/<>"] diff --git a/gradio/deploy_space.py b/gradio/cli/commands/deploy_space.py similarity index 93% rename from gradio/deploy_space.py rename to gradio/cli/commands/deploy_space.py index 9014b4e24ea2..c82564456335 100644 --- a/gradio/deploy_space.py +++ b/gradio/cli/commands/deploy_space.py @@ -1,10 +1,13 @@ from __future__ import annotations -import argparse import os import re +from typing import Optional import huggingface_hub +from rich import print +from typer import Option +from typing_extensions import Annotated import gradio as gr @@ -115,17 +118,16 @@ def format_title(title: str): return title -def deploy(): +def deploy( + title: Annotated[Optional[str], Option(help="Spaces app title")] = None, + app_file: Annotated[ + Optional[str], Option(help="File containing the Gradio app") + ] = None, +): if ( os.getenv("SYSTEM") == "spaces" ): # in case a repo with this function is uploaded to spaces return - parser = argparse.ArgumentParser(description="Deploy to Spaces") - parser.add_argument("deploy") - parser.add_argument("--title", type=str, help="Spaces app title") - parser.add_argument("--app-file", type=str, help="File containing the Gradio app") - - args = parser.parse_args() hf_api = huggingface_hub.HfApi() whoami = None @@ -153,8 +155,8 @@ def deploy(): f"Creating new Spaces Repo in '{repo_directory}'. Collecting metadata, press Enter to accept default value." 
) configuration = add_configuration_to_readme( - args.title, - args.app_file, + title, + app_file, ) space_id = huggingface_hub.create_repo( diff --git a/gradio/cli/commands/display.py b/gradio/cli/commands/display.py new file mode 100644 index 000000000000..c3e5c87c48f9 --- /dev/null +++ b/gradio/cli/commands/display.py @@ -0,0 +1,32 @@ +from __future__ import annotations + +import time +from types import TracebackType +from typing import Optional + +from rich.live import Live +from rich.panel import Panel + + +class LivePanelDisplay: + def __init__(self, msg: str | None = None) -> None: + self.lines = [msg] if msg else [] + self._panel = Live(Panel("\n".join(self.lines)), refresh_per_second=5) + + def update(self, msg: str, add_sleep: float | None = None): + self.lines.append(msg) + self._panel.update(Panel("\n".join(self.lines))) + if add_sleep: + time.sleep(add_sleep) + + def __enter__(self) -> LivePanelDisplay: + self._panel.__enter__() + return self + + def __exit__( + self, + exc_type: Optional[type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + self._panel.stop() diff --git a/gradio/reload.py b/gradio/cli/commands/reload.py similarity index 62% rename from gradio/reload.py rename to gradio/cli/commands/reload.py index c072c4c0e5f3..a337f9faa036 100644 --- a/gradio/reload.py +++ b/gradio/cli/commands/reload.py @@ -5,6 +5,8 @@ $ gradio app.py, to run app.py in reload mode where any changes in the app.py file or Gradio library reloads the demo. $ gradio app.py my_demo, to use variable names other than "demo" """ +from __future__ import annotations + import inspect import os import re @@ -12,6 +14,10 @@ import sys import threading from pathlib import Path +from typing import List, Optional + +import typer +from rich import print import gradio from gradio import utils @@ -19,21 +25,12 @@ reload_thread = threading.local() -def _setup_config(): - args = sys.argv[1:] - if len(args) == 0: - raise ValueError("No file specified.") - if len(args) == 1 or args[1].startswith("--"): - demo_name = "demo" - else: - demo_name = args[1] - if "." in demo_name: - demo_name = demo_name.split(".")[0] - print( - "\nWARNING: As of Gradio 3.41.0, the parameter after the file path must be the name of the Gradio demo, not the FastAPI app. In most cases, this just means you should remove '.app' after the name of your demo, e.g. 'demo.app' -> 'demo'." - ) - - original_path = args[0] +def _setup_config( + demo_path: Path, + demo_name: str = "demo", + additional_watch_dirs: list[str] | None = None, +): + original_path = demo_path app_text = Path(original_path).read_text() patterns = [ @@ -42,21 +39,18 @@ def _setup_config(): f"{demo_name} = gr\\.Interface", f"{demo_name} = gr\\.ChatInterface", f"{demo_name} = gr\\.Series", - f"{demo_name} = gr\\.Paralles", + f"{demo_name} = gr\\.Parallel", f"{demo_name} = gr\\.TabbedInterface", ] if not any(re.search(p, app_text) for p in patterns): print( - f"\nWarning: Cannot statically find a gradio demo called {demo_name}. " + f"\n[bold red]Warning[/]: Cannot statically find a gradio demo called {demo_name}. " "Reload work may fail." 
) abs_original_path = utils.abspath(original_path) - path = os.path.normpath(original_path) - path = path.replace("/", ".") - path = path.replace("\\", ".") - filename = os.path.splitext(path)[0] + filename = Path(original_path).stem gradio_folder = Path(inspect.getfile(gradio)).parent @@ -76,6 +70,21 @@ def _setup_config(): message += "," message += f" '{abs_parent}'" + abs_parent = Path(".").resolve() + if str(abs_parent).strip(): + watching_dirs.append(abs_parent) + if message_change_count == 1: + message += "," + message += f" '{abs_parent}'" + + for wd in additional_watch_dirs or []: + if Path(wd) not in watching_dirs: + watching_dirs.append(wd) + + if message_change_count == 1: + message += "," + message += f" '{wd}'" + print(message + "\n") # guaranty access to the module of an app @@ -83,13 +92,16 @@ def _setup_config(): return filename, abs_original_path, [str(s) for s in watching_dirs], demo_name -def main(): +def main( + demo_path: Path, demo_name: str = "demo", watch_dirs: Optional[List[str]] = None +): # default execution pattern to start the server and watch changes - filename, path, watch_dirs, demo_name = _setup_config() - args = sys.argv[1:] - extra_args = args[1:] if len(args) == 1 or args[1].startswith("--") else args[2:] + filename, path, watch_dirs, demo_name = _setup_config( + demo_path, demo_name, watch_dirs + ) + # extra_args = args[1:] if len(args) == 1 or args[1].startswith("--") else args[2:] popen = subprocess.Popen( - ["python", path] + extra_args, + [sys.executable, "-u", path], env=dict( os.environ, GRADIO_WATCH_DIRS=",".join(watch_dirs), @@ -101,4 +113,4 @@ def main(): if __name__ == "__main__": - main() + typer.run(main) diff --git a/gradio/cli/commands/utils.py b/gradio/cli/commands/utils.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/gradio/component_meta.py b/gradio/component_meta.py new file mode 100644 index 000000000000..3340c0bda66a --- /dev/null +++ b/gradio/component_meta.py @@ -0,0 +1,191 @@ +from __future__ import annotations + +import ast +import inspect +from abc import ABCMeta +from functools import wraps +from pathlib import Path + +from jinja2 import Template + +from gradio.data_classes import GradioModel, GradioRootModel +from gradio.events import EventListener +from gradio.exceptions import ComponentDefinitionError + +INTERFACE_TEMPLATE = ''' +{{ contents }} + + {% for event in events %} + def {{ event }}(self, + fn: Callable | None, + inputs: Component | Sequence[Component] | set[Component] | None = None, + outputs: Component | Sequence[Component] | None = None, + api_name: str | None | Literal[False] = None, + status_tracker: None = None, + scroll_to_output: bool = False, + show_progress: Literal["full", "minimal", "hidden"] = "full", + queue: bool | None = None, + batch: bool = False, + max_batch_size: int = 4, + preprocess: bool = True, + postprocess: bool = True, + cancels: dict[str, Any] | list[dict[str, Any]] | None = None, + every: float | None = None, + _js: str | None = None,) -> Dependency: + """ + Parameters: + fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component. + inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list. + outputs: List of gradio.components to use as outputs. 
If the function returns no outputs, this should be an empty list. + api_name: Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name. + status_tracker: Deprecated and has no effect. + scroll_to_output: If True, will scroll to output component on completion + show_progress: If True, will show progress animation while pending + queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app. + batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component. + max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True) + preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component). + postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser. + cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish. + every: Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled. + """ + ... 
+ {% endfor %} +''' + + +def serializes(f): + @wraps(f) + def serialize(*args, **kwds): + output = f(*args, **kwds) + if isinstance(output, (GradioRootModel, GradioModel)): + output = output.model_dump() + return output + + return serialize + + +def create_pyi(class_code: str, events: list[EventListener | str]): + template = Template(INTERFACE_TEMPLATE) + events = [e if isinstance(e, str) else e.event_name for e in events] + return template.render(events=events, contents=class_code) + + +def extract_class_source_code( + code: str, class_name: str +) -> tuple[str, int] | tuple[None, None]: + class_start_line = code.find(f"class {class_name}") + if class_start_line == -1: + return None, None + + class_ast = ast.parse(code) + for node in ast.walk(class_ast): + if isinstance(node, ast.ClassDef) and node.name == class_name: + segment = ast.get_source_segment(code, node) + assert segment + return segment, node.lineno + return None, None + + +def create_or_modify_pyi( + component_class: type, class_name: str, events: list[str | EventListener] +): + source_file = Path(inspect.getfile(component_class)) + + source_code = source_file.read_text() + + current_impl, lineno = extract_class_source_code(source_code, class_name) + + assert current_impl + assert lineno + new_interface = create_pyi(current_impl, events) + + pyi_file = source_file.with_suffix(".pyi") + if not pyi_file.exists(): + last_empty_line_before_class = -1 + lines = source_code.splitlines() + for i, line in enumerate(lines): + if line in ["", " "]: + last_empty_line_before_class = i + if i >= lineno: + break + lines = ( + lines[:last_empty_line_before_class] + + ["from gradio.events import Dependency"] + + lines[last_empty_line_before_class:] + ) + pyi_file.write_text("\n".join(lines)) + current_interface, _ = extract_class_source_code(pyi_file.read_text(), class_name) + if not current_interface: + with open(str(pyi_file), mode="a") as f: + f.write(new_interface) + else: + contents = pyi_file.read_text() + contents = contents.replace(current_interface, new_interface.strip()) + pyi_file.write_text(contents) + + +def in_event_listener(): + from gradio.context import LocalContext + + return LocalContext.in_event_listener.get() + + +def updateable(fn): + @wraps(fn) + def wrapper(*args, **kwargs): + fn_args = inspect.getfullargspec(fn).args + self = args[0] + for i, arg in enumerate(args): + if i == 0 or i >= len(fn_args): # skip self, *args + continue + arg_name = fn_args[i] + kwargs[arg_name] = arg + self.constructor_args = kwargs + if in_event_listener(): + return None + else: + return fn(self, **kwargs) + + return wrapper + + +class ComponentMeta(ABCMeta): + def __new__(cls, name, bases, attrs): + if name in {"Component", "ComponentBase"}: + return super().__new__(cls, name, bases, attrs) + if "__init__" in attrs: + attrs["__init__"] = updateable(attrs["__init__"]) + if "EVENTS" not in attrs: + found = False + for base in bases: + if hasattr(base, "EVENTS"): + found = True + break + if not found: + raise ComponentDefinitionError( + f"{name} or its base classes must define an EVENTS list. " + "If no events are supported, set it to an empty list." + ) + events = attrs.get("EVENTS", []) + if not all(isinstance(e, (str, EventListener)) for e in events): + raise ComponentDefinitionError( + f"All events for {name} must either be a string or an instance 
+ ) + new_events = [] + for event in events: + trigger = ( + event + if isinstance(event, EventListener) + else EventListener(event_name=event) + ) + new_events.append(trigger) + attrs[event] = trigger.listener + if "EVENTS" in attrs: + attrs["EVENTS"] = new_events + if "postprocess" in attrs: + attrs["postprocess"] = serializes(attrs["postprocess"]) + component_class = super().__new__(cls, name, bases, attrs) + create_or_modify_pyi(component_class, name, events) + return component_class diff --git a/gradio/components/__init__.py b/gradio/components/__init__.py index e98213f21e81..55c5c4fd9545 100644 --- a/gradio/components/__init__.py +++ b/gradio/components/__init__.py @@ -2,12 +2,10 @@ from gradio.components.audio import Audio from gradio.components.bar_plot import BarPlot from gradio.components.base import ( - Column, Component, - Form, FormComponent, - IOComponent, - Row, + StreamingInput, + StreamingOutput, _Keywords, component, get_component_instance, @@ -24,13 +22,13 @@ from gradio.components.dataset import Dataset from gradio.components.dropdown import Dropdown from gradio.components.duplicate_button import DuplicateButton +from gradio.components.fallback import Fallback from gradio.components.file import File from gradio.components.file_explorer import FileExplorer from gradio.components.gallery import Gallery from gradio.components.highlighted_text import HighlightedText from gradio.components.html import HTML from gradio.components.image import Image -from gradio.components.interpretation import Interpretation from gradio.components.json_component import JSON from gradio.components.label import Label from gradio.components.line_plot import LinePlot @@ -46,9 +44,9 @@ from gradio.components.state import State, Variable from gradio.components.status_tracker import StatusTracker from gradio.components.textbox import Textbox -from gradio.components.timeseries import Timeseries from gradio.components.upload_button import UploadButton from gradio.components.video import Video +from gradio.layouts import Form Text = Textbox DataFrame = Dataframe @@ -56,7 +54,6 @@ Annotatedimage = AnnotatedImage Highlight = HighlightedText Checkboxgroup = CheckboxGroup -TimeSeries = Timeseries Json = JSON __all__ = [ @@ -74,19 +71,17 @@ "CheckboxGroup", "Code", "ColorPicker", - "Column", "Dataframe", "DataFrame", "Dataset", "DuplicateButton", + "Fallback", "Form", "FormComponent", "Gallery", "HTML", "FileExplorer", "Image", - "IOComponent", - "Interpretation", "JSON", "Json", "Label", @@ -101,17 +96,14 @@ "HighlightedText", "AnnotatedImage", "CheckboxGroup", - "Timeseries", "Text", "Highlightedtext", "Annotatedimage", "Highlight", "Checkboxgroup", - "TimeSeries", "Number", "Plot", "Radio", - "Row", "ScatterPlot", "Slider", "State", @@ -119,4 +111,6 @@ "StatusTracker", "UploadButton", "Video", + "StreamingInput", + "StreamingOutput", ] diff --git a/gradio/components/annotated_image.py b/gradio/components/annotated_image.py index b3034c17e5f4..d231476b6e65 100644 --- a/gradio/components/annotated_image.py +++ b/gradio/components/annotated_image.py @@ -3,28 +3,34 @@ from __future__ import annotations import warnings -from typing import Literal +from typing import Any, List, Literal import numpy as np from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import JSONSerializable from PIL import Image as _Image # using _ to minimize namespace pollution -from gradio import utils -from gradio.components.base import IOComponent, _Keywords -from 
gradio.deprecation import warn_style_method_deprecation -from gradio.events import ( - EventListenerMethod, - Selectable, -) +from gradio import processing_utils, utils +from gradio.components.base import Component, _Keywords +from gradio.data_classes import FileData, GradioModel +from gradio.events import Events set_documentation_group("component") _Image.init() # fixes https://github.com/gradio-app/gradio/issues/2843 +class Annotation(GradioModel): + image: FileData + label: str + + +class AnnotatedImageData(GradioModel): + image: FileData + annotations: List[Annotation] + + @document() -class AnnotatedImage(Selectable, IOComponent, JSONSerializable): +class AnnotatedImage(Component): """ Displays a base image and colored subsections on top of that image. Subsections can take the from of rectangles (e.g. object detection) or masks (e.g. image segmentation). Preprocessing: this component does *not* accept input. @@ -33,6 +39,10 @@ class AnnotatedImage(Selectable, IOComponent, JSONSerializable): Demos: image_segmentation """ + EVENTS = [Events.select] + + data_model = AnnotatedImageData + def __init__( self, value: tuple[ @@ -77,14 +87,7 @@ def __init__( self.height = height self.width = width self.color_map = color_map - self.select: EventListenerMethod - """ - Event listener for when the user selects Image subsection. - Uses event data gradio.SelectData to carry `value` referring to selected subsection label, and `index` to refer to subsection index. - See EventData documentation on how to use this event data. - """ - IOComponent.__init__( - self, + super().__init__( label=label, every=every, show_label=show_label, @@ -141,7 +144,7 @@ def postprocess( np.ndarray | _Image.Image | str, list[tuple[np.ndarray | tuple[int, int, int, int], str]], ], - ) -> tuple[dict, list[tuple[dict, str]]] | None: + ) -> AnnotatedImageData | None: """ Parameters: y: Tuple of base image and list of subsections, with each subsection a two-part tuple where the first element is a 4 element bounding box or a 0-1 confidence mask, and the second element is the label. @@ -155,17 +158,20 @@ def postprocess( base_img_path = base_img base_img = np.array(_Image.open(base_img)) elif isinstance(base_img, np.ndarray): - base_file = self.img_array_to_temp_file(base_img, dir=self.DEFAULT_TEMP_DIR) + base_file = processing_utils.save_img_array_to_cache( + base_img, cache_dir=self.GRADIO_CACHE + ) base_img_path = str(utils.abspath(base_file)) elif isinstance(base_img, _Image.Image): - base_file = self.pil_to_temp_file(base_img, dir=self.DEFAULT_TEMP_DIR) + base_file = processing_utils.save_pil_to_cache( + base_img, cache_dir=self.GRADIO_CACHE + ) base_img_path = str(utils.abspath(base_file)) base_img = np.array(base_img) else: raise ValueError( "AnnotatedImage only accepts filepaths, PIL images or numpy arrays for the base image." 
) - self.temp_files.add(base_img_path) sections = [] color_map = self.color_map or {} @@ -203,34 +209,23 @@ def hex_to_rgb(value): colored_mask_img = _Image.fromarray((colored_mask).astype(np.uint8)) - mask_file = self.pil_to_temp_file( - colored_mask_img, dir=self.DEFAULT_TEMP_DIR + mask_file = processing_utils.save_pil_to_cache( + colored_mask_img, cache_dir=self.GRADIO_CACHE ) mask_file_path = str(utils.abspath(mask_file)) - self.temp_files.add(mask_file_path) - sections.append( - ({"name": mask_file_path, "data": None, "is_file": True}, label) + Annotation( + image=FileData(name=mask_file_path, is_file=True), label=label + ) ) - return {"name": base_img_path, "data": None, "is_file": True}, sections + return AnnotatedImageData( + image=FileData(name=base_img_path, data=None, is_file=True), + annotations=sections, + ) - def style( - self, - *, - height: int | None = None, - width: int | None = None, - color_map: dict[str, str] | None = None, - **kwargs, - ): - """ - This method is deprecated. Please set these arguments in the constructor instead. - """ - warn_style_method_deprecation() - if height is not None: - self.height = height - if width is not None: - self.width = width - if color_map is not None: - self.color_map = color_map - return self + def example_inputs(self) -> Any: + return {} + + def preprocess(self, x: Any) -> Any: + return x diff --git a/gradio/components/audio.py b/gradio/components/audio.py index 8e78a7d8d468..7e786f1ee173 100644 --- a/gradio/components/audio.py +++ b/gradio/components/audio.py @@ -2,46 +2,33 @@ from __future__ import annotations -import tempfile import warnings from pathlib import Path from typing import Any, Callable, Literal import numpy as np import requests -from gradio_client import media_data from gradio_client import utils as client_utils from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import FileSerializable from gradio import processing_utils, utils -from gradio.components.base import IOComponent, _Keywords -from gradio.events import ( - Changeable, - Clearable, - Playable, - Recordable, - Streamable, - StreamableOutput, - Uploadable, -) -from gradio.interpretation import TokenInterpretable +from gradio.components.base import Component, StreamingInput, StreamingOutput, _Keywords +from gradio.data_classes import FileData +from gradio.events import Events set_documentation_group("component") +class AudioInputData(FileData): + crop_min: int = 0 + crop_max: int = 100 + + @document() class Audio( - Changeable, - Clearable, - Playable, - Recordable, - Streamable, - StreamableOutput, - Uploadable, - IOComponent, - FileSerializable, - TokenInterpretable, + StreamingInput, + StreamingOutput, + Component, ): """ Creates an audio component that can be used to upload/record audio (as an input) or display audio (as an output). 
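The hunks just above and below migrate Audio off the old IOComponent and event-mixin pattern onto the new Component base, whose ComponentMeta metaclass (added earlier in this diff) reads a declarative EVENTS list and an optional data_model. A rough, hypothetical sketch of what authoring a component against that API looks like; only Component, Events, and the EVENTS requirement come from this diff, while the class itself and its behavior are invented for illustration:

from __future__ import annotations

from typing import Any

from gradio.components.base import Component
from gradio.events import Events


class UppercaseTextbox(Component):
    # ComponentMeta refuses the class definition unless the class (or a base
    # class) declares EVENTS; an empty list is allowed for event-less components.
    EVENTS = [Events.change]

    def preprocess(self, x: str | None) -> str | None:
        # Payload from the frontend, before the user's fn runs.
        return x

    def postprocess(self, y: str | None) -> str | None:
        # Return value of the user's fn; ComponentMeta wraps postprocess so a
        # GradioModel return value would be serialized via .model_dump().
        return y.upper() if y is not None else None

    def example_inputs(self) -> Any:
        return "hello"

    def api_info(self) -> dict[str, Any]:
        # No data_model is declared here, so a schema is supplied by hand;
        # components that set data_model can rely on Component.api_info.
        return {"type": "string"}

Per the metaclass code added above, defining such a class should also wrap __init__ via updateable (so constructor arguments double as update payloads inside event listeners) and regenerate a .pyi stub next to the source file.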
@@ -52,6 +39,21 @@ class Audio( Guides: real-time-speech-recognition """ + EVENTS = [ + Events.stream, + Events.change, + Events.clear, + Events.play, + Events.pause, + Events.stop, + Events.pause, + Events.start_recording, + Events.stop_recording, + Events.upload, + ] + + data_model = FileData + def __init__( self, value: str | Path | tuple[int, np.ndarray] | Callable | None = None, @@ -125,8 +127,7 @@ def __init__( else show_share_button ) self.show_edit_button = show_edit_button - IOComponent.__init__( - self, + super().__init__( label=label, every=every, show_label=show_label, @@ -140,13 +141,9 @@ def __init__( value=value, **kwargs, ) - TokenInterpretable.__init__(self) - def example_inputs(self) -> dict[str, Any]: - return { - "raw": {"is_file": False, "data": media_data.BASE64_AUDIO}, - "serialized": "https://github.com/gradio-app/gradio/raw/main/test/test_files/audio_sample.wav", - } + def example_inputs(self) -> Any: + return "https://github.com/gradio-app/gradio/raw/main/test/test_files/audio_sample.wav" @staticmethod def update( @@ -195,33 +192,23 @@ def preprocess( """ if x is None: return x - file_name, file_data, is_file = ( - x["name"], - x["data"], - x.get("is_file", False), - ) - crop_min, crop_max = x.get("crop_min", 0), x.get("crop_max", 100) - if is_file: - if client_utils.is_http_url_like(file_name): - temp_file_path = self.download_temp_copy_if_needed(file_name) - else: - temp_file_path = self.make_temp_copy_if_needed(file_name) - else: - temp_file_path = self.base64_to_temp_file_if_needed(file_data, file_name) - sample_rate, data = processing_utils.audio_from_file( - temp_file_path, crop_min=crop_min, crop_max=crop_max - ) + payload: AudioInputData = AudioInputData(**x) + assert payload.name # Need a unique name for the file to avoid re-using the same audio file if # a user submits the same audio file twice, but with different crop min/max. - temp_file_path = Path(temp_file_path) + temp_file_path = Path(payload.name) output_file_name = str( temp_file_path.with_name( - f"{temp_file_path.stem}-{crop_min}-{crop_max}{temp_file_path.suffix}" + f"{temp_file_path.stem}-{payload.crop_min}-{payload.crop_max}{temp_file_path.suffix}" ) ) + sample_rate, data = processing_utils.audio_from_file( + temp_file_path, crop_min=payload.crop_min, crop_max=payload.crop_max + ) + if self.type == "numpy": return sample_rate, data elif self.type == "filepath": @@ -237,91 +224,9 @@ def preprocess( + ". Please choose from: 'numpy', 'filepath'." ) - def set_interpret_parameters(self, segments: int = 8): - """ - Calculates interpretation score of audio subsections by splitting the audio into subsections, then using a "leave one out" method to calculate the score of each subsection by removing the subsection and measuring the delta of the output value. - Parameters: - segments: Number of interpretation segments to split audio into. 
- """ - self.interpretation_segments = segments - return self - - def tokenize(self, x): - if x.get("is_file"): - sample_rate, data = processing_utils.audio_from_file(x["name"]) - else: - file_name = self.base64_to_temp_file_if_needed(x["data"]) - sample_rate, data = processing_utils.audio_from_file(file_name) - leave_one_out_sets = [] - tokens = [] - masks = [] - duration = data.shape[0] - boundaries = np.linspace(0, duration, self.interpretation_segments + 1).tolist() - boundaries = [round(boundary) for boundary in boundaries] - for index in range(len(boundaries) - 1): - start, stop = boundaries[index], boundaries[index + 1] - masks.append((start, stop)) - - # Handle the leave one outs - leave_one_out_data = np.copy(data) - leave_one_out_data[start:stop] = 0 - file = tempfile.NamedTemporaryFile( - delete=False, suffix=".wav", dir=self.DEFAULT_TEMP_DIR - ) - processing_utils.audio_to_file(sample_rate, leave_one_out_data, file.name) - out_data = client_utils.encode_file_to_base64(file.name) - leave_one_out_sets.append(out_data) - file.close() - Path(file.name).unlink() - - # Handle the tokens - token = np.copy(data) - token[0:start] = 0 - token[stop:] = 0 - file = tempfile.NamedTemporaryFile( - delete=False, suffix=".wav", dir=self.DEFAULT_TEMP_DIR - ) - processing_utils.audio_to_file(sample_rate, token, file.name) - token_data = client_utils.encode_file_to_base64(file.name) - file.close() - Path(file.name).unlink() - - tokens.append(token_data) - tokens = [{"name": "token.wav", "data": token} for token in tokens] - leave_one_out_sets = [ - {"name": "loo.wav", "data": loo_set} for loo_set in leave_one_out_sets - ] - return tokens, leave_one_out_sets, masks - - def get_masked_inputs(self, tokens, binary_mask_matrix): - # create a "zero input" vector and get sample rate - x = tokens[0]["data"] - file_name = self.base64_to_temp_file_if_needed(x) - sample_rate, data = processing_utils.audio_from_file(file_name) - zero_input = np.zeros_like(data, dtype="int16") - # decode all of the tokens - token_data = [] - for token in tokens: - file_name = self.base64_to_temp_file_if_needed(token["data"]) - _, data = processing_utils.audio_from_file(file_name) - token_data.append(data) - # construct the masked version - masked_inputs = [] - for binary_mask_vector in binary_mask_matrix: - masked_input = np.copy(zero_input) - for t, b in zip(token_data, binary_mask_vector): - masked_input = masked_input + t * int(b) - file = tempfile.NamedTemporaryFile(delete=False, dir=self.DEFAULT_TEMP_DIR) - processing_utils.audio_to_file(sample_rate, masked_input, file.name) - masked_data = client_utils.encode_file_to_base64(file.name) - file.close() - Path(file.name).unlink() - masked_inputs.append(masked_data) - return masked_inputs - def postprocess( self, y: tuple[int, np.ndarray] | str | Path | bytes | None - ) -> str | dict | bytes | None: + ) -> FileData | None | bytes: """ Parameters: y: audio data in either of the following formats: a tuple of (sample_rate, data), or a string filepath or URL to an audio file, or None. 
@@ -333,27 +238,23 @@ def postprocess( if isinstance(y, bytes): if self.streaming: return y - file_path = self.file_bytes_to_file(y, "audio") - elif isinstance(y, str) and client_utils.is_http_url_like(y): - return {"name": y, "data": None, "is_file": True} + file_path = processing_utils.save_bytes_to_cache( + y, "audio", cache_dir=self.GRADIO_CACHE + ) elif isinstance(y, tuple): sample_rate, data = y - file_path = self.audio_to_temp_file( - data, - sample_rate, - format=self.format, + file_path = processing_utils.save_audio_to_cache( + data, sample_rate, format=self.format, cache_dir=self.GRADIO_CACHE ) - self.temp_files.add(file_path) else: - file_path = self.make_temp_copy_if_needed(y) - return { - "name": file_path, - "data": None, - "is_file": True, - "orig_name": Path(file_path).name, - } + if not isinstance(y, (str, Path)): + raise ValueError(f"Cannot process {y} as Audio") + file_path = str(y) + return FileData(**{"name": file_path, "data": None, "is_file": True}) - def stream_output(self, y, output_id: str, first_chunk: bool): + def stream_output( + self, y, output_id: str, first_chunk: bool + ) -> tuple[bytes | None, Any]: output_file = { "name": output_id, "is_stream": True, @@ -385,11 +286,11 @@ def stream_output(self, y, output_id: str, first_chunk: bool): binary_data = binary_data[44:] return binary_data, output_file + def as_example(self, input_data: str | None) -> str: + return Path(input_data).name if input_data else "" + def check_streamable(self): - if self.source != "microphone": + if self.source != "microphone" and self.streaming: raise ValueError( "Audio streaming only available if source is 'microphone'." ) - - def as_example(self, input_data: str | None) -> str: - return Path(input_data).name if input_data else "" diff --git a/gradio/components/bar_plot.py b/gradio/components/bar_plot.py index f16bd4c379e1..06bf0e0ecb31 100644 --- a/gradio/components/bar_plot.py +++ b/gradio/components/bar_plot.py @@ -3,14 +3,14 @@ from __future__ import annotations import warnings -from typing import Callable, Literal +from typing import Any, Callable, Literal import altair as alt import pandas as pd from gradio_client.documentation import document, set_documentation_group from gradio.components.base import _Keywords -from gradio.components.plot import AltairPlot, Plot +from gradio.components.plot import AltairPlot, AltairPlotData, Plot set_documentation_group("component") @@ -26,6 +26,8 @@ class BarPlot(Plot): Demos: bar_plot, chicago-bikeshare-dashboard """ + data_model = AltairPlotData + def __init__( self, value: pd.DataFrame | Callable | None = None, @@ -339,8 +341,8 @@ def create_plot( y, # type: ignore title=y_title, # type: ignore scale=AltairPlot.create_scale(y_lim), # type: ignore - axis=alt.Axis(labelAngle=x_label_angle) - if x_label_angle is not None + axis=alt.Axis(labelAngle=y_label_angle) + if y_label_angle is not None else alt.Axis(), sort=sort if not vertical and sort is not None else None, ), @@ -380,7 +382,9 @@ def create_plot( return chart - def postprocess(self, y: pd.DataFrame | dict | None) -> dict[str, str] | None: + def postprocess( + self, y: pd.DataFrame | dict | None + ) -> AltairPlotData | dict | None: # if None or update if y is None or isinstance(y, dict): return y @@ -409,4 +413,12 @@ def postprocess(self, y: pd.DataFrame | dict | None) -> dict[str, str] | None: sort=self.sort, # type: ignore ) - return {"type": "altair", "plot": chart.to_json(), "chart": "bar"} + return AltairPlotData( + **{"type": "altair", "plot": chart.to_json(), "chart": "bar"} 
+ ) + + def example_inputs(self) -> dict[str, Any]: + return {} + + def preprocess(self, x: Any) -> Any: + return x diff --git a/gradio/components/base.py b/gradio/components/base.py index 4f0b1cf1fed0..5327dcdce685 100644 --- a/gradio/components/base.py +++ b/gradio/components/base.py @@ -4,34 +4,27 @@ from __future__ import annotations +import abc import hashlib +import json import os -import secrets -import shutil +import sys import tempfile -import urllib.request +from abc import ABC, abstractmethod from enum import Enum from pathlib import Path from typing import TYPE_CHECKING, Any, Callable -import aiofiles -import numpy as np -import requests -from fastapi import UploadFile -from gradio_client import utils as client_utils from gradio_client.documentation import set_documentation_group -from gradio_client.serializing import ( - Serializable, -) from PIL import Image as _Image # using _ to minimize namespace pollution -from gradio import processing_utils, utils -from gradio.blocks import Block, BlockContext, Updateable -from gradio.deprecation import warn_deprecation, warn_style_method_deprecation -from gradio.events import ( - EventListener, -) -from gradio.layouts import Column, Form, Row +from gradio import utils +from gradio.blocks import Block, BlockContext +from gradio.component_meta import ComponentMeta +from gradio.data_classes import GradioDataModel +from gradio.deprecation import warn_deprecation +from gradio.events import EventListener +from gradio.layouts import Form if TYPE_CHECKING: from typing import TypedDict @@ -50,78 +43,82 @@ class _Keywords(Enum): FINISHED_ITERATING = "FINISHED_ITERATING" # Used to skip processing of a component's value (needed for generators + state) -class Component(Updateable, Block, Serializable): - """ - A base class for defining the methods that all gradio components should have. - """ - - def __init__(self, *args, **kwargs): - Block.__init__(self, *args, **kwargs) - EventListener.__init__(self) - self.server_fns = [ - value - for value in self.__class__.__dict__.values() - if callable(value) and getattr(value, "_is_server_fn", False) - ] - - def __str__(self): - return self.__repr__() - - def __repr__(self): - return f"{self.get_block_name()}" +class ComponentBase(ABC, metaclass=ComponentMeta): + EVENTS: list[EventListener | str] = [] + @abstractmethod def preprocess(self, x: Any) -> Any: """ Any preprocessing needed to be performed on function input. """ return x + @abstractmethod def postprocess(self, y): """ Any postprocessing needed to be performed on function output. """ return y - def style(self, *args, **kwargs): + @abstractmethod + def as_example(self, y): """ - This method is deprecated. Please set these arguments in the Components constructor instead. + Return the input data in a way that can be displayed by the examples dataset component in the front-end. + + For example, only return the name of a file as opposed to a full path. Or get the head of a dataframe. + Must be able to be converted to a string to put in the config. """ - warn_style_method_deprecation() - put_deprecated_params_in_box = False - if "rounded" in kwargs: - warn_deprecation( - "'rounded' styling is no longer supported. To round adjacent components together, place them in a Column(variant='box')." - ) - if isinstance(kwargs["rounded"], (list, tuple)): - put_deprecated_params_in_box = True - kwargs.pop("rounded") - if "margin" in kwargs: - warn_deprecation( - "'margin' styling is no longer supported. 
To place adjacent components together without margin, place them in a Column(variant='box')." - ) - if isinstance(kwargs["margin"], (list, tuple)): - put_deprecated_params_in_box = True - kwargs.pop("margin") - if "border" in kwargs: - warn_deprecation( - "'border' styling is no longer supported. To place adjacent components in a shared border, place them in a Column(variant='box')." - ) - kwargs.pop("border") - for key in kwargs: - warn_deprecation(f"Unknown style parameter: {key}") - if ( - put_deprecated_params_in_box - and isinstance(self.parent, (Row, Column)) - and self.parent.variant == "default" - ): - self.parent.variant = "compact" - return self + pass - def get_config(self): - config = super().get_config() - if len(self.server_fns): - config["server_fns"] = [fn.__name__ for fn in self.server_fns] - return config + @abstractmethod + def api_info(self) -> dict[str, list[str]]: + """ + The typing information for this component as a dictionary whose values are a list of 2 strings: [Python type, language-agnostic description]. + Keys of the dictionary are: raw_input, raw_output, serialized_input, serialized_output + """ + pass + + @abstractmethod + def example_inputs(self) -> Any: + """ + The example inputs for this component as a dictionary whose values are example inputs compatible with this component. + Keys of the dictionary are: raw, serialized + """ + pass + + @abstractmethod + def flag(self, x: Any | GradioDataModel, flag_dir: str | Path = "") -> str: + """ + Write the component's value to a format that can be stored in a csv or jsonl format for flagging. + """ + pass + + @abstractmethod + def read_from_flag( + self, + x: Any, + flag_dir: str | Path | None = None, + ) -> GradioDataModel | Any: + """ + Convert the data from the csv or jsonl file into the component state. + """ + return x + + @property + @abstractmethod + def skip_api(self): + """Whether this component should be skipped from the api return value""" + + @classmethod + def has_event(cls, event: str | EventListener) -> bool: + return event in cls.EVENTS + + @classmethod + def get_component_class_id(cls) -> str: + module_name = cls.__module__ + module_path = sys.modules[module_name].__file__ + module_hash = hashlib.md5(f"{cls.__name__}_{module_path}".encode()).hexdigest() + return module_hash def server(fn): @@ -129,7 +126,7 @@ def server(fn): return fn -class IOComponent(Component): +class Component(ComponentBase, Block): """ A base class for defining methods that all input/output components should have. 
""" @@ -152,14 +149,27 @@ def __init__( every: float | None = None, **kwargs, ): + self.server_fns = [ + value + for value in self.__class__.__dict__.values() + if callable(value) and getattr(value, "_is_server_fn", False) + ] + + # This gets overriden when `select` is called + + self.selectable = False + if not hasattr(self, "data_model"): + self.data_model: type[GradioDataModel] | None = None self.temp_files: set[str] = set() - self.DEFAULT_TEMP_DIR = os.environ.get("GRADIO_TEMP_DIR") or str( + self.GRADIO_CACHE = os.environ.get("GRADIO_TEMP_DIR") or str( Path(tempfile.gettempdir()) / "gradio" ) - Component.__init__( + Block.__init__( self, elem_id=elem_id, elem_classes=elem_classes, visible=visible, **kwargs ) + if isinstance(self, StreamingInput): + self.check_streamable() self.label = label self.info = info @@ -181,7 +191,7 @@ def __init__( # load_event is set in the Blocks.attach_load_events method self.load_event: None | dict[str, Any] = None - self.load_event_to_attach = None + self.load_event_to_attach: None | tuple[Callable, float | None] = None load_fn, initial_value = self.get_load_fn_and_initial_value(value) self.value = ( initial_value @@ -191,160 +201,22 @@ def __init__( if callable(load_fn): self.attach_load_event(load_fn, every) - @staticmethod - def hash_file(file_path: str | Path, chunk_num_blocks: int = 128) -> str: - sha1 = hashlib.sha1() - with open(file_path, "rb") as f: - for chunk in iter(lambda: f.read(chunk_num_blocks * sha1.block_size), b""): - sha1.update(chunk) - return sha1.hexdigest() + self.component_class_id = self.__class__.get_component_class_id() - @staticmethod - def hash_url(url: str, chunk_num_blocks: int = 128) -> str: - sha1 = hashlib.sha1() - remote = urllib.request.urlopen(url) - max_file_size = 100 * 1024 * 1024 # 100MB - total_read = 0 - while True: - data = remote.read(chunk_num_blocks * sha1.block_size) - total_read += chunk_num_blocks * sha1.block_size - if not data or total_read > max_file_size: - break - sha1.update(data) - return sha1.hexdigest() + TEMPLATE_DIR = "./templates/" + FRONTEND_DIR = "../../frontend/" - @staticmethod - def hash_bytes(bytes: bytes): - sha1 = hashlib.sha1() - sha1.update(bytes) - return sha1.hexdigest() + def get_config(self): + config = super().get_config() + if self.info: + config["info"] = self.info + if len(self.server_fns): + config["server_fns"] = [fn.__name__ for fn in self.server_fns] + return config - @staticmethod - def hash_base64(base64_encoding: str, chunk_num_blocks: int = 128) -> str: - sha1 = hashlib.sha1() - for i in range(0, len(base64_encoding), chunk_num_blocks * sha1.block_size): - data = base64_encoding[i : i + chunk_num_blocks * sha1.block_size] - sha1.update(data.encode("utf-8")) - return sha1.hexdigest() - - def make_temp_copy_if_needed(self, file_path: str | Path) -> str: - """Returns a temporary file path for a copy of the given file path if it does - not already exist. 
Otherwise returns the path to the existing temp file.""" - temp_dir = self.hash_file(file_path) - temp_dir = Path(self.DEFAULT_TEMP_DIR) / temp_dir - temp_dir.mkdir(exist_ok=True, parents=True) - - name = client_utils.strip_invalid_filename_characters(Path(file_path).name) - full_temp_file_path = str(utils.abspath(temp_dir / name)) - - if not Path(full_temp_file_path).exists(): - shutil.copy2(file_path, full_temp_file_path) - - self.temp_files.add(full_temp_file_path) - return full_temp_file_path - - async def save_uploaded_file(self, file: UploadFile, upload_dir: str) -> str: - temp_dir = secrets.token_hex( - 20 - ) # Since the full file is being uploaded anyways, there is no benefit to hashing the file. - temp_dir = Path(upload_dir) / temp_dir - temp_dir.mkdir(exist_ok=True, parents=True) - - if file.filename: - file_name = Path(file.filename).name - name = client_utils.strip_invalid_filename_characters(file_name) - else: - name = f"tmp{secrets.token_hex(5)}" - - full_temp_file_path = str(utils.abspath(temp_dir / name)) - - async with aiofiles.open(full_temp_file_path, "wb") as output_file: - while True: - content = await file.read(100 * 1024 * 1024) - if not content: - break - await output_file.write(content) - - return full_temp_file_path - - def download_temp_copy_if_needed(self, url: str) -> str: - """Downloads a file and makes a temporary file path for a copy if does not already - exist. Otherwise returns the path to the existing temp file.""" - temp_dir = self.hash_url(url) - temp_dir = Path(self.DEFAULT_TEMP_DIR) / temp_dir - temp_dir.mkdir(exist_ok=True, parents=True) - - name = client_utils.strip_invalid_filename_characters(Path(url).name) - full_temp_file_path = str(utils.abspath(temp_dir / name)) - - if not Path(full_temp_file_path).exists(): - with requests.get(url, stream=True) as r, open( - full_temp_file_path, "wb" - ) as f: - shutil.copyfileobj(r.raw, f) - - self.temp_files.add(full_temp_file_path) - return full_temp_file_path - - def base64_to_temp_file_if_needed( - self, base64_encoding: str, file_name: str | None = None - ) -> str: - """Converts a base64 encoding to a file and returns the path to the file if - the file doesn't already exist. Otherwise returns the path to the existing file. 
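The cache helpers removed in this region (`make_temp_copy_if_needed`, `download_temp_copy_if_needed`, and the `hash_*` statics) share one idea: derive a cache sub-directory from a content hash so that re-processing the same input is a no-op. A minimal standard-library sketch of that pattern, with illustrative function and directory names:

```python
import hashlib
import shutil
import tempfile
from pathlib import Path


def hash_file(file_path: Path, chunk_num_blocks: int = 128) -> str:
    sha1 = hashlib.sha1()
    with open(file_path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_num_blocks * sha1.block_size), b""):
            sha1.update(chunk)
    return sha1.hexdigest()


def copy_to_cache(file_path: Path, cache_root: Path) -> Path:
    # One sub-directory per content hash; skip the copy if it already exists.
    target_dir = cache_root / hash_file(file_path)
    target_dir.mkdir(parents=True, exist_ok=True)
    target = target_dir / file_path.name
    if not target.exists():
        shutil.copy2(file_path, target)
    return target


src = Path(tempfile.mkdtemp()) / "demo.txt"
src.write_text("hello")
print(copy_to_cache(src, Path(tempfile.gettempdir()) / "demo_cache"))
```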
- """ - temp_dir = self.hash_base64(base64_encoding) - temp_dir = Path(self.DEFAULT_TEMP_DIR) / temp_dir - temp_dir.mkdir(exist_ok=True, parents=True) - - guess_extension = client_utils.get_extension(base64_encoding) - if file_name: - file_name = client_utils.strip_invalid_filename_characters(file_name) - elif guess_extension: - file_name = f"file.{guess_extension}" - else: - file_name = "file" - - full_temp_file_path = str(utils.abspath(temp_dir / file_name)) # type: ignore - - if not Path(full_temp_file_path).exists(): - data, _ = client_utils.decode_base64_to_binary(base64_encoding) - with open(full_temp_file_path, "wb") as fb: - fb.write(data) - - self.temp_files.add(full_temp_file_path) - return full_temp_file_path - - def pil_to_temp_file(self, img: _Image.Image, dir: str, format="png") -> str: - bytes_data = processing_utils.encode_pil_to_bytes(img, format) - temp_dir = Path(dir) / self.hash_bytes(bytes_data) - temp_dir.mkdir(exist_ok=True, parents=True) - filename = str(temp_dir / f"image.{format}") - img.save(filename, pnginfo=processing_utils.get_pil_metadata(img)) - return filename - - def img_array_to_temp_file(self, arr: np.ndarray, dir: str) -> str: - if arr.ndim not in (2, 3, 4): - raise ValueError( - "Input does not have the correct number of dimensions (2 for grayscale, 3 for RGB, 4 for RGBA)" - ) - pil_image = _Image.fromarray( - processing_utils._convert(arr, np.uint8, force_copy=False) - ) - return self.pil_to_temp_file(pil_image, dir, format="png") - - def audio_to_temp_file(self, data: np.ndarray, sample_rate: int, format: str): - temp_dir = Path(self.DEFAULT_TEMP_DIR) / self.hash_bytes(data.tobytes()) - temp_dir.mkdir(exist_ok=True, parents=True) - filename = str(temp_dir / f"audio.{format}") - processing_utils.audio_to_file(sample_rate, data, filename, format=format) - return filename - - def file_bytes_to_file(self, data: bytes, file_name: str): - path = Path(self.DEFAULT_TEMP_DIR) / self.hash_bytes(data) - path.mkdir(exist_ok=True, parents=True) - path = path / Path(file_name).name - path.write_bytes(data) - return path + @property + def skip_api(self): + return False @staticmethod def get_load_fn_and_initial_value(value): @@ -356,6 +228,12 @@ def get_load_fn_and_initial_value(value): load_fn = None return load_fn, initial_value + def __str__(self): + return self.__repr__() + + def __repr__(self): + return f"{self.get_block_name()}" + def attach_load_event(self, callable: Callable, every: float | None): """Add a load event that runs `callable`, optionally every `every` seconds.""" self.load_event_to_attach = (callable, every) @@ -364,18 +242,77 @@ def as_example(self, input_data): """Return the input data in a way that can be displayed by the examples dataset component in the front-end.""" return input_data + def api_info(self) -> dict[str, Any]: + """ + The typing information for this component as a dictionary whose values are a list of 2 strings: [Python type, language-agnostic description]. + Keys of the dictionary are: raw_input, raw_output, serialized_input, serialized_output + """ + if self.data_model is not None: + return self.data_model.model_json_schema() + raise NotImplementedError( + f"The api_info method has not been implemented for {self.get_block_name()}" + ) + + def flag(self, x: Any, flag_dir: str | Path = "") -> str: + """ + Write the component's value to a format that can be stored in a csv or jsonl format for flagging. 
+ """ + if self.data_model: + x = self.data_model.from_json(x) + return x.copy_to_dir(flag_dir).model_dump_json() + return x + + def read_from_flag( + self, + x: Any, + flag_dir: str | Path | None = None, + ): + """ + Convert the data from the csv or jsonl file into the component state. + """ + if self.data_model: + return self.data_model.from_json(json.loads(x)) + return x + -class FormComponent: +class FormComponent(Component): def get_expected_parent(self) -> type[Form] | None: if getattr(self, "container", None) is False: return None return Form + def preprocess(self, x: Any) -> Any: + return x + + def postprocess(self, y): + return y + + +class StreamingOutput(metaclass=abc.ABCMeta): + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + self.streaming: bool + + @abc.abstractmethod + def stream_output(self, y, output_id: str, first_chunk: bool) -> tuple[bytes, Any]: + pass + + +class StreamingInput(metaclass=abc.ABCMeta): + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + + @abc.abstractmethod + def check_streamable(self): + """Used to check if streaming is supported given the input.""" + pass + def component(cls_name: str, render: bool) -> Component: obj = utils.component_or_layout_class(cls_name)(render=render) if isinstance(obj, BlockContext): raise ValueError(f"Invalid component: {obj.__class__}") + assert isinstance(obj, Component) return obj @@ -403,8 +340,10 @@ def get_component_instance( raise ValueError( f"Component must provided as a `str` or `dict` or `Component` but is {comp}" ) + if render and not component_obj.is_rendered: component_obj.render() elif unrender and component_obj.is_rendered: component_obj.unrender() + assert isinstance(component_obj, Component) return component_obj diff --git a/gradio/components/button.py b/gradio/components/button.py index e64db3827b2e..9eba44b5b740 100644 --- a/gradio/components/button.py +++ b/gradio/components/button.py @@ -3,20 +3,19 @@ from __future__ import annotations import warnings -from typing import Callable, Literal +from typing import Any, Callable, Literal from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import StringSerializable -from gradio.components.base import IOComponent, _Keywords +from gradio.components.base import Component, _Keywords from gradio.deprecation import warn_deprecation, warn_style_method_deprecation -from gradio.events import Clickable +from gradio.events import Events set_documentation_group("component") @document() -class Button(Clickable, IOComponent, StringSerializable): +class Button(Component): """ Used to create a button, that can be assigned arbitrary click() events. The label (value) of the button can be used as an input or set via the output of a function. @@ -25,6 +24,8 @@ class Button(Clickable, IOComponent, StringSerializable): Demos: blocks_inputs, blocks_kinematics """ + EVENTS = [Events.click] + def __init__( self, value: str | Callable = "Run", @@ -55,8 +56,7 @@ def __init__( scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer. min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first. 
""" - IOComponent.__init__( - self, + super().__init__( visible=visible, elem_id=elem_id, elem_classes=elem_classes, @@ -74,6 +74,10 @@ def __init__( self.icon = icon self.link = link + @property + def skip_api(self): + return True + @staticmethod def update( value: str | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, @@ -122,3 +126,12 @@ def style( if size is not None: self.size = size return self + + def preprocess(self, x: Any) -> Any: + return x + + def postprocess(self, y): + return y + + def example_inputs(self) -> Any: + return None diff --git a/gradio/components/carousel.py b/gradio/components/carousel.py index 00a064420f13..edff00725d14 100644 --- a/gradio/components/carousel.py +++ b/gradio/components/carousel.py @@ -1,12 +1,9 @@ """gr.Carousel() component.""" -from gradio_client.serializing import SimpleSerializable +from gradio.components.base import Component -from gradio.components.base import IOComponent -from gradio.events import Changeable - -class Carousel(IOComponent, Changeable, SimpleSerializable): +class Carousel(Component): """ Deprecated Component """ diff --git a/gradio/components/chatbot.py b/gradio/components/chatbot.py index 447d42d99c1d..2d14cdbb8fff 100644 --- a/gradio/components/chatbot.py +++ b/gradio/components/chatbot.py @@ -5,27 +5,38 @@ import inspect import warnings from pathlib import Path -from typing import Callable, Literal +from typing import Any, Callable, List, Literal, Optional, Tuple, Union from gradio_client import utils as client_utils from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import JSONSerializable from gradio import utils -from gradio.components.base import IOComponent, _Keywords +from gradio.components.base import Component, _Keywords +from gradio.data_classes import FileData, GradioModel, GradioRootModel from gradio.deprecation import warn_deprecation, warn_style_method_deprecation -from gradio.events import ( - Changeable, - EventListenerMethod, - Likeable, - Selectable, -) +from gradio.events import Events + +# from pydantic import Field, TypeAdapter set_documentation_group("component") +class FileMessage(GradioModel): + file: FileData + alt_text: Optional[str] = None + + +# _Message = Annotated[List[Union[str, FileMessage, None]], Field(min_length=2, max_length=2)] + +# Message = TypeAdapter(_Message) + + +class ChatbotData(GradioRootModel): + root: List[Tuple[Union[str, FileMessage, None], Union[str, FileMessage, None]]] + + @document() -class Chatbot(Changeable, Selectable, Likeable, IOComponent, JSONSerializable): +class Chatbot(Component): """ Displays a chatbot output showing both user submitted messages and responses. Supports a subset of Markdown including bold, italics, code, tables. Also supports audio/video/image files, which are displayed in the Chatbot, and other kinds of files which are displayed as links. Preprocessing: passes the messages in the Chatbot as a {List[List[str | None | Tuple]]}, i.e. a list of lists. The inner list has 2 elements: the user message and the response message. See `Postprocessing` for the format of these messages. 
@@ -35,6 +46,9 @@ class Chatbot(Changeable, Selectable, Likeable, IOComponent, JSONSerializable): Guides: creating-a-chatbot """ + EVENTS = [Events.change, Events.select, Events.like] + data_model = ChatbotData + def __init__( self, value: list[list[str | tuple[str] | tuple[str | Path, str] | None]] @@ -91,18 +105,12 @@ def __init__( """ if color_map is not None: warn_deprecation("The 'color_map' parameter has been deprecated.") - self.select: EventListenerMethod """ Event listener for when the user selects message from Chatbot. Uses event data gradio.SelectData to carry `value` referring to text of selected message, and `index` tuple to refer to [message, participant] index. See EventData documentation on how to use this event data. """ - self.like: EventListenerMethod - """ - Event listener for when the user likes or dislikes a message from Chatbot. - Uses event data gradio.LikeData to carry `value` referring to text of selected message, `index` tuple to refer to [message, participant] index, and `liked` bool which is True if the item was liked, False if disliked. - See EventData documentation on how to use this event data. - """ + self.likeable = False self.height = height self.rtl = rtl if latex_delimiters is None: @@ -120,8 +128,8 @@ def __init__( self.bubble_full_width = bubble_full_width self.line_breaks = line_breaks self.layout = layout - IOComponent.__init__( - self, + + super().__init__( label=label, every=every, show_label=show_label, @@ -190,10 +198,10 @@ def _preprocess_chat_messages( if chat_message is None: return None elif isinstance(chat_message, dict): - if chat_message["alt_text"] is not None: - return (chat_message["name"], chat_message["alt_text"]) + if chat_message.get("alt_text"): + return (chat_message["file"]["name"], chat_message["alt_text"]) else: - return (chat_message["name"],) + return (chat_message["file"]["name"],) else: # string return chat_message @@ -223,24 +231,17 @@ def preprocess( def _postprocess_chat_messages( self, chat_message: str | tuple | list | None - ) -> str | dict | None: + ) -> str | FileMessage | None: if chat_message is None: return None elif isinstance(chat_message, (tuple, list)): - file_uri = str(chat_message[0]) - if utils.validate_url(file_uri): - filepath = file_uri - else: - filepath = self.make_temp_copy_if_needed(file_uri) + filepath = str(chat_message[0]) mime_type = client_utils.get_mimetype(filepath) - return { - "name": filepath, - "mime_type": mime_type, - "alt_text": chat_message[1] if len(chat_message) > 1 else None, - "data": None, # These last two fields are filled in by the frontend - "is_file": True, - } + return FileMessage( + file=FileData(name=filepath, is_file=True, mime_type=mime_type), + alt_text=chat_message[1] if len(chat_message) > 1 else None, + ) elif isinstance(chat_message, str): chat_message = inspect.cleandoc(chat_message) return chat_message @@ -250,7 +251,7 @@ def _postprocess_chat_messages( def postprocess( self, y: list[list[str | tuple[str] | tuple[str, str] | None] | tuple], - ) -> list[list[str | dict | None]]: + ) -> ChatbotData: """ Parameters: y: List of lists representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. It can also be a tuple whose first element is a string or pathlib.Path filepath or URL to an image/video/audio, and second (optional) element is the alt text, in which case the media file is displayed. It can also be None, in which case that message is not displayed. 
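`_postprocess_chat_messages` above now wraps a `(filepath, alt_text)` tuple in a `FileMessage` carrying a `FileData` entry plus the guessed MIME type. A rough sketch of that conversion, using the standard library's `mimetypes` as a stand-in for the client-utils helper and plain dicts in place of the pydantic models:

```python
import mimetypes
from typing import Tuple, Union


def postprocess_chat_message(msg: Union[str, Tuple[str, ...], None]) -> Union[str, dict, None]:
    """Mirror the branching above: None passes through, tuples become file messages."""
    if msg is None:
        return None
    if isinstance(msg, (tuple, list)):
        filepath = str(msg[0])
        mime_type, _ = mimetypes.guess_type(filepath)
        return {
            "file": {"name": filepath, "is_file": True, "mime_type": mime_type},
            "alt_text": msg[1] if len(msg) > 1 else None,
        }
    return msg  # plain string/Markdown message


print(postprocess_chat_message(("lion.jpg", "A lion")))
```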
@@ -258,7 +259,7 @@ def postprocess( List of lists representing the message and response. Each message and response will be a string of HTML, or a dictionary with media information. Or None if the message is not to be displayed. """ if y is None: - return [] + return ChatbotData(root=[]) processed_messages = [] for message_pair in y: if not isinstance(message_pair, (tuple, list)): @@ -275,7 +276,7 @@ def postprocess( self._postprocess_chat_messages(message_pair[1]), ] ) - return processed_messages + return ChatbotData(root=processed_messages) def style(self, height: int | None = None, **kwargs): """ @@ -285,3 +286,6 @@ def style(self, height: int | None = None, **kwargs): if height is not None: self.height = height return self + + def example_inputs(self) -> Any: + return [["Hello!", None]] diff --git a/gradio/components/checkbox.py b/gradio/components/checkbox.py index 4394bb84f8fc..37d019759707 100644 --- a/gradio/components/checkbox.py +++ b/gradio/components/checkbox.py @@ -3,28 +3,18 @@ from __future__ import annotations import warnings -from typing import Callable, Literal +from typing import Any, Callable, Literal from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import BooleanSerializable -from gradio.components.base import FormComponent, IOComponent, _Keywords -from gradio.events import Changeable, EventListenerMethod, Inputable, Selectable -from gradio.interpretation import NeighborInterpretable +from gradio.components.base import FormComponent, _Keywords +from gradio.events import Events set_documentation_group("component") @document() -class Checkbox( - FormComponent, - Changeable, - Inputable, - Selectable, - IOComponent, - BooleanSerializable, - NeighborInterpretable, -): +class Checkbox(FormComponent): """ Creates a checkbox that can be set to `True` or `False`. @@ -34,6 +24,8 @@ class Checkbox( Demos: sentence_builder, titanic_survival """ + EVENTS = [Events.change, Events.input, Events.select] + def __init__( self, value: bool | Callable = False, @@ -66,14 +58,7 @@ def __init__( elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles. """ - self.select: EventListenerMethod - """ - Event listener for when the user selects or deselects Checkbox. - Uses event data gradio.SelectData to carry `value` referring to label of checkbox, and `selected` to refer to state of checkbox. - See EventData documentation on how to use this event data. 
- """ - IOComponent.__init__( - self, + super().__init__( label=label, info=info, every=every, @@ -88,7 +73,6 @@ def __init__( value=value, **kwargs, ) - NeighborInterpretable.__init__(self) @staticmethod def update( @@ -130,3 +114,9 @@ def get_interpretation_scores(self, x, neighbors, scores, **kwargs): return scores[0], None else: return None, scores[0] + + def api_info(self) -> dict[str, Any]: + return {"type": "boolean"} + + def example_inputs(self) -> bool: + return True diff --git a/gradio/components/checkboxgroup.py b/gradio/components/checkboxgroup.py index deaa59c9a458..7805762d1d3b 100644 --- a/gradio/components/checkboxgroup.py +++ b/gradio/components/checkboxgroup.py @@ -6,26 +6,15 @@ from typing import Any, Callable, Literal from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import ListStringSerializable -from gradio.components.base import FormComponent, IOComponent, _Keywords -from gradio.deprecation import warn_deprecation, warn_style_method_deprecation -from gradio.events import Changeable, EventListenerMethod, Inputable, Selectable -from gradio.interpretation import NeighborInterpretable +from gradio.components.base import FormComponent, _Keywords +from gradio.events import Events set_documentation_group("component") @document() -class CheckboxGroup( - FormComponent, - Changeable, - Inputable, - Selectable, - IOComponent, - ListStringSerializable, - NeighborInterpretable, -): +class CheckboxGroup(FormComponent): """ Creates a set of checkboxes of which a subset can be checked. Preprocessing: passes the list of checked checkboxes as a {List[str | int | float]} or their indices as a {List[int]} into the function, depending on `type`. @@ -34,6 +23,8 @@ class CheckboxGroup( Demos: sentence_builder, titanic_survival """ + EVENTS = [Events.change, Events.input, Events.select] + def __init__( self, choices: list[str | int | float | tuple[str, str | int | float]] | None = None, @@ -83,14 +74,7 @@ def __init__( f"Invalid value for parameter `type`: {type}. Please choose from one of: {valid_types}" ) self.type = type - self.select: EventListenerMethod - """ - Event listener for when the user selects or deselects within CheckboxGroup. - Uses event data gradio.SelectData to carry `value` referring to label of selected checkbox, `index` to refer to index, and `selected` to refer to state of checkbox. - See EventData documentation on how to use this event data. 
- """ - IOComponent.__init__( - self, + super().__init__( label=label, info=info, every=every, @@ -105,12 +89,15 @@ def __init__( value=value, **kwargs, ) - NeighborInterpretable.__init__(self) - def example_inputs(self) -> dict[str, Any]: + def example_inputs(self) -> Any: + return [self.choices[0][1]] if self.choices else None + + def api_info(self) -> dict[str, Any]: return { - "raw": [self.choices[0][1]] if self.choices else None, - "serialized": [self.choices[0][1]] if self.choices else None, + "items": {"enum": [c[1] for c in self.choices], "type": "string"}, + "title": "Checkbox Group", + "type": "array", } @staticmethod @@ -188,45 +175,6 @@ def postprocess( y = [y] return y - def get_interpretation_neighbors(self, x): - leave_one_out_sets = [] - for choice in [value for _, value in self.choices]: - leave_one_out_set = list(x) - if choice in leave_one_out_set: - leave_one_out_set.remove(choice) - else: - leave_one_out_set.append(choice) - leave_one_out_sets.append(leave_one_out_set) - return leave_one_out_sets, {} - - def get_interpretation_scores(self, x, neighbors, scores, **kwargs): - """ - Returns: - For each tuple in the list, the first value represents the interpretation score if the input is False, and the second if the input is True. - """ - final_scores = [] - for choice, score in zip([value for _, value in self.choices], scores): - score_set = [score, None] if choice in x else [None, score] - final_scores.append(score_set) - return final_scores - - def style( - self, - *, - item_container: bool | None = None, - container: bool | None = None, - **kwargs, - ): - """ - This method is deprecated. Please set these arguments in the constructor instead. - """ - warn_style_method_deprecation() - if item_container is not None: - warn_deprecation("The `item_container` parameter is deprecated.") - if container is not None: - self.container = container - return self - def as_example(self, input_data): if input_data is None: return None diff --git a/gradio/components/clear_button.py b/gradio/components/clear_button.py index 33d122e12c9d..d3f25fbcd1bf 100644 --- a/gradio/components/clear_button.py +++ b/gradio/components/clear_button.py @@ -3,7 +3,7 @@ from __future__ import annotations import json -from typing import Literal +from typing import Any, Literal from gradio_client.documentation import document, set_documentation_group @@ -67,8 +67,19 @@ def add(self, components: None | Component | list[Component]) -> ClearButton: if isinstance(components, Component): components = [components] - clear_values = json.dumps( - [component.postprocess(None) for component in components] - ) + none_values = [] + for component in components: + none = component.postprocess(None) + none_values.append(none) + clear_values = json.dumps(none_values) self.click(None, [], components, _js=f"() => {clear_values}") return self + + def postprocess(self, y): + return y + + def preprocess(self, x: Any) -> Any: + return x + + def example_inputs(self) -> Any: + return None diff --git a/gradio/components/code.py b/gradio/components/code.py index cbc5d7f2339d..3bd90f38ebd9 100644 --- a/gradio/components/code.py +++ b/gradio/components/code.py @@ -3,19 +3,19 @@ from __future__ import annotations import warnings -from typing import Literal +from pathlib import Path +from typing import Any, Literal from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import StringSerializable -from gradio.components.base import IOComponent, _Keywords -from gradio.events import 
Changeable, Inputable +from gradio.components.base import Component, _Keywords +from gradio.events import Events set_documentation_group("component") -@document() -class Code(Changeable, Inputable, IOComponent, StringSerializable): +@document("languages") +class Code(Component): """ Creates a Code editor for entering, editing or viewing code. Preprocessing: passes a {str} of code into the function. @@ -37,6 +37,8 @@ class Code(Changeable, Inputable, IOComponent, StringSerializable): None, ] + EVENTS = [Events.change, Events.input] + def __init__( self, value: str | tuple[str] | None = None, @@ -86,8 +88,7 @@ def __init__( self.language = language self.lines = lines - IOComponent.__init__( - self, + super().__init__( label=label, interactive=interactive, show_label=show_label, @@ -101,7 +102,10 @@ def __init__( **kwargs, ) - def postprocess(self, y): + def preprocess(self, x: Any) -> Any: + return x + + def postprocess(self, y: tuple | str | None) -> None | str: if y is None: return None elif isinstance(y, tuple): @@ -110,6 +114,15 @@ def postprocess(self, y): else: return y.strip() + def flag(self, x: Any, flag_dir: str | Path = "") -> str: + return super().flag(x, flag_dir) + + def api_info(self) -> dict[str, Any]: + return {"type": "string"} + + def example_inputs(self) -> Any: + return "print('Hello World')" + @staticmethod def update( value: str diff --git a/gradio/components/color_picker.py b/gradio/components/color_picker.py index f8acd134f32f..e894c2b78ca5 100644 --- a/gradio/components/color_picker.py +++ b/gradio/components/color_picker.py @@ -3,26 +3,19 @@ from __future__ import annotations import warnings +from pathlib import Path from typing import Any, Callable, Literal from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import StringSerializable -from gradio.components.base import IOComponent, _Keywords -from gradio.events import ( - Changeable, - Focusable, - Inputable, - Submittable, -) +from gradio.components.base import Component, _Keywords +from gradio.events import Events set_documentation_group("component") @document() -class ColorPicker( - Changeable, Inputable, Submittable, Focusable, IOComponent, StringSerializable -): +class ColorPicker(Component): """ Creates a color picker for user to select a color as string input. Preprocessing: passes selected color value as a {str} into the function. @@ -31,6 +24,8 @@ class ColorPicker( Demos: color_picker, color_generator """ + EVENTS = [Events.change, Events.input, Events.submit, Events.focus, Events.blur] + def __init__( self, value: str | Callable | None = None, @@ -63,8 +58,7 @@ def __init__( elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles. 
""" - IOComponent.__init__( - self, + super().__init__( label=label, info=info, every=every, @@ -80,11 +74,17 @@ def __init__( **kwargs, ) - def example_inputs(self) -> dict[str, Any]: - return { - "raw": "#000000", - "serialized": "#000000", - } + def example_inputs(self) -> str: + return "#000000" + + def flag(self, x: Any, flag_dir: str | Path = "") -> str: + return x + + def read_from_flag(self, x: Any, flag_dir: str | Path | None = None): + return x + + def api_info(self) -> dict[str, Any]: + return {"type": "string"} @staticmethod def update( diff --git a/gradio/components/dataframe.py b/gradio/components/dataframe.py index b028d0681417..1b8cf73b959e 100644 --- a/gradio/components/dataframe.py +++ b/gradio/components/dataframe.py @@ -3,40 +3,32 @@ from __future__ import annotations import warnings -from dataclasses import asdict, dataclass -from typing import Callable, Literal +from typing import Any, Callable, Dict, List, Literal, Optional import numpy as np import pandas as pd import semantic_version from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import JSONSerializable from pandas.io.formats.style import Styler -from gradio.components.base import IOComponent, _Keywords -from gradio.events import ( - Changeable, - EventListenerMethod, - Inputable, - Selectable, -) +from gradio.components import Component, _Keywords +from gradio.data_classes import GradioModel +from gradio.events import Events -set_documentation_group("component") +class DataframeData(GradioModel): + headers: List[str] + data: List[List[Any]] + metadata: Optional[ + Dict[str, List[Any]] + ] = None # Optional[Dict[str, List[Any]]] = None -@dataclass -class DataframeData: - """ - This is a dataclass to represent all the data that is sent to or received from the frontend. - """ - data: list[list[str | int | bool]] - headers: list[str] | list[int] | None = None - metadata: dict[str, list[list]] | None = None +set_documentation_group("component") @document() -class Dataframe(Changeable, Inputable, Selectable, IOComponent, JSONSerializable): +class Dataframe(Component): """ Accepts or displays 2D input through a spreadsheet-like component for dataframes. 
Preprocessing: passes the uploaded spreadsheet data as a {pandas.DataFrame}, {numpy.array}, or {List[List]} depending on `type` @@ -45,6 +37,10 @@ class Dataframe(Changeable, Inputable, Selectable, IOComponent, JSONSerializable Demos: filter_records, matrix_transpose, tax_calculator """ + EVENTS = [Events.change, Events.input, Events.select] + + data_model = DataframeData + def __init__( self, value: pd.DataFrame @@ -116,7 +112,9 @@ def __init__( self.__validate_headers(headers, self.col_count[0]) self.headers = ( - headers if headers is not None else list(range(1, self.col_count[0] + 1)) + headers + if headers is not None + else [str(i) for i in (range(1, self.col_count[0] + 1))] ) self.datatype = ( datatype if isinstance(datatype, list) else [datatype] * self.col_count[0] @@ -138,9 +136,13 @@ def __init__( column_dtypes = ( [datatype] * self.col_count[0] if isinstance(datatype, str) else datatype ) - self.empty_input = [ - [values[c] for c in column_dtypes] for _ in range(self.row_count[0]) - ] + self.empty_input = { + "headers": self.headers, + "data": [ + [values[c] for c in column_dtypes] for _ in range(self.row_count[0]) + ], + "metadata": None, + } self.max_rows = max_rows self.max_cols = max_cols @@ -150,15 +152,7 @@ def __init__( self.latex_delimiters = latex_delimiters self.height = height self.line_breaks = line_breaks - - self.select: EventListenerMethod - """ - Event listener for when the user selects cell within Dataframe. - Uses event data gradio.SelectData to carry `value` referring to value of selected cell, and `index` tuple to refer to index row and column. - See EventData documentation on how to use this event data. - """ - IOComponent.__init__( - self, + super().__init__( label=label, every=every, show_label=show_label, @@ -241,7 +235,7 @@ def preprocess(self, x: dict) -> pd.DataFrame | np.ndarray | list: def postprocess( self, y: pd.DataFrame | Styler | np.ndarray | list | list[list] | dict | str | None, - ) -> dict: + ) -> DataframeData | dict: """ Parameters: y: dataframe in given format @@ -251,7 +245,14 @@ def postprocess( if y is None: return self.postprocess(self.empty_input) if isinstance(y, dict): - value = DataframeData(**y) + return y + if isinstance(y, (str, pd.DataFrame)): + if isinstance(y, str): + y = pd.read_csv(y) # type: ignore + return DataframeData( + headers=list(y.columns), # type: ignore + data=y.to_dict(orient="split")["data"], # type: ignore + ) elif isinstance(y, Styler): if semantic_version.Version(pd.__version__) < semantic_version.Version( "1.5.0" @@ -266,14 +267,14 @@ def postprocess( df: pd.DataFrame = y.data # type: ignore value = DataframeData( headers=list(df.columns), - data=df.to_dict(orient="split")["data"], + data=df.to_dict(orient="split")["data"], # type: ignore metadata=self.__extract_metadata(y), ) elif isinstance(y, (str, pd.DataFrame)): - df = pd.read_csv(y) if isinstance(y, str) else y + df = pd.read_csv(y) if isinstance(y, str) else y # type: ignore value = DataframeData( headers=list(df.columns), - data=df.to_dict(orient="split")["data"], + data=df.to_dict(orient="split")["data"], # type: ignore ) elif isinstance(y, (np.ndarray, list)): if len(y) == 0: @@ -285,20 +286,17 @@ def postprocess( _headers = self.headers if len(self.headers) < len(y[0]): - _headers = [ + _headers: list[str] = [ *self.headers, - *list(range(len(self.headers) + 1, len(y[0]) + 1)), + *[str(i) for i in range(len(self.headers) + 1, len(y[0]) + 1)], ] elif len(self.headers) > len(y[0]): _headers = self.headers[: len(y[0])] - value = DataframeData( - 
headers=_headers, - data=y, - ) + value = DataframeData(headers=_headers, data=y) else: - raise ValueError(f"Cannot process value as a Dataframe: {y}") - return asdict(value) + raise ValueError("Cannot process value as a Dataframe") + return value @staticmethod def __get_cell_style(cell_id: str, cell_styles: list[dict]) -> str: @@ -354,3 +352,6 @@ def as_example(self, input_data: pd.DataFrame | np.ndarray | str | None): elif isinstance(input_data, np.ndarray): return input_data.tolist() return input_data + + def example_inputs(self) -> Any: + return {"headers": ["a", "b"], "data": [["foo", "bar"]]} diff --git a/gradio/components/dataset.py b/gradio/components/dataset.py index cf4fa308349c..5731184335fc 100644 --- a/gradio/components/dataset.py +++ b/gradio/components/dataset.py @@ -6,21 +6,19 @@ from typing import Any, Literal from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import StringSerializable from gradio.components.base import ( Component, - IOComponent, _Keywords, get_component_instance, ) -from gradio.events import Clickable, Selectable +from gradio.events import Events set_documentation_group("component") @document() -class Dataset(Clickable, Selectable, Component, StringSerializable): +class Dataset(Component): """ Used to create an output widget for showing datasets. Used to render the examples box. @@ -28,11 +26,13 @@ class Dataset(Clickable, Selectable, Component, StringSerializable): Postprocessing: expects a {list} of {lists} corresponding to the dataset data. """ + EVENTS = [Events.click, Events.select] + def __init__( self, *, label: str | None = None, - components: list[IOComponent] | list[str], + components: list[Component] | list[str], samples: list[list[Any]] | None = None, headers: list[str] | None = None, type: Literal["values", "index"] = "values", @@ -59,20 +59,19 @@ def __init__( scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer. min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first. 
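The `Dataframe.postprocess` branches above all funnel into the same shape: `df.columns` becomes `headers` and `to_dict(orient="split")["data"]` becomes the rows, with missing headers padded by stringified column numbers. A minimal sketch of both steps, assuming pandas is installed:

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
payload = {
    "headers": list(df.columns),
    "data": df.to_dict(orient="split")["data"],
}
print(payload)  # {'headers': ['a', 'b'], 'data': [[1, 'x'], [2, 'y']]}

# When a row is wider than the configured headers, pad with stringified
# column numbers, as the hunk above does.
row = [1, "x", 3.5]
headers = ["a", "b"]
headers = [*headers, *[str(i) for i in range(len(headers) + 1, len(row) + 1)]]
print(headers)  # ['a', 'b', '3']
```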
""" - Component.__init__( - self, visible=visible, elem_id=elem_id, elem_classes=elem_classes, **kwargs + super().__init__( + visible=visible, elem_id=elem_id, elem_classes=elem_classes, **kwargs ) self.container = container self.scale = scale self.min_width = min_width self._components = [get_component_instance(c) for c in components] - # Narrow type to IOComponent - if not all(isinstance(c, IOComponent) for c in self._components): - raise ValueError( - "All components in a `Dataset` must be subclasses of `IOComponent`" - ) - self._components = [c for c in self._components if isinstance(c, IOComponent)] + # Narrow type to Component + assert all( + isinstance(c, Component) for c in self._components + ), "All components in a `Dataset` must be subclasses of `Component`" + self._components = [c for c in self._components if isinstance(c, Component)] for component in self._components: component.root_url = self.root_url @@ -90,6 +89,10 @@ def __init__( self.headers = [c.label or "" for c in self._components] self.samples_per_page = samples_per_page + @property + def skip_api(self): + return True + @staticmethod def update( samples: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, @@ -134,3 +137,6 @@ def postprocess(self, samples: list[list[Any]]) -> dict: "samples": samples, "__type__": "update", } + + def example_inputs(self) -> Any: + return None diff --git a/gradio/components/dropdown.py b/gradio/components/dropdown.py index 13fb1e66170b..fc9e30f9c927 100644 --- a/gradio/components/dropdown.py +++ b/gradio/components/dropdown.py @@ -6,31 +6,16 @@ from typing import Any, Callable, Literal from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import SimpleSerializable -from gradio.components.base import FormComponent, IOComponent, _Keywords +from gradio.components.base import FormComponent, _Keywords from gradio.deprecation import warn_style_method_deprecation -from gradio.events import ( - Changeable, - EventListenerMethod, - Focusable, - Inputable, - Selectable, -) +from gradio.events import Events set_documentation_group("component") @document() -class Dropdown( - FormComponent, - Changeable, - Inputable, - Selectable, - Focusable, - IOComponent, - SimpleSerializable, -): +class Dropdown(FormComponent): """ Creates a dropdown of choices from which entries can be selected. Preprocessing: passes the value of the selected dropdown entry as a {str} or its index as an {int} into the function, depending on `type`. @@ -39,6 +24,8 @@ class Dropdown( Demos: sentence_builder, titanic_survival """ + EVENTS = [Events.change, Events.input, Events.select, Events.focus, Events.blur] + def __init__( self, choices: list[str | int | float | tuple[str, str | int | float]] | None = None, @@ -112,14 +99,7 @@ def __init__( self.allow_custom_value = allow_custom_value self.interpret_by_tokens = False self.filterable = filterable - self.select: EventListenerMethod - """ - Event listener for when the user selects Dropdown option. - Uses event data gradio.SelectData to carry `value` referring to label of selected option, and `index` to refer to index. - See EventData documentation on how to use this event data. 
- """ - IOComponent.__init__( - self, + super().__init__( label=label, info=info, every=every, @@ -135,28 +115,24 @@ def __init__( **kwargs, ) - def api_info(self) -> dict[str, dict | bool]: + def api_info(self) -> dict[str, Any]: if self.multiselect: - type = { + json_type = { "type": "array", - "items": {"type": "string"}, - "description": f"List of options from: {self.choices}", + "items": {"type": "string", "enum": [c[1] for c in self.choices]}, } else: - type = {"type": "string", "description": f"Option from: {self.choices}"} - return {"info": type, "serialized_info": False} + json_type = { + "type": "string", + "enum": [c[1] for c in self.choices], + } + return json_type - def example_inputs(self) -> dict[str, Any]: + def example_inputs(self) -> Any: if self.multiselect: - return { - "raw": [self.choices[0]] if self.choices else [], - "serialized": [self.choices[0]] if self.choices else [], - } + return [self.choices[0][1]] if self.choices else [] else: - return { - "raw": self.choices[0] if self.choices else None, - "serialized": self.choices[0] if self.choices else None, - } + return self.choices[0][1] if self.choices else None @staticmethod def update( @@ -241,27 +217,6 @@ def postprocess(self, y): self._warn_if_invalid_choice(y) return y - def set_interpret_parameters(self): - """ - Calculates interpretation score of each choice by comparing the output against each of the outputs when alternative choices are selected. - """ - return self - - def get_interpretation_neighbors(self, x): - choices = list(self.choices) - choices.remove(x) - return choices, {} - - def get_interpretation_scores( - self, x, neighbors, scores: list[float | None], **kwargs - ) -> list: - """ - Returns: - Each value represents the interpretation score corresponding to each choice. - """ - scores.insert(self.choices.index(x), None) - return scores - def style(self, *, container: bool | None = None, **kwargs): """ This method is deprecated. Please set these arguments in the constructor instead. diff --git a/gradio/components/duplicate_button.py b/gradio/components/duplicate_button.py index c6b8f486a9cf..9e524ef719df 100644 --- a/gradio/components/duplicate_button.py +++ b/gradio/components/duplicate_button.py @@ -54,7 +54,7 @@ def __init__( min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first. 
""" super().__init__( - value, + value=value, variant=variant, size=size, icon=icon, diff --git a/gradio/components/fallback.py b/gradio/components/fallback.py new file mode 100644 index 000000000000..2a229e13c400 --- /dev/null +++ b/gradio/components/fallback.py @@ -0,0 +1,15 @@ +from gradio.components.base import Component + + +class Fallback(Component): + def preprocess(self, x): + return x + + def postprocess(self, x): + return x + + def example_inputs(self): + return {"foo": "bar"} + + def api_info(self): + return {"type": {}, "description": "any valid json"} diff --git a/gradio/components/file.py b/gradio/components/file.py index 32aab3b3fb06..577e8c11e4c3 100644 --- a/gradio/components/file.py +++ b/gradio/components/file.py @@ -5,35 +5,25 @@ import tempfile import warnings from pathlib import Path -from typing import Any, Callable, Literal +from typing import Any, Callable, List, Literal from gradio_client import utils as client_utils from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import FileSerializable -from gradio import utils -from gradio.components.base import IOComponent, _Keywords +from gradio.components.base import Component, _Keywords +from gradio.data_classes import FileData, GradioRootModel from gradio.deprecation import warn_deprecation -from gradio.events import ( - Changeable, - Clearable, - EventListenerMethod, - Selectable, - Uploadable, -) +from gradio.events import Events set_documentation_group("component") +class ListFiles(GradioRootModel): + root: List[FileData] + + @document() -class File( - Changeable, - Selectable, - Clearable, - Uploadable, - IOComponent, - FileSerializable, -): +class File(Component): """ Creates a file component that allows uploading generic file (when used as an input) and or displaying generic files (output). Preprocessing: passes the uploaded file as a {tempfile._TemporaryFileWrapper} or {List[tempfile._TemporaryFileWrapper]} depending on `file_count` (or a {bytes}/{List[bytes]} depending on `type`) @@ -42,6 +32,8 @@ class File( Demos: zip_to_json, zip_files """ + EVENTS = [Events.change, Events.select, Events.clear, Events.upload] + def __init__( self, value: str | list[str] | Callable | None = None, @@ -81,6 +73,10 @@ def __init__( elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles. """ self.file_count = file_count + if self.file_count == "multiple": + self.data_model = ListFiles + else: + self.data_model = FileData self.file_types = file_types if file_types is not None and not isinstance(file_types, list): raise ValueError( @@ -103,16 +99,7 @@ def __init__( warnings.warn( "The `file_types` parameter is ignored when `file_count` is 'directory'." ) - self.type = type - self.height = height - self.select: EventListenerMethod - """ - Event listener for when the user selects file from list. - Uses event data gradio.SelectData to carry `value` referring to name of selected file, and `index` to refer to index. - See EventData documentation on how to use this event data. 
- """ - IOComponent.__init__( - self, + super().__init__( label=label, every=every, show_label=show_label, @@ -126,6 +113,8 @@ def __init__( value=value, **kwargs, ) + self.type = type + self.height = height @staticmethod def update( @@ -155,6 +144,30 @@ def update( "__type__": "update", } + @staticmethod + def _process_single_file( + f, type: Literal["file", "bytes", "binary"], cache_dir: str + ) -> bytes | tempfile._TemporaryFileWrapper: + file_name, data, is_file = ( + f["name"], + f["data"], + f.get("is_file", False), + ) + if type == "file": + file = tempfile.NamedTemporaryFile(delete=False, dir=cache_dir) + file.name = file_name + file.orig_name = file_name # type: ignore + return file + elif type in {"bytes", "binary"}: + if is_file: + with open(file_name, "rb") as file_data: + return file_data.read() + return client_utils.decode_base64_to_binary(data)[0] + else: + raise ValueError( + "Unknown type: " + str(type) + ". Please choose from: 'file', 'bytes'." + ) + def preprocess( self, x: list[dict[str, Any]] | None ) -> ( @@ -172,56 +185,20 @@ def preprocess( if x is None: return None - def process_single_file(f) -> bytes | tempfile._TemporaryFileWrapper: - file_name, data, is_file = ( - f["name"], - f["data"], - f.get("is_file", False), - ) - if self.type == "file": - if is_file: - path = self.make_temp_copy_if_needed(file_name) - else: - data, _ = client_utils.decode_base64_to_binary(data) - path = self.file_bytes_to_file(data, file_name=file_name) - path = str(utils.abspath(path)) - self.temp_files.add(path) - - # Creation of tempfiles here - file = tempfile.NamedTemporaryFile( - delete=False, dir=self.DEFAULT_TEMP_DIR - ) - file.name = path - file.orig_name = file_name # type: ignore - return file - elif ( - self.type == "binary" or self.type == "bytes" - ): # "bytes" is included for backwards compatibility - if is_file: - with open(file_name, "rb") as file_data: - return file_data.read() - return client_utils.decode_base64_to_binary(data)[0] - else: - raise ValueError( - "Unknown type: " - + str(self.type) - + ". Please choose from: 'file', 'bytes'." 
- ) - if self.file_count == "single": if isinstance(x, list): - return process_single_file(x[0]) + return self._process_single_file( + x[0], type=self.type, cache_dir=self.GRADIO_CACHE # type: ignore + ) else: - return process_single_file(x) + return self._process_single_file(x, type=self.type, cache_dir=self.GRADIO_CACHE) # type: ignore else: if isinstance(x, list): - return [process_single_file(f) for f in x] + return [self._process_single_file(f, type=self.type, cache_dir=self.GRADIO_CACHE) for f in x] # type: ignore else: - return process_single_file(x) + return self._process_single_file(x, type=self.type, cache_dir=self.GRADIO_CACHE) # type: ignore - def postprocess( - self, y: str | list[str] | None - ) -> dict[str, Any] | list[dict[str, Any]] | None: + def postprocess(self, y: str | list[str] | None) -> ListFiles | FileData | None: """ Parameters: y: file path @@ -231,25 +208,24 @@ def postprocess( if y is None: return None if isinstance(y, list): - return [ - { - "orig_name": Path(file).name, - "name": self.make_temp_copy_if_needed(file), - "size": Path(file).stat().st_size, - "data": None, - "is_file": True, - } - for file in y - ] + return ListFiles( + root=[ + FileData( + name=file, + orig_name=Path(file).name, + size=Path(file).stat().st_size, + is_file=True, + ) + for file in y + ] + ) else: - d = { - "orig_name": Path(y).name, - "name": self.make_temp_copy_if_needed(y), - "size": Path(y).stat().st_size, - "data": None, - "is_file": True, - } - return d + return FileData( + name=y, + orig_name=Path(y).name, + size=Path(y).stat().st_size, + is_file=True, + ) def as_example(self, input_data: str | list | None) -> str: if input_data is None: @@ -259,20 +235,10 @@ def as_example(self, input_data: str | list | None) -> str: else: return Path(input_data).name - def api_info(self) -> dict[str, dict | bool]: + def example_inputs(self) -> Any: if self.file_count == "single": - return self._single_file_api_info() + return "https://github.com/gradio-app/gradio/raw/main/test/test_files/sample_file.pdf" else: - return self._multiple_file_api_info() - - def serialized_info(self): - if self.file_count == "single": - return self._single_file_serialized_info() - else: - return self._multiple_file_serialized_info() - - def example_inputs(self) -> dict[str, Any]: - if self.file_count == "single": - return self._single_file_example_inputs() - else: - return self._multiple_file_example_inputs() + return [ + "https://github.com/gradio-app/gradio/raw/main/test/test_files/sample_file.pdf" + ] diff --git a/gradio/components/file_explorer.py b/gradio/components/file_explorer.py index 7fbe2bdac477..97b490fbdf36 100644 --- a/gradio/components/file_explorer.py +++ b/gradio/components/file_explorer.py @@ -7,30 +7,33 @@ import re from glob import glob as glob_func from pathlib import Path -from typing import Callable, Literal +from typing import Any, Callable, List, Literal from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import JSONSerializable -from gradio.components.base import IOComponent, server -from gradio.events import ( - Changeable, - EventListenerMethod, -) +from gradio.components.base import Component, server +from gradio.data_classes import GradioRootModel set_documentation_group("component") +class FileExplorerData(GradioRootModel): + root: List[List[str]] + + @document() -class FileExplorer(Changeable, IOComponent, JSONSerializable): +class FileExplorer(Component): """ Creates a file explorer component that allows users to browse and 
select files on the machine hosting the Gradio app. - Preprocessing: passes the selected file or directory as a {str} path (relative to root) or {list[str]} depending on `file_count` + Preprocessing: passes the selected file or directory as a {str} path (relative to root) or {list[str}} depending on `file_count` Postprocessing: expects function to return a {str} path to a file, or {List[str]} consisting of paths to files. Examples-format: a {str} path to a local file that populates the component. Demos: zip_to_json, zip_files """ + EVENTS = ["change"] + data_model = FileExplorerData + def __init__( self, glob: str = "**/*.*", @@ -81,14 +84,8 @@ def __init__( ) self.file_count = file_count self.height = height - self.select: EventListenerMethod - """ - Event listener for when the user selects file from list. - Uses event data gradio.SelectData to carry `value` referring to name of selected file, and `index` to refer to index. - See EventData documentation on how to use this event data. - """ - IOComponent.__init__( - self, + + super().__init__( label=label, every=every, show_label=show_label, @@ -103,6 +100,9 @@ def __init__( **kwargs, ) + def example_inputs(self) -> Any: + return [["Users", "gradio", "app.py"]] + def preprocess(self, x: list[list[str]] | None) -> list[str] | str | None: """ Parameters: @@ -125,7 +125,7 @@ def _strip_root(self, path): return path[len(self.root) + 1 :] return path - def postprocess(self, y: str | list[str] | None) -> list[list[str]] | None: + def postprocess(self, y: str | list[str] | None) -> FileExplorerData | None: """ Parameters: y: file path @@ -137,7 +137,9 @@ def postprocess(self, y: str | list[str] | None) -> list[list[str]] | None: files = [y] if isinstance(y, str) else y - return [self._strip_root(file).split(os.path.sep) for file in (files)] + return FileExplorerData( + root=[self._strip_root(file).split(os.path.sep) for file in files] + ) @server def ls(self, y=None) -> list[dict[str, str]] | None: diff --git a/gradio/components/gallery.py b/gradio/components/gallery.py index 56b7b21494dd..2e37028642d2 100644 --- a/gradio/components/gallery.py +++ b/gradio/components/gallery.py @@ -4,27 +4,31 @@ import warnings from pathlib import Path -from typing import Any, Callable, Literal +from typing import Any, Callable, List, Literal, Optional import numpy as np from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import GallerySerializable from PIL import Image as _Image # using _ to minimize namespace pollution -from gradio import utils -from gradio.components.base import IOComponent, _Keywords -from gradio.deprecation import warn_deprecation, warn_style_method_deprecation -from gradio.events import ( - Changeable, - EventListenerMethod, - Selectable, -) +from gradio import processing_utils, utils +from gradio.components.base import Component, _Keywords +from gradio.data_classes import FileData, GradioModel, GradioRootModel +from gradio.events import Events set_documentation_group("component") +class GalleryImage(GradioModel): + image: FileData + caption: Optional[str] = None + + +class GalleryData(GradioRootModel): + root: List[GalleryImage] + + @document() -class Gallery(IOComponent, GallerySerializable, Changeable, Selectable): +class Gallery(Component): """ Used to display a list of images as a gallery that can be scrolled through. Preprocessing: this component does *not* accept input. 
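`FileExplorer.postprocess` above returns each selected path as a list of segments relative to the configured root. A minimal standard-library sketch of that `_strip_root`-plus-split step; the paths are illustrative:

```python
import os
from typing import List


def to_segments(path: str, root: str) -> List[str]:
    # Strip the explorer root, then split on the OS separator, mirroring
    # FileExplorer._strip_root and postprocess above.
    if path.startswith(root):
        path = path[len(root) + 1:]
    return path.split(os.path.sep)


print(to_segments("/home/user/project/data/train.csv", "/home/user/project"))
# ['data', 'train.csv'] with a POSIX separator
```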
@@ -33,6 +37,10 @@ class Gallery(IOComponent, GallerySerializable, Changeable, Selectable): Demos: fake_gan """ + EVENTS = [Events.select] + + data_model = GalleryData + def __init__( self, value: list[np.ndarray | _Image.Image | str | Path | tuple] @@ -94,20 +102,14 @@ def __init__( if show_download_button is None else show_download_button ) - self.select: EventListenerMethod self.selected_index = selected_index - """ - Event listener for when the user selects image within Gallery. - Uses event data gradio.SelectData to carry `value` referring to caption of selected image, and `index` to refer to index. - See EventData documentation on how to use this event data. - """ + self.show_share_button = ( (utils.get_space() is not None) if show_share_button is None else show_share_button ) - IOComponent.__init__( - self, + super().__init__( label=label, every=every, show_label=show_label, @@ -168,7 +170,7 @@ def postprocess( y: list[np.ndarray | _Image.Image | str] | list[tuple[np.ndarray | _Image.Image | str, str]] | None, - ) -> list[str]: + ) -> GalleryData: """ Parameters: y: list of images, or list of (image, caption) tuples @@ -176,68 +178,37 @@ def postprocess( list of string file paths to images in temp directory """ if y is None: - return [] + return GalleryData(root=[]) output = [] for img in y: caption = None if isinstance(img, (tuple, list)): img, caption = img if isinstance(img, np.ndarray): - file = self.img_array_to_temp_file(img, dir=self.DEFAULT_TEMP_DIR) + file = processing_utils.save_img_array_to_cache( + img, cache_dir=self.GRADIO_CACHE + ) file_path = str(utils.abspath(file)) - self.temp_files.add(file_path) elif isinstance(img, _Image.Image): - file = self.pil_to_temp_file(img, dir=self.DEFAULT_TEMP_DIR) + file = processing_utils.save_pil_to_cache( + img, cache_dir=self.GRADIO_CACHE + ) file_path = str(utils.abspath(file)) - self.temp_files.add(file_path) elif isinstance(img, (str, Path)): - if utils.validate_url(img): - file_path = img - else: - file_path = self.make_temp_copy_if_needed(img) + file_path = str(img) else: raise ValueError(f"Cannot process type as image: {type(img)}") - if caption is not None: - output.append( - [{"name": file_path, "data": None, "is_file": True}, caption] - ) - else: - output.append({"name": file_path, "data": None, "is_file": True}) + entry = GalleryImage( + image=FileData(name=file_path, is_file=True), caption=caption + ) + output.append(entry) + return GalleryData(root=output) - return output + def preprocess(self, x: Any) -> Any: + return x - def style( - self, - *, - grid: int | tuple | None = None, - columns: int | tuple | None = None, - rows: int | tuple | None = None, - height: str | None = None, - container: bool | None = None, - preview: bool | None = None, - object_fit: str | None = None, - **kwargs, - ): - """ - This method is deprecated. Please set these arguments in the constructor instead. - """ - warn_style_method_deprecation() - if grid is not None: - warn_deprecation( - "The 'grid' parameter will be deprecated. 
Please use 'columns' in the constructor instead.", - ) - self.columns = grid - if columns is not None: - self.columns = columns - if rows is not None: - self.rows = rows - if height is not None: - self.height = height - if preview is not None: - self.preview = preview - if object_fit is not None: - self.object_fit = object_fit - if container is not None: - self.container = container - return self + def example_inputs(self) -> Any: + return [ + "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png" + ] diff --git a/gradio/components/highlighted_text.py b/gradio/components/highlighted_text.py index a4d41fc4a8e4..a6938792d22a 100644 --- a/gradio/components/highlighted_text.py +++ b/gradio/components/highlighted_text.py @@ -3,22 +3,29 @@ from __future__ import annotations import warnings -from typing import Callable, Literal +from typing import Any, Callable, List, Literal, Union from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import ( - JSONSerializable, -) -from gradio.components.base import IOComponent, _Keywords +from gradio.components.base import Component, _Keywords +from gradio.data_classes import GradioModel, GradioRootModel from gradio.deprecation import warn_style_method_deprecation -from gradio.events import Changeable, EventListenerMethod, Selectable +from gradio.events import Events set_documentation_group("component") +class HighlightedToken(GradioModel): + token: str + class_or_confidence: Union[str, float, None] = None + + +class HighlightedTextData(GradioRootModel): + root: List[HighlightedToken] + + @document() -class HighlightedText(Changeable, Selectable, IOComponent, JSONSerializable): +class HighlightedText(Component): """ Displays text that contains spans that are highlighted by category or numerical value. Preprocessing: passes a list of tuples as a {List[Tuple[str, float | str | None]]]} into the function. If no labels are provided, the text will be displayed as a single span. @@ -28,6 +35,9 @@ class HighlightedText(Changeable, Selectable, IOComponent, JSONSerializable): Guides: named-entity-recognition """ + data_model = HighlightedTextData + EVENTS = [Events.change, Events.select] + def __init__( self, value: list[tuple[str, str | float | None]] | dict | Callable | None = None, @@ -72,14 +82,7 @@ def __init__( self.show_legend = show_legend self.combine_adjacent = combine_adjacent self.adjacent_separator = adjacent_separator - self.select: EventListenerMethod - """ - Event listener for when the user selects Highlighted text span. - Uses event data gradio.SelectData to carry `value` referring to selected [text, label] tuple, and `index` to refer to span index. - See EventData documentation on how to use this event data. 
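`HighlightedTextData` above replaces the old list-of-tuples payload with a root model of `HighlightedToken` entries. A rough pydantic v2 sketch of converting `(text, label)` pairs into that shape; the class names are stand-ins:

```python
from typing import List, Union

from pydantic import BaseModel, RootModel


class Token(BaseModel):  # stand-in for HighlightedToken
    token: str
    class_or_confidence: Union[str, float, None] = None


class Highlighted(RootModel):  # stand-in for HighlightedTextData
    root: List[Token]


pairs = [("Alice", "PERSON"), ("visited", None), ("Paris", "LOC")]
data = Highlighted(root=[Token(token=t, class_or_confidence=c) for t, c in pairs])
print(data.model_dump_json())
```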
- """ - IOComponent.__init__( - self, + super().__init__( label=label, every=every, show_label=show_label, @@ -94,6 +97,9 @@ def __init__( **kwargs, ) + def example_inputs(self) -> Any: + return {"value": [{"token": "Hello", "class_or_confidence": "1"}]} + @staticmethod def update( value: list[tuple[str, str | float | None]] @@ -130,7 +136,7 @@ def update( def postprocess( self, y: list[tuple[str, str | float | None]] | dict | None - ) -> list[tuple[str, str | float | None]] | None: + ) -> HighlightedTextData | None: """ Parameters: y: List of (word, category) tuples, or a dictionary of two keys: "text", and "entities", which itself is a list of dictionaries, each of which have the keys: "entity" (or "entity_group"), "start", and "end" @@ -182,9 +188,19 @@ def postprocess( running_category = category if running_text is not None: output.append((running_text, running_category)) - return output + return HighlightedTextData( + root=[ + HighlightedToken(token=o[0], class_or_confidence=o[1]) + for o in output + ] + ) else: - return y + return HighlightedTextData( + root=[HighlightedToken(token=o[0], class_or_confidence=o[1]) for o in y] + ) + + def preprocess(self, x: Any) -> Any: + return super().preprocess(x) def style( self, diff --git a/gradio/components/html.py b/gradio/components/html.py index 19199abc6c9f..aea0847396ee 100644 --- a/gradio/components/html.py +++ b/gradio/components/html.py @@ -6,16 +6,15 @@ from typing import Any, Callable, Literal from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import StringSerializable -from gradio.components.base import IOComponent, _Keywords -from gradio.events import Changeable +from gradio.components.base import Component, _Keywords +from gradio.events import Events set_documentation_group("component") @document() -class HTML(Changeable, IOComponent, StringSerializable): +class HTML(Component): """ Used to display arbitrary HTML output. Preprocessing: this component does *not* accept input. @@ -25,6 +24,8 @@ class HTML(Changeable, IOComponent, StringSerializable): Guides: key-features """ + EVENTS = [Events.change] + def __init__( self, value: str | Callable = "", @@ -47,8 +48,7 @@ def __init__( elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles. """ - IOComponent.__init__( - self, + super().__init__( label=label, every=every, show_label=show_label, @@ -59,6 +59,15 @@ def __init__( **kwargs, ) + def example_inputs(self) -> Any: + return "
<p>Hello</p>
" + + def preprocess(self, x: Any) -> Any: + return x + + def postprocess(self, y): + return y + @staticmethod def update( value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, @@ -77,3 +86,6 @@ def update( "__type__": "update", } return updated_config + + def api_info(self) -> dict[str, Any]: + return {"type": "string"} diff --git a/gradio/components/image.py b/gradio/components/image.py index b5fd6a19cd02..de3c997bd708 100644 --- a/gradio/components/image.py +++ b/gradio/components/image.py @@ -11,39 +11,19 @@ import PIL.ImageOps from gradio_client import utils as client_utils from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import ImgSerializable from PIL import Image as _Image # using _ to minimize namespace pollution from gradio import processing_utils, utils -from gradio.components.base import IOComponent, _Keywords -from gradio.deprecation import warn_style_method_deprecation -from gradio.events import ( - Changeable, - Clearable, - Editable, - EventListenerMethod, - Selectable, - Streamable, - Uploadable, -) -from gradio.interpretation import TokenInterpretable +from gradio.components.base import Component, StreamingInput, _Keywords +from gradio.data_classes import FileData +from gradio.events import Events set_documentation_group("component") _Image.init() # fixes https://github.com/gradio-app/gradio/issues/2843 @document() -class Image( - Editable, - Clearable, - Changeable, - Streamable, - Selectable, - Uploadable, - IOComponent, - ImgSerializable, - TokenInterpretable, -): +class Image(StreamingInput, Component): """ Creates an image component that can be used to upload/draw images (as an input) or display images (as an output). Preprocessing: passes the uploaded image as a {numpy.array}, {PIL.Image} or {str} filepath depending on `type` -- unless `tool` is `sketch` AND source is one of `upload` or `webcam`. In these cases, a {dict} with keys `image` and `mask` is passed, and the format of the corresponding values depends on `type`. @@ -53,6 +33,16 @@ class Image( Guides: image-classification-in-pytorch, image-classification-in-tensorflow, image-classification-with-vision-transformers, building-a-pictionary_app, create-your-own-friends-with-a-gan """ + EVENTS = [ + Events.edit, + Events.clear, + Events.change, + Events.stream, + Events.select, + Events.upload, + ] + data_model = FileData + def __init__( self, value: str | _Image.Image | np.ndarray | None = None, @@ -144,19 +134,12 @@ def __init__( self.show_download_button = show_download_button if streaming and source != "webcam": raise ValueError("Image streaming only available if source is 'webcam'.") - self.select: EventListenerMethod - """ - Event listener for when the user clicks on a pixel within the image. - Uses event data gradio.SelectData to carry `index` to refer to the [x, y] coordinates of the clicked pixel. - See EventData documentation on how to use this event data. 
- """ self.show_share_button = ( (utils.get_space() is not None) if show_share_button is None else show_share_button ) - IOComponent.__init__( - self, + super().__init__( label=label, every=every, show_label=show_label, @@ -170,7 +153,6 @@ def __init__( value=value, **kwargs, ) - TokenInterpretable.__init__(self) @staticmethod def update( @@ -224,10 +206,9 @@ def _format_image( elif self.type == "numpy": return np.array(im) elif self.type == "filepath": - path = self.pil_to_temp_file( - im, dir=self.DEFAULT_TEMP_DIR, format=fmt or "png" + path = processing_utils.save_pil_to_cache( + im, cache_dir=self.GRADIO_CACHE, format=fmt or "png" # type: ignore ) - self.temp_files.add(path) return path else: raise ValueError( @@ -253,8 +234,10 @@ def preprocess( assert isinstance(x, dict) x, mask = x["image"], x["mask"] - assert isinstance(x, str) - im = processing_utils.decode_base64_to_image(x) + if isinstance(x, str): + im = processing_utils.decode_base64_to_image(x) + else: + im = _Image.open(x["name"]) with warnings.catch_warnings(): warnings.simplefilter("ignore") im = im.convert(self.image_mode) @@ -284,7 +267,7 @@ def preprocess( def postprocess( self, y: np.ndarray | _Image.Image | str | Path | None - ) -> str | None: + ) -> FileData | None: """ Parameters: y: image as a numpy array, PIL Image, string/Path filepath, or string URL @@ -294,125 +277,19 @@ def postprocess( if y is None: return None if isinstance(y, np.ndarray): - return processing_utils.encode_array_to_base64(y) + path = processing_utils.save_img_array_to_cache( + y, cache_dir=self.GRADIO_CACHE + ) elif isinstance(y, _Image.Image): - return processing_utils.encode_pil_to_base64(y) + path = processing_utils.save_pil_to_cache(y, cache_dir=self.GRADIO_CACHE) elif isinstance(y, (str, Path)): - return client_utils.encode_url_or_file_to_base64(y) + path = y if isinstance(y, str) else y.name else: raise ValueError("Cannot process this value as an Image") - - def set_interpret_parameters(self, segments: int = 16): - """ - Calculates interpretation score of image subsections by splitting the image into subsections, then using a "leave one out" method to calculate the score of each subsection by whiting out the subsection and measuring the delta of the output value. - Parameters: - segments: Number of interpretation segments to split image into. - """ - self.interpretation_segments = segments - return self - - def _segment_by_slic(self, x): - """ - Helper method that segments an image into superpixels using slic. - Parameters: - x: base64 representation of an image - """ - x = processing_utils.decode_base64_to_image(x) - if self.shape is not None: - x = processing_utils.resize_and_crop(x, self.shape) - resized_and_cropped_image = np.array(x) - try: - from skimage.segmentation import slic - except (ImportError, ModuleNotFoundError) as err: - raise ValueError( - "Error: running this interpretation for images requires scikit-image, please install it first." 
- ) from err - try: - segments_slic = slic( - resized_and_cropped_image, - self.interpretation_segments, - compactness=10, - sigma=1, - start_label=1, - ) - except TypeError: # For skimage 0.16 and older - segments_slic = slic( - resized_and_cropped_image, - self.interpretation_segments, - compactness=10, - sigma=1, - ) - return segments_slic, resized_and_cropped_image - - def tokenize(self, x): - """ - Segments image into tokens, masks, and leave-one-out-tokens - Parameters: - x: base64 representation of an image - Returns: - tokens: list of tokens, used by the get_masked_input() method - leave_one_out_tokens: list of left-out tokens, used by the get_interpretation_neighbors() method - masks: list of masks, used by the get_interpretation_neighbors() method - """ - segments_slic, resized_and_cropped_image = self._segment_by_slic(x) - tokens, masks, leave_one_out_tokens = [], [], [] - replace_color = np.mean(resized_and_cropped_image, axis=(0, 1)) - for segment_value in np.unique(segments_slic): - mask = segments_slic == segment_value - image_screen = np.copy(resized_and_cropped_image) - image_screen[segments_slic == segment_value] = replace_color - leave_one_out_tokens.append( - processing_utils.encode_array_to_base64(image_screen) - ) - token = np.copy(resized_and_cropped_image) - token[segments_slic != segment_value] = 0 - tokens.append(token) - masks.append(mask) - return tokens, leave_one_out_tokens, masks - - def get_masked_inputs(self, tokens, binary_mask_matrix): - masked_inputs = [] - for binary_mask_vector in binary_mask_matrix: - masked_input = np.zeros_like(tokens[0], dtype=int) - for token, b in zip(tokens, binary_mask_vector): - masked_input = masked_input + token * int(b) - masked_inputs.append(processing_utils.encode_array_to_base64(masked_input)) - return masked_inputs - - def get_interpretation_scores( - self, x, neighbors, scores, masks, tokens=None, **kwargs - ) -> list[list[float]]: - """ - Returns: - A 2D array representing the interpretation score of each pixel of the image. - """ - x = processing_utils.decode_base64_to_image(x) - if self.shape is not None: - x = processing_utils.resize_and_crop(x, self.shape) - x = np.array(x) - output_scores = np.zeros((x.shape[0], x.shape[1])) - - for score, mask in zip(scores, masks): - output_scores += score * mask - - max_val, min_val = np.max(output_scores), np.min(output_scores) - if max_val > 0: - output_scores = (output_scores - min_val) / (max_val - min_val) - return output_scores.tolist() - - def style(self, *, height: int | None = None, width: int | None = None, **kwargs): - """ - This method is deprecated. Please set these arguments in the constructor instead. 
- """ - warn_style_method_deprecation() - if height is not None: - self.height = height - if width is not None: - self.width = width - return self + return FileData(name=path, data=None, is_file=True) def check_streamable(self): - if self.source != "webcam": + if self.source != "webcam" and self.streaming: raise ValueError("Image streaming only available if source is 'webcam'.") def as_example(self, input_data: str | Path | None) -> str: @@ -423,3 +300,6 @@ def as_example(self, input_data: str | Path | None) -> str: if self.root_url or client_utils.is_http_url_like(input_data): return input_data return str(utils.abspath(input_data)) + + def example_inputs(self) -> Any: + return "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png" diff --git a/gradio/components/interpretation.py b/gradio/components/interpretation.py deleted file mode 100644 index b261f4f637d2..000000000000 --- a/gradio/components/interpretation.py +++ /dev/null @@ -1,55 +0,0 @@ -"""gr.Interpretation() component""" - -from __future__ import annotations - -from typing import Any, Literal - -from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import SimpleSerializable - -from gradio.components.base import Component, _Keywords - -set_documentation_group("component") - - -@document() -class Interpretation(Component, SimpleSerializable): - """ - Used to create an interpretation widget for a component. - Preprocessing: this component does *not* accept input. - Postprocessing: expects a {dict} with keys "original" and "interpretation". - - Guides: custom-interpretations-with-blocks - """ - - def __init__( - self, - component: Component, - *, - visible: bool = True, - elem_id: str | None = None, - elem_classes: list[str] | str | None = None, - **kwargs, - ): - """ - Parameters: - component: Which component to show in the interpretation widget. - visible: Whether or not the interpretation is visible. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles. - """ - Component.__init__( - self, visible=visible, elem_id=elem_id, elem_classes=elem_classes, **kwargs - ) - self.component = component - - @staticmethod - def update( - value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, - visible: bool | None = None, - ): - return { - "visible": visible, - "value": value, - "__type__": "update", - } diff --git a/gradio/components/json_component.py b/gradio/components/json_component.py index 46bef37ba671..02a75e2bcdec 100644 --- a/gradio/components/json_component.py +++ b/gradio/components/json_component.py @@ -4,22 +4,20 @@ import json import warnings +from pathlib import Path from typing import Any, Callable, Literal from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import JSONSerializable -from gradio.components.base import IOComponent, _Keywords +from gradio.components.base import Component, _Keywords from gradio.deprecation import warn_style_method_deprecation -from gradio.events import ( - Changeable, -) +from gradio.events import Events set_documentation_group("component") @document() -class JSON(Changeable, IOComponent, JSONSerializable): +class JSON(Component): """ Used to display arbitrary JSON output prettily. 
Preprocessing: this component does *not* accept input. @@ -28,6 +26,8 @@ class JSON(Changeable, IOComponent, JSONSerializable): Demos: zip_to_json, blocks_xray """ + EVENTS = [Events.change] + def __init__( self, value: str | dict | list | Callable | None = None, @@ -56,8 +56,7 @@ def __init__( elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles. """ - IOComponent.__init__( - self, + super().__init__( label=label, every=every, show_label=show_label, @@ -110,6 +109,18 @@ def postprocess(self, y: dict | list | str | None) -> dict | list | None: else: return y + def preprocess(self, x: Any) -> Any: + return x + + def example_inputs(self) -> Any: + return {"foo": "bar"} + + def flag(self, x: Any, flag_dir: str | Path = "") -> str: + return json.dumps(x) + + def read_from_flag(self, x: Any, flag_dir: str | Path | None = None): + return json.loads(x) + def style(self, *, container: bool | None = None, **kwargs): """ This method is deprecated. Please set these arguments in the constructor instead. @@ -118,3 +129,6 @@ def style(self, *, container: bool | None = None, **kwargs): if container is not None: self.container = container return self + + def api_info(self) -> dict[str, Any]: + return {"type": {}, "description": "any valid json"} diff --git a/gradio/components/label.py b/gradio/components/label.py index f6e965b0a3f8..0d755e5e569d 100644 --- a/gradio/components/label.py +++ b/gradio/components/label.py @@ -2,29 +2,34 @@ from __future__ import annotations +import json import operator import warnings from pathlib import Path -from typing import Callable, Literal +from typing import Any, Callable, List, Literal, Optional, Union from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import ( - JSONSerializable, -) -from gradio.components.base import IOComponent, _Keywords +from gradio.components.base import Component, _Keywords +from gradio.data_classes import GradioModel from gradio.deprecation import warn_style_method_deprecation -from gradio.events import ( - Changeable, - EventListenerMethod, - Selectable, -) +from gradio.events import Events set_documentation_group("component") +class LabelConfidence(GradioModel): + label: Optional[Union[str, int, float]] = None + confidence: Optional[float] = None + + +class LabelData(GradioModel): + label: Union[str, int, float] + confidences: Optional[List[LabelConfidence]] = None + + @document() -class Label(Changeable, Selectable, IOComponent, JSONSerializable): +class Label(Component): """ Displays a classification label, along with confidence scores of top categories, if provided. Preprocessing: this component does *not* accept input. @@ -35,6 +40,8 @@ class Label(Changeable, Selectable, IOComponent, JSONSerializable): """ CONFIDENCES_KEY = "confidences" + data_model = LabelData + EVENTS = [Events.change, Events.select] def __init__( self, @@ -70,14 +77,7 @@ def __init__( """ self.num_top_classes = num_top_classes self.color = color - self.select: EventListenerMethod - """ - Event listener for when the user selects a category from Label. - Uses event data gradio.SelectData to carry `value` referring to name of selected category, and `index` to refer to index. - See EventData documentation on how to use this event data. 
- """ - IOComponent.__init__( - self, + super().__init__( label=label, every=every, show_label=show_label, @@ -91,7 +91,9 @@ def __init__( **kwargs, ) - def postprocess(self, y: dict[str, float] | str | float | None) -> dict | None: + def postprocess( + self, y: dict[str, float] | str | float | None + ) -> LabelData | dict | None: """ Parameters: y: a dictionary mapping labels to confidence value, or just a string/numerical label by itself @@ -101,9 +103,9 @@ def postprocess(self, y: dict[str, float] | str | float | None) -> dict | None: if y is None or y == {}: return {} if isinstance(y, str) and y.endswith(".json") and Path(y).exists(): - return self.serialize(y) + return LabelData(**json.loads(Path(y).read_text())) if isinstance(y, (str, float, int)): - return {"label": str(y)} + return LabelData(label=str(y)) if isinstance(y, dict): if "confidences" in y and isinstance(y["confidences"], dict): y = y["confidences"] @@ -111,12 +113,15 @@ def postprocess(self, y: dict[str, float] | str | float | None) -> dict | None: sorted_pred = sorted(y.items(), key=operator.itemgetter(1), reverse=True) if self.num_top_classes is not None: sorted_pred = sorted_pred[: self.num_top_classes] - return { - "label": sorted_pred[0][0], - "confidences": [ - {"label": pred[0], "confidence": pred[1]} for pred in sorted_pred - ], - } + return LabelData( + **{ + "label": sorted_pred[0][0], + "confidences": [ + {"label": pred[0], "confidence": pred[1]} + for pred in sorted_pred + ], + } + ) raise ValueError( "The `Label` output interface expects one of: a string label, or an int label, a " "float label, or a dictionary whose keys are labels and values are confidences. " @@ -175,3 +180,15 @@ def style( if container is not None: self.container = container return self + + def preprocess(self, x: Any) -> Any: + return x + + def example_inputs(self) -> Any: + return { + "label": "Cat", + "confidences": [ + {"label": "cat", "confidence": 0.9}, + {"label": "dog", "confidence": 0.1}, + ], + } diff --git a/gradio/components/line_plot.py b/gradio/components/line_plot.py index 7feb215679ce..59b523354918 100644 --- a/gradio/components/line_plot.py +++ b/gradio/components/line_plot.py @@ -3,14 +3,14 @@ from __future__ import annotations import warnings -from typing import Callable, Literal +from typing import Any, Callable, Literal import altair as alt import pandas as pd from gradio_client.documentation import document, set_documentation_group from gradio.components.base import _Keywords -from gradio.components.plot import AltairPlot, Plot +from gradio.components.plot import AltairPlot, AltairPlotData, Plot set_documentation_group("component") @@ -26,6 +26,8 @@ class LinePlot(Plot): Demos: line_plot, live_dashboard """ + data_model = AltairPlotData + def __init__( self, value: pd.DataFrame | Callable | None = None, @@ -423,7 +425,9 @@ def create_plot( return chart - def postprocess(self, y: pd.DataFrame | dict | None) -> dict[str, str] | None: + def postprocess( + self, y: pd.DataFrame | dict | None + ) -> AltairPlotData | dict | None: # if None or update if y is None or isinstance(y, dict): return y @@ -453,4 +457,12 @@ def postprocess(self, y: pd.DataFrame | dict | None) -> dict[str, str] | None: width=self.width, ) - return {"type": "altair", "plot": chart.to_json(), "chart": "line"} + return AltairPlotData( + **{"type": "altair", "plot": chart.to_json(), "chart": "line"} + ) + + def example_inputs(self) -> Any: + return None + + def preprocess(self, x: Any) -> Any: + return x diff --git a/gradio/components/markdown.py 
b/gradio/components/markdown.py index fa58269a4ff5..3ebcaa67404f 100644 --- a/gradio/components/markdown.py +++ b/gradio/components/markdown.py @@ -7,18 +7,15 @@ from typing import Any, Callable, Literal from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import StringSerializable -from gradio.components.base import IOComponent, _Keywords -from gradio.events import ( - Changeable, -) +from gradio.components.base import Component, _Keywords +from gradio.events import Events set_documentation_group("component") @document() -class Markdown(IOComponent, Changeable, StringSerializable): +class Markdown(Component): """ Used to render arbitrary Markdown output. Can also render latex enclosed by dollar signs. Preprocessing: this component does *not* accept input. @@ -28,6 +25,8 @@ class Markdown(IOComponent, Changeable, StringSerializable): Guides: key-features """ + EVENTS = [Events.change] + def __init__( self, value: str | Callable = "", @@ -53,14 +52,11 @@ def __init__( line_breaks: If True, will enable Github-flavored Markdown line breaks in chatbot messages. If False (default), single new lines will be ignored. """ self.rtl = rtl - if latex_delimiters is None: - latex_delimiters = [{"left": "$", "right": "$", "display": False}] self.latex_delimiters = latex_delimiters self.sanitize_html = sanitize_html self.line_breaks = line_breaks - IOComponent.__init__( - self, + super().__init__( visible=visible, elem_id=elem_id, elem_classes=elem_classes, @@ -106,3 +102,12 @@ def update( def as_example(self, input_data: str | None) -> str: postprocessed = self.postprocess(input_data) return postprocessed if postprocessed else "" + + def preprocess(self, x: Any) -> Any: + return x + + def example_inputs(self) -> Any: + return "# Hello!" + + def api_info(self) -> dict[str, Any]: + return {"type": "string"} diff --git a/gradio/components/model3d.py b/gradio/components/model3d.py index d11bef86b2c1..af2a80f06206 100644 --- a/gradio/components/model3d.py +++ b/gradio/components/model3d.py @@ -6,25 +6,17 @@ from pathlib import Path from typing import Any, Callable, Literal -from gradio_client import media_data from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import FileSerializable -from gradio.components.base import IOComponent, _Keywords -from gradio.events import ( - Changeable, - Clearable, - Editable, - Uploadable, -) +from gradio.components.base import Component, _Keywords +from gradio.data_classes import FileData +from gradio.events import Events set_documentation_group("component") @document() -class Model3D( - Changeable, Uploadable, Editable, Clearable, IOComponent, FileSerializable -): +class Model3D(Component): """ Component allows users to upload or view 3D Model files (.obj, .glb, or .gltf). Preprocessing: This component passes the uploaded file as a {str}filepath. 
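The hunks above swap loose dict payloads for typed `data_model` classes (GalleryData, HighlightedTextData, LabelData, PlotData/AltairPlotData). A minimal sketch of that pattern follows, assuming GradioModel/GradioRootModel behave like pydantic v2 BaseModel/RootModel; FileDataLike, GalleryImageLike and GalleryDataLike are hypothetical stand-ins, not Gradio classes.

```python
# Illustrative sketch only -- stand-in names, assuming pydantic v2 semantics
# for GradioModel / GradioRootModel (BaseModel / RootModel).
from typing import List, Optional

from pydantic import BaseModel, RootModel


class FileDataLike(BaseModel):
    # hypothetical stand-in for gradio.data_classes.FileData
    name: Optional[str] = None
    data: Optional[str] = None
    is_file: bool = False


class GalleryImageLike(BaseModel):
    # mirrors the GalleryImage model added in the Gallery hunk
    image: FileDataLike
    caption: Optional[str] = None


class GalleryDataLike(RootModel):
    # mirrors GalleryData: a root model wrapping a list of entries
    root: List[GalleryImageLike]


payload = GalleryDataLike(
    root=[
        GalleryImageLike(
            image=FileDataLike(name="/tmp/cat.png", is_file=True),
            caption="a cat",
        )
    ]
)
# model_dump() is roughly what would cross the wire to the frontend as JSON
print(payload.model_dump())
```

Typing the payload this way is also what makes the new `api_info()` implementations cheap: a model can emit its own JSON schema instead of relying on the removed serializer mixins.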
@@ -34,6 +26,10 @@ class Model3D( Guides: how-to-use-3D-model-component """ + EVENTS = [Events.change, Events.upload, Events.edit, Events.clear] + + data_model = FileData + def __init__( self, value: str | Callable | None = None, @@ -80,9 +76,7 @@ def __init__( self.camera_position = camera_position self.height = height self.zoom_speed = zoom_speed - - IOComponent.__init__( - self, + super().__init__( label=label, every=every, show_label=show_label, @@ -96,12 +90,6 @@ def __init__( **kwargs, ) - def example_inputs(self) -> dict[str, Any]: - return { - "raw": {"is_file": False, "data": media_data.BASE64_MODEL3D}, - "serialized": "https://github.com/gradio-app/gradio/raw/main/test/test_files/Box.gltf", - } - @staticmethod def update( value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, @@ -147,19 +135,9 @@ def preprocess(self, x: dict[str, str] | None) -> str | None: """ if x is None: return x - file_name, file_data, is_file = ( - x["name"], - x["data"], - x.get("is_file", False), - ) - if is_file: - temp_file_path = self.make_temp_copy_if_needed(file_name) - else: - temp_file_path = self.base64_to_temp_file_if_needed(file_data, file_name) - - return temp_file_path + return x["name"] - def postprocess(self, y: str | Path | None) -> dict[str, str] | None: + def postprocess(self, y: str | Path | None) -> FileData | None: """ Parameters: y: path to the model @@ -168,12 +146,11 @@ def postprocess(self, y: str | Path | None) -> dict[str, str] | None: """ if y is None: return y - data = { - "name": self.make_temp_copy_if_needed(y), - "data": None, - "is_file": True, - } - return data + return FileData(name=str(y), is_file=True) def as_example(self, input_data: str | None) -> str: return Path(input_data).name if input_data else "" + + def example_inputs(self): + # TODO: Use permanent link + return "https://raw.githubusercontent.com/gradio-app/gradio/main/demo/model3D/files/Fox.gltf" diff --git a/gradio/components/number.py b/gradio/components/number.py index 6f6d0149ac50..85ad9a3aece4 100644 --- a/gradio/components/number.py +++ b/gradio/components/number.py @@ -2,38 +2,20 @@ from __future__ import annotations -import math import warnings -from typing import Callable, Literal +from typing import Any, Callable, Literal -import numpy as np from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import NumberSerializable - -from gradio.components.base import FormComponent, IOComponent, _Keywords -from gradio.events import ( - Changeable, - Focusable, - Inputable, - Submittable, -) + +from gradio.components.base import FormComponent, _Keywords +from gradio.events import Events from gradio.exceptions import Error -from gradio.interpretation import NeighborInterpretable set_documentation_group("component") @document() -class Number( - FormComponent, - Changeable, - Inputable, - Submittable, - Focusable, - IOComponent, - NumberSerializable, - NeighborInterpretable, -): +class Number(FormComponent): """ Creates a numeric field for user to enter numbers as input or display numeric output. Preprocessing: passes field value as a {float} or {int} into the function, depending on `precision`. 
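The Image and Model3D hunks above drop the base64/temp-file plumbing in favour of one round trip: postprocess returns a FileData pointing at a path on disk, and preprocess reads the path back out of the dict the frontend sends. A hedged sketch of that contract with stand-in types (DemoModel3D and FileDataLike are illustrative, not the real base classes):

```python
# Hedged sketch of the FileData round trip -- DemoModel3D / FileDataLike are
# stand-ins, not Gradio's actual classes.
from __future__ import annotations

from dataclasses import dataclass
from pathlib import Path
from typing import Optional


@dataclass
class FileDataLike:
    name: Optional[str] = None
    data: Optional[str] = None
    is_file: bool = False


class DemoModel3D:
    """Mimics the post-refactor contract: path in, FileData out."""

    def postprocess(self, y: str | Path | None) -> FileDataLike | None:
        # no base64 encoding, no temp copy: just point at the file on disk
        if y is None:
            return None
        return FileDataLike(name=str(y), is_file=True)

    def preprocess(self, x: dict | None) -> str | None:
        # the frontend sends the FileData back as a dict; keep only the path
        if x is None:
            return None
        return x["name"]


component = DemoModel3D()
wire_value = component.postprocess("demo/files/Fox.gltf")
assert component.preprocess({"name": wire_value.name, "is_file": True}) == "demo/files/Fox.gltf"
```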
@@ -43,6 +25,8 @@ class Number( Demos: tax_calculator, titanic_survival, blocks_simple_squares """ + EVENTS = [Events.change, Events.input, Events.submit, Events.focus] + def __init__( self, value: float | Callable | None = None, @@ -88,8 +72,7 @@ def __init__( self.maximum = maximum self.step = step - IOComponent.__init__( - self, + super().__init__( label=label, info=info, every=every, @@ -104,7 +87,6 @@ def __init__( value=value, **kwargs, ) - NeighborInterpretable.__init__(self) @staticmethod def _round_to_precision(num: float | int, precision: int | None) -> float | int: @@ -188,51 +170,8 @@ def postprocess(self, y: float | None) -> float | None: return None return self._round_to_precision(y, self.precision) - def set_interpret_parameters( - self, steps: int = 3, delta: float = 1, delta_type: str = "percent" - ): - """ - Calculates interpretation scores of numeric values close to the input number. - Parameters: - steps: Number of nearby values to measure in each direction (above and below the input number). - delta: Size of step in each direction between nearby values. - delta_type: "percent" if delta step between nearby values should be a calculated as a percent, or "absolute" if delta should be a constant step change. - """ - self.interpretation_steps = steps - self.interpretation_delta = delta - self.interpretation_delta_type = delta_type - return self - - def get_interpretation_neighbors(self, x: float | int) -> tuple[list[float], dict]: - x = self._round_to_precision(x, self.precision) - if self.interpretation_delta_type == "percent": - delta = 1.0 * self.interpretation_delta * x / 100 - elif self.interpretation_delta_type == "absolute": - delta = self.interpretation_delta - else: - delta = self.interpretation_delta - if self.precision == 0 and math.floor(delta) != delta: - raise ValueError( - f"Delta value {delta} is not an integer and precision=0. Cannot generate valid set of neighbors. " - "If delta_type='percent', pick a value of delta such that x * delta is an integer. " - "If delta_type='absolute', pick a value of delta that is an integer." - ) - # run_interpretation will preprocess the neighbors so no need to convert to int here - negatives = ( - np.array(x) + np.arange(-self.interpretation_steps, 0) * delta - ).tolist() - positives = ( - np.array(x) + np.arange(1, self.interpretation_steps + 1) * delta - ).tolist() - return negatives + positives, {} - - def get_interpretation_scores( - self, x: float, neighbors: list[float], scores: list[float | None], **kwargs - ) -> list[tuple[float, float | None]]: - """ - Returns: - Each tuple set represents a numeric value near the input and its corresponding interpretation score. 
- """ - interpretation = list(zip(neighbors, scores)) - interpretation.insert(int(len(interpretation) / 2), (x, None)) - return interpretation + def api_info(self) -> dict[str, str]: + return {"type": "number"} + + def example_inputs(self) -> Any: + return 3 diff --git a/gradio/components/plot.py b/gradio/components/plot.py index 763f871c9d7b..03f37777efbe 100644 --- a/gradio/components/plot.py +++ b/gradio/components/plot.py @@ -10,18 +10,28 @@ import altair as alt import pandas as pd from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import JSONSerializable from gradio import processing_utils -from gradio.components.base import IOComponent, _Keywords +from gradio.components.base import Component, _Keywords +from gradio.data_classes import GradioModel from gradio.deprecation import warn_style_method_deprecation -from gradio.events import Changeable, Clearable +from gradio.events import Events set_documentation_group("component") +class PlotData(GradioModel): + type: Literal["altair", "bokeh", "plotly", "matplotlib"] + plot: str + + +class AltairPlotData(PlotData): + chart: Literal["bar", "line", "scatter"] + type: Literal["altair"] = "altair" + + @document() -class Plot(Changeable, Clearable, IOComponent, JSONSerializable): +class Plot(Component): """ Used to display various kinds of plots (matplotlib, plotly, or bokeh are supported) Preprocessing: this component does *not* accept input. @@ -31,6 +41,9 @@ class Plot(Changeable, Clearable, IOComponent, JSONSerializable): Guides: plot-component-for-maps """ + data_model = PlotData + EVENTS = [Events.change, Events.clear] + def __init__( self, value: Callable | None | pd.DataFrame = None, @@ -59,8 +72,7 @@ def __init__( elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles. 
""" - IOComponent.__init__( - self, + super().__init__( label=label, every=every, show_label=show_label, @@ -111,7 +123,13 @@ def update( } return updated_config - def postprocess(self, y) -> dict[str, str] | None: + def preprocess(self, x: Any) -> Any: + return x + + def example_inputs(self) -> Any: + return None + + def postprocess(self, y) -> PlotData | None: """ Parameters: y: plot data @@ -134,7 +152,7 @@ def postprocess(self, y) -> dict[str, str] | None: is_altair = "altair" in y.__module__ dtype = "altair" if is_altair else "plotly" out_y = y.to_json() - return {"type": dtype, "plot": out_y} + return PlotData(**{"type": dtype, "plot": out_y}) def style(self, container: bool | None = None): """ diff --git a/gradio/components/radio.py b/gradio/components/radio.py index 2e1d1aad80b9..8fe38483a52a 100644 --- a/gradio/components/radio.py +++ b/gradio/components/radio.py @@ -6,26 +6,15 @@ from typing import Any, Callable, Literal from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import StringSerializable -from gradio.components.base import FormComponent, IOComponent, _Keywords -from gradio.deprecation import warn_deprecation, warn_style_method_deprecation -from gradio.events import Changeable, EventListenerMethod, Inputable, Selectable -from gradio.interpretation import NeighborInterpretable +from gradio.components.base import FormComponent, _Keywords +from gradio.events import Events set_documentation_group("component") @document() -class Radio( - FormComponent, - Selectable, - Changeable, - Inputable, - IOComponent, - StringSerializable, - NeighborInterpretable, -): +class Radio(FormComponent): """ Creates a set of (string or numeric type) radio buttons of which only one can be selected. Preprocessing: passes the value of the selected radio button as a {str} or {int} or {float} or its index as an {int} into the function, depending on `type`. @@ -35,6 +24,8 @@ class Radio( Demos: sentence_builder, titanic_survival, blocks_essay """ + EVENTS = [Events.select, Events.change, Events.input] + def __init__( self, choices: list[str | int | float | tuple[str, str | int | float]] | None = None, @@ -84,14 +75,7 @@ def __init__( f"Invalid value for parameter `type`: {type}. Please choose from one of: {valid_types}" ) self.type = type - self.select: EventListenerMethod - """ - Event listener for when the user selects Radio option. - Uses event data gradio.SelectData to carry `value` referring to label of selected option, and `index` to refer to index. - See EventData documentation on how to use this event data. - """ - IOComponent.__init__( - self, + super().__init__( label=label, info=info, every=every, @@ -106,13 +90,9 @@ def __init__( value=value, **kwargs, ) - NeighborInterpretable.__init__(self) - def example_inputs(self) -> dict[str, Any]: - return { - "raw": self.choices[0][1] if self.choices else None, - "serialized": self.choices[0][1] if self.choices else None, - } + def example_inputs(self) -> Any: + return self.choices[0][1] if self.choices else None @staticmethod def update( @@ -173,38 +153,15 @@ def preprocess(self, x: str | int | float | None) -> str | int | float | None: f"Unknown type: {self.type}. Please choose from: 'value', 'index'." 
) - def get_interpretation_neighbors(self, x): - choices = [value for _, value in self.choices] - choices.remove(x) - return choices, {} - - def get_interpretation_scores( - self, x, neighbors, scores: list[float | None], **kwargs - ) -> list: - """ - Returns: - Each value represents the interpretation score corresponding to each choice. - """ - choices = [value for _, value in self.choices] - scores.insert(choices.index(x), None) - return scores + def postprocess(self, y): + return y - def style( - self, - *, - item_container: bool | None = None, - container: bool | None = None, - **kwargs, - ): - """ - This method is deprecated. Please set these arguments in the constructor instead. - """ - warn_style_method_deprecation() - if item_container is not None: - warn_deprecation("The `item_container` parameter is deprecated.") - if container is not None: - self.container = container - return self + def api_info(self) -> dict[str, Any]: + return { + "enum": [c[1] for c in self.choices], + "title": "Radio", + "type": "string", + } def as_example(self, input_data): return next((c[0] for c in self.choices if c[1] == input_data), None) diff --git a/gradio/components/scatter_plot.py b/gradio/components/scatter_plot.py index 2369570eb87e..d5c57f96fa8a 100644 --- a/gradio/components/scatter_plot.py +++ b/gradio/components/scatter_plot.py @@ -3,7 +3,7 @@ from __future__ import annotations import warnings -from typing import Callable, Literal +from typing import Any, Callable, Literal import altair as alt import pandas as pd @@ -11,7 +11,7 @@ from pandas.api.types import is_numeric_dtype from gradio.components.base import _Keywords -from gradio.components.plot import AltairPlot, Plot +from gradio.components.plot import AltairPlot, AltairPlotData, Plot set_documentation_group("component") @@ -28,6 +28,8 @@ class ScatterPlot(Plot): Guides: creating-a-dashboard-from-bigquery-data """ + data_model = AltairPlotData + def __init__( self, value: pd.DataFrame | Callable | None = None, @@ -463,7 +465,9 @@ def create_plot( return chart - def postprocess(self, y: pd.DataFrame | dict | None) -> dict[str, str] | None: + def postprocess( + self, y: pd.DataFrame | dict | None + ) -> AltairPlotData | dict | None: # if None or update if y is None or isinstance(y, dict): return y @@ -495,4 +499,12 @@ def postprocess(self, y: pd.DataFrame | dict | None) -> dict[str, str] | None: y_lim=self.y_lim, ) - return {"type": "altair", "plot": chart.to_json(), "chart": "scatter"} + return AltairPlotData( + **{"type": "altair", "plot": chart.to_json(), "chart": "scatter"} + ) + + def example_inputs(self) -> Any: + return None + + def preprocess(self, x: Any) -> Any: + return x diff --git a/gradio/components/slider.py b/gradio/components/slider.py index 514fec178b6a..7c1162259993 100644 --- a/gradio/components/slider.py +++ b/gradio/components/slider.py @@ -7,28 +7,16 @@ import warnings from typing import Any, Callable, Literal -import numpy as np from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import NumberSerializable -from gradio.components.base import FormComponent, IOComponent, _Keywords -from gradio.deprecation import warn_style_method_deprecation -from gradio.events import Changeable, Inputable, Releaseable -from gradio.interpretation import NeighborInterpretable +from gradio.components.base import FormComponent, _Keywords +from gradio.events import Events set_documentation_group("component") @document() -class Slider( - FormComponent, - Changeable, - Inputable, - 
Releaseable, - IOComponent, - NumberSerializable, - NeighborInterpretable, -): +class Slider(FormComponent): """ Creates a slider that ranges from `minimum` to `maximum` with a step size of `step`. Preprocessing: passes slider value as a {float} into the function. @@ -39,6 +27,8 @@ class Slider( Guides: create-your-own-friends-with-a-gan """ + EVENTS = [Events.change, Events.input, Events.release] + def __init__( self, minimum: float = 0, @@ -89,8 +79,7 @@ def __init__( self.step = step if randomize: value = self.get_random_value - IOComponent.__init__( - self, + super().__init__( label=label, info=info, every=every, @@ -105,22 +94,15 @@ def __init__( value=value, **kwargs, ) - NeighborInterpretable.__init__(self) - def api_info(self) -> dict[str, dict | bool]: + def api_info(self) -> dict[str, Any]: return { - "info": { - "type": "number", - "description": f"numeric value between {self.minimum} and {self.maximum}", - }, - "serialized_info": False, + "type": "number", + "description": f"numeric value between {self.minimum} and {self.maximum}", } - def example_inputs(self) -> dict[str, Any]: - return { - "raw": self.minimum, - "serialized": self.minimum, - } + def example_inputs(self) -> Any: + return self.minimum def get_random_value(self): n_steps = int((self.maximum - self.minimum) / self.step) @@ -176,30 +158,5 @@ def postprocess(self, y: float | None) -> float | None: """ return self.minimum if y is None else y - def set_interpret_parameters(self, steps: int = 8) -> Slider: - """ - Calculates interpretation scores of numeric values ranging between the minimum and maximum values of the slider. - Parameters: - steps: Number of neighboring values to measure between the minimum and maximum values of the slider range. - """ - self.interpretation_steps = steps - return self - - def get_interpretation_neighbors(self, x) -> tuple[object, dict]: - return ( - np.linspace(self.minimum, self.maximum, self.interpretation_steps).tolist(), - {}, - ) - - def style( - self, - *, - container: bool | None = None, - ): - """ - This method is deprecated. Please set these arguments in the constructor instead. - """ - warn_style_method_deprecation() - if container is not None: - self.container = container - return self + def preprocess(self, x: Any) -> Any: + return x diff --git a/gradio/components/state.py b/gradio/components/state.py index c027875b090f..a20689adab9f 100644 --- a/gradio/components/state.py +++ b/gradio/components/state.py @@ -6,15 +6,15 @@ from typing import Any from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import SimpleSerializable -from gradio.components.base import IOComponent +from gradio.components.base import Component set_documentation_group("component") @document() -class State(IOComponent, SimpleSerializable): +class State(Component): + EVENTS = [] """ Special hidden component that stores session state across runs of the demo by the same user. The value of the State variable is cleared when the user refreshes the page. @@ -43,7 +43,23 @@ def __init__( raise TypeError( f"The initial value of `gr.State` must be able to be deepcopied. The initial value of type {type(value)} cannot be deepcopied." 
) from err - IOComponent.__init__(self, value=self.value, **kwargs) + super().__init__(value=self.value, **kwargs) + + def preprocess(self, x: Any) -> Any: + return x + + def postprocess(self, y): + return y + + def api_info(self) -> dict[str, Any]: + return {"type": {}, "description": "any valid json"} + + def example_inputs(self) -> Any: + return None + + @property + def skip_api(self): + return True class Variable(State): diff --git a/gradio/components/status_tracker.py b/gradio/components/status_tracker.py index a9abec2969d9..b5a9d35f9d4d 100644 --- a/gradio/components/status_tracker.py +++ b/gradio/components/status_tracker.py @@ -1,11 +1,11 @@ """gr.StatusTracker() component.""" -from gradio_client.serializing import SimpleSerializable - from gradio.components.base import Component from gradio.deprecation import warn_deprecation -class StatusTracker(Component, SimpleSerializable): +class StatusTracker(Component): + EVENTS = [] + def __init__( self, **kwargs, diff --git a/gradio/components/textbox.py b/gradio/components/textbox.py index 7b8bed6e19a4..9617ef7faa15 100644 --- a/gradio/components/textbox.py +++ b/gradio/components/textbox.py @@ -3,43 +3,21 @@ from __future__ import annotations import warnings -from typing import Callable, Literal +from typing import Any, Callable, Literal -import numpy as np from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import StringSerializable from gradio.components.base import ( FormComponent, - IOComponent, _Keywords, ) -from gradio.deprecation import warn_style_method_deprecation -from gradio.events import ( - Changeable, - EventListenerMethod, - Focusable, - Inputable, - Selectable, - Submittable, -) -from gradio.interpretation import TokenInterpretable +from gradio.events import Events set_documentation_group("component") @document() -class Textbox( - FormComponent, - Changeable, - Inputable, - Selectable, - Submittable, - Focusable, - IOComponent, - StringSerializable, - TokenInterpretable, -): +class Textbox(FormComponent): """ Creates a textarea for user to enter string input or display string output. Preprocessing: passes textarea value as a {str} into the function. @@ -50,6 +28,15 @@ class Textbox( Guides: creating-a-chatbot, real-time-speech-recognition """ + EVENTS = [ + Events.change, + Events.input, + Events.select, + Events.submit, + Events.focus, + Events.blur, + ] + def __init__( self, value: str | Callable | None = "", @@ -111,15 +98,8 @@ def __init__( self.placeholder = placeholder self.show_copy_button = show_copy_button self.autofocus = autofocus - self.select: EventListenerMethod self.autoscroll = autoscroll - """ - Event listener for when the user selects text in the Textbox. - Uses event data gradio.SelectData to carry `value` referring to selected substring, and `index` tuple referring to selected range endpoints. - See EventData documentation on how to use this event data. 
- """ - IOComponent.__init__( - self, + super().__init__( label=label, info=info, every=every, @@ -134,7 +114,6 @@ def __init__( value=value, **kwargs, ) - TokenInterpretable.__init__(self) self.type = type self.rtl = rtl self.text_align = text_align @@ -205,74 +184,8 @@ def postprocess(self, y: str | None) -> str | None: """ return None if y is None else str(y) - def set_interpret_parameters( - self, separator: str = " ", replacement: str | None = None - ): - """ - Calculates interpretation score of characters in input by splitting input into tokens, then using a "leave one out" method to calculate the score of each token by removing each token and measuring the delta of the output value. - Parameters: - separator: Separator to use to split input into tokens. - replacement: In the "leave one out" step, the text that the token should be replaced with. If None, the token is removed altogether. - """ - self.interpretation_separator = separator - self.interpretation_replacement = replacement - return self - - def tokenize(self, x: str) -> tuple[list[str], list[str], None]: - """ - Tokenizes an input string by dividing into "words" delimited by self.interpretation_separator - """ - tokens = x.split(self.interpretation_separator) - leave_one_out_strings = [] - for index in range(len(tokens)): - leave_one_out_set = list(tokens) - if self.interpretation_replacement is None: - leave_one_out_set.pop(index) - else: - leave_one_out_set[index] = self.interpretation_replacement - leave_one_out_strings.append( - self.interpretation_separator.join(leave_one_out_set) - ) - return tokens, leave_one_out_strings, None + def api_info(self) -> dict[str, Any]: + return {"type": "string"} - def get_masked_inputs( - self, tokens: list[str], binary_mask_matrix: list[list[int]] - ) -> list[str]: - """ - Constructs partially-masked sentences for SHAP interpretation - """ - masked_inputs = [] - for binary_mask_vector in binary_mask_matrix: - masked_input = np.array(tokens)[np.array(binary_mask_vector, dtype=bool)] - masked_inputs.append(self.interpretation_separator.join(masked_input)) - return masked_inputs - - def get_interpretation_scores( - self, x, neighbors, scores: list[float], tokens: list[str], masks=None, **kwargs - ) -> list[tuple[str, float]]: - """ - Returns: - Each tuple set represents a set of characters and their corresponding interpretation score. - """ - result = [] - for token, score in zip(tokens, scores): - result.append((token, score)) - result.append((self.interpretation_separator, 0)) - return result - - def style( - self, - *, - show_copy_button: bool | None = None, - container: bool | None = None, - **kwargs, - ): - """ - This method is deprecated. Please set these arguments in the constructor instead. - """ - warn_style_method_deprecation() - if show_copy_button is not None: - self.show_copy_button = show_copy_button - if container is not None: - self.container = container - return self + def example_inputs(self) -> Any: + return "Hello!!" 
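With the serializer mixins removed, each component above now describes its own API surface through api_info() and example_inputs() (Textbox returns {"type": "string"}, Slider a number schema with a range description, Number a plain {"type": "number"}). A rough sketch, using stand-in classes, of how an endpoint/docs generator might consume that pair; describe_endpoint and the *Like classes are hypothetical:

```python
# Rough sketch with stand-in classes -- shows the api_info()/example_inputs()
# shape used in the hunks above, not Gradio's real view generation.
from typing import Any


class TextboxLike:
    def api_info(self) -> dict[str, Any]:
        return {"type": "string"}

    def example_inputs(self) -> Any:
        return "Hello!!"


class SliderLike:
    def __init__(self, minimum: float = 0, maximum: float = 10):
        self.minimum, self.maximum = minimum, maximum

    def api_info(self) -> dict[str, Any]:
        return {
            "type": "number",
            "description": f"numeric value between {self.minimum} and {self.maximum}",
        }

    def example_inputs(self) -> Any:
        return self.minimum


def describe_endpoint(components: list[Any]) -> list[dict[str, Any]]:
    # one entry per input: its JSON-schema fragment plus a sample value
    return [
        {"schema": c.api_info(), "example": c.example_inputs()} for c in components
    ]


print(describe_endpoint([TextboxLike(), SliderLike(0, 100)]))
```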
diff --git a/gradio/components/timeseries.py b/gradio/components/timeseries.py deleted file mode 100644 index 7cdd82ecb092..000000000000 --- a/gradio/components/timeseries.py +++ /dev/null @@ -1,152 +0,0 @@ -"""gr.Timeseries() component.""" - -from __future__ import annotations - -import warnings -from pathlib import Path -from typing import Any, Callable, Literal - -import pandas as pd -from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import JSONSerializable - -from gradio.components.base import IOComponent, _Keywords -from gradio.events import Changeable - -set_documentation_group("component") - - -@document() -class Timeseries(Changeable, IOComponent, JSONSerializable): - """ - Creates a component that can be used to upload/preview timeseries csv files or display a dataframe consisting of a time series graphically. - Preprocessing: passes the uploaded timeseries data as a {pandas.DataFrame} into the function - Postprocessing: expects a {pandas.DataFrame} or {str} path to a csv to be returned, which is then displayed as a timeseries graph - Examples-format: a {str} filepath of csv data with time series data. - Demos: fraud_detector - """ - - def __init__( - self, - value: str | Callable | None = None, - *, - x: str | None = None, - y: str | list[str] | None = None, - colors: list[str] | None = None, - label: str | None = None, - every: float | None = None, - show_label: bool | None = None, - container: bool = True, - scale: int | None = None, - min_width: int = 160, - interactive: bool | None = None, - visible: bool = True, - elem_id: str | None = None, - elem_classes: list[str] | str | None = None, - **kwargs, - ): - """ - Parameters: - value: File path for the timeseries csv file. If callable, the function will be called whenever the app loads to set the initial value of the component. - x: Column name of x (time) series. None if csv has no headers, in which case first column is x series. - y: Column name of y series, or list of column names if multiple series. None if csv has no headers, in which case every column after first is a y series. - label: component name in interface. - every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - colors: an ordered list of colors to use for each line plot - show_label: if True, will display label. - container: If True, will place the component in a container - providing some extra padding around the border. - scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer. - min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first. - interactive: if True, will allow users to upload a timeseries csv; if False, can only be used to display timeseries data. If not provided, this is inferred based on whether the component is used as an input or output. - visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. 
- elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles. - """ - self.x = x - if isinstance(y, str): - y = [y] - self.y = y - self.colors = colors - IOComponent.__init__( - self, - label=label, - every=every, - show_label=show_label, - container=container, - scale=scale, - min_width=min_width, - interactive=interactive, - visible=visible, - elem_id=elem_id, - elem_classes=elem_classes, - value=value, - **kwargs, - ) - - @staticmethod - def update( - value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, - colors: list[str] | None = None, - label: str | None = None, - show_label: bool | None = None, - container: bool | None = None, - scale: int | None = None, - min_width: int | None = None, - interactive: bool | None = None, - visible: bool | None = None, - ): - warnings.warn( - "Using the update method is deprecated. Simply return a new object instead, e.g. `return gr.Timeseries(...)` instead of `return gr.Timeseries.update(...)`." - ) - return { - "colors": colors, - "label": label, - "show_label": show_label, - "container": container, - "scale": scale, - "min_width": min_width, - "interactive": interactive, - "visible": visible, - "value": value, - "__type__": "update", - } - - def preprocess(self, x: dict | None) -> pd.DataFrame | None: - """ - Parameters: - x: Dict with keys 'data': 2D array of str, numeric, or bool data, 'headers': list of strings for header names, 'range': optional two element list designating start of end of subrange. - Returns: - Dataframe of timeseries data - """ - if x is None: - return x - elif x.get("is_file"): - dataframe = pd.read_csv(x["name"]) - else: - dataframe = pd.DataFrame(data=x["data"], columns=x["headers"]) - if x.get("range") is not None: - dataframe = dataframe.loc[dataframe[self.x or 0] >= x["range"][0]] - dataframe = dataframe.loc[dataframe[self.x or 0] <= x["range"][1]] - return dataframe - - def postprocess(self, y: str | pd.DataFrame | None) -> dict | None: - """ - Parameters: - y: csv or dataframe with timeseries data - Returns: - JSON object with key 'headers' for list of header names, 'data' for 2D array of string or numeric data - """ - if y is None: - return None - if isinstance(y, str): - dataframe = pd.read_csv(y) - return { - "headers": dataframe.columns.values.tolist(), - "data": dataframe.values.tolist(), - } - if isinstance(y, pd.DataFrame): - return {"headers": y.columns.values.tolist(), "data": y.values.tolist()} - raise ValueError("Cannot process value as Timeseries data") - - def as_example(self, input_data: str | None) -> str: - return Path(input_data).name if input_data else "" diff --git a/gradio/components/upload_button.py b/gradio/components/upload_button.py index 268c6bfc0e91..27eb6a78474e 100644 --- a/gradio/components/upload_button.py +++ b/gradio/components/upload_button.py @@ -4,22 +4,25 @@ import tempfile import warnings -from typing import Any, Callable, Literal +from typing import Any, Callable, List, Literal -from gradio_client import utils as client_utils from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import FileSerializable -from gradio import utils -from gradio.components.base import IOComponent, _Keywords +from gradio.components.base import Component, _Keywords +from gradio.components.file import File +from gradio.data_classes import FileData, GradioRootModel from gradio.deprecation import warn_deprecation, warn_style_method_deprecation -from 
gradio.events import Clickable, Uploadable +from gradio.events import Events set_documentation_group("component") +class ListFiles(GradioRootModel): + root: List[FileData] + + @document() -class UploadButton(Clickable, Uploadable, IOComponent, FileSerializable): +class UploadButton(Component): """ Used to create an upload button, when clicked allows a user to upload files that satisfy the specified file type or generic files (if file_type not set). Preprocessing: passes the uploaded file as a {file-object} or {List[file-object]} depending on `file_count` (or a {bytes}/{List[bytes]} depending on `type`) @@ -28,6 +31,8 @@ class UploadButton(Clickable, Uploadable, IOComponent, FileSerializable): Demos: upload_button """ + EVENTS = [Events.click, Events.upload] + def __init__( self, label: str = "Upload a File", @@ -76,8 +81,7 @@ def __init__( self.file_types = file_types self.label = label self.variant = variant - IOComponent.__init__( - self, + super().__init__( label=label, visible=visible, elem_id=elem_id, @@ -89,6 +93,20 @@ def __init__( **kwargs, ) + def api_info(self) -> dict[str, list[str]]: + if self.file_count == "single": + return FileData.model_json_schema() + else: + return ListFiles.model_json_schema() + + def example_inputs(self) -> Any: + if self.file_count == "single": + return "https://github.com/gradio-app/gradio/raw/main/test/test_files/sample_file.pdf" + else: + return [ + "https://github.com/gradio-app/gradio/raw/main/test/test_files/sample_file.pdf" + ] + @staticmethod def update( value: str @@ -135,48 +153,30 @@ def preprocess( if x is None: return None - def process_single_file(f) -> bytes | tempfile._TemporaryFileWrapper: - file_name, data, is_file = ( - f["name"], - f["data"], - f.get("is_file", False), - ) - if self.type == "file": - if is_file: - path = self.make_temp_copy_if_needed(file_name) - else: - data, _ = client_utils.decode_base64_to_binary(data) - path = self.file_bytes_to_file(data, file_name=file_name) - path = str(utils.abspath(path)) - self.temp_files.add(path) - file = tempfile.NamedTemporaryFile( - delete=False, dir=self.DEFAULT_TEMP_DIR - ) - file.name = path - file.orig_name = file_name # type: ignore - return file - elif self.type == "bytes": - if is_file: - with open(file_name, "rb") as file_data: - return file_data.read() - return client_utils.decode_base64_to_binary(data)[0] - else: - raise ValueError( - "Unknown type: " - + str(self.type) - + ". Please choose from: 'file', 'bytes'." 
- ) - if self.file_count == "single": if isinstance(x, list): - return process_single_file(x[0]) + return File._process_single_file( + x[0], type=self.type, cache_dir=self.GRADIO_CACHE # type: ignore + ) else: - return process_single_file(x) + return File._process_single_file( + x, type=self.type, cache_dir=self.GRADIO_CACHE # type: ignore + ) else: if isinstance(x, list): - return [process_single_file(f) for f in x] + return [ + File._process_single_file( + f, type=self.type, cache_dir=self.GRADIO_CACHE # type: ignore + ) + for f in x + ] else: - return process_single_file(x) + return File._process_single_file( + x, type=self.type, cache_dir=self.GRADIO_CACHE # type: ignore + ) + + def postprocess(self, y): + return super().postprocess(y) def style( self, @@ -198,3 +198,7 @@ def style( if size is not None: self.size = size return self + + @property + def skip_api(self): + return False diff --git a/gradio/components/video.py b/gradio/components/video.py index 5b1d3e5a7473..cc5ee825e2a5 100644 --- a/gradio/components/video.py +++ b/gradio/components/video.py @@ -5,17 +5,16 @@ import tempfile import warnings from pathlib import Path -from typing import Callable, Literal +from typing import Any, Callable, Literal, Optional from gradio_client import utils as client_utils -from gradio_client.data_classes import FileData from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import VideoSerializable from gradio import processing_utils, utils, wasm_utils -from gradio.components.base import IOComponent, _Keywords +from gradio.components.base import Component, _Keywords +from gradio.data_classes import FileData, GradioModel from gradio.deprecation import warn_style_method_deprecation -from gradio.events import Changeable, Clearable, Playable, Recordable, Uploadable +from gradio.events import Events if not wasm_utils.IS_WASM: # TODO: Support ffmpeg on Wasm @@ -24,16 +23,13 @@ set_documentation_group("component") +class VideoData(GradioModel): + video: FileData + subtitles: Optional[FileData] = None + + @document() -class Video( - Changeable, - Clearable, - Playable, - Recordable, - Uploadable, - IOComponent, - VideoSerializable, -): +class Video(Component): """ Creates a video component that can be used to upload/record videos (as an input) or display videos (as an output). For the video to be playable in the browser it must have a compatible container and codec combination. Allowed @@ -46,6 +42,19 @@ class Video( Demos: video_identity, video_subtitle """ + data_model = VideoData + input_data_model = FileData + EVENTS = [ + Events.change, + Events.clear, + Events.start_recording, + Events.stop_recording, + Events.stop, + Events.play, + Events.pause, + Events.end, + ] + def __init__( self, value: str @@ -115,8 +124,7 @@ def __init__( if show_share_button is None else show_share_button ) - IOComponent.__init__( - self, + super().__init__( label=label, every=every, show_label=show_label, @@ -170,9 +178,7 @@ def update( "__type__": "update", } - def preprocess( - self, x: tuple[FileData, FileData | None] | FileData | None - ) -> str | None: + def preprocess(self, x: dict | VideoData) -> str | None: """ Parameters: x: A tuple of (video file data, subtitle file data) or just video file data. 
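The hunk above swaps Video's dict/tuple payloads for the typed VideoData and FileData models imported from gradio.data_classes. A minimal sketch of that round trip, assuming this branch of gradio is installed; the class body mirrors the VideoData added in gradio/components/video.py, and the payload keys and file names are made up for illustration:

# Sketch only: mirrors the VideoData model introduced in this diff.
from typing import Optional

from gradio.data_classes import FileData, GradioModel


class VideoData(GradioModel):
    video: FileData
    subtitles: Optional[FileData] = None


# What used to arrive as a plain dict (or a (video, subtitles) tuple) is now
# validated into a typed model before preprocess() touches it.
payload = {"video": {"name": "example.mp4", "is_file": True}, "subtitles": None}
data = VideoData(**payload)
assert data.video.name == "example.mp4"

# postprocess() goes the other way: build the model and return it; model_dump()
# yields the plain dict that the frontend and the API schema consume.
out = VideoData(video=FileData(name="converted.mp4", is_file=True))
print(out.model_dump())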
@@ -181,30 +187,9 @@ def preprocess( """ if x is None: return None - elif isinstance(x, dict): - video = x - else: - video = x[0] - - file_name, file_data, is_file = ( - video.get("name"), - video["data"], - video.get("is_file", False), - ) - - if is_file: - if file_name is None: - raise ValueError("Received file data without a file name.") - if client_utils.is_http_url_like(file_name): - fn = self.download_temp_copy_if_needed - else: - fn = self.make_temp_copy_if_needed - file_name = Path(fn(file_name)) - else: - if file_data is None: - raise ValueError("Received empty file data.") - file_name = Path(self.base64_to_temp_file_if_needed(file_data, file_name)) - + data: VideoData = VideoData(**x) if isinstance(x, dict) else x + assert data.video.name + file_name = Path(data.video.name) uploaded_format = file_name.suffix.replace(".", "") needs_formatting = self.format is not None and uploaded_format != self.format flip = self.source == "webcam" and self.mirror_webcam @@ -248,7 +233,7 @@ def preprocess( def postprocess( self, y: str | Path | tuple[str | Path, str | Path | None] | None - ) -> tuple[FileData | None, FileData | None] | None: + ) -> VideoData | None: """ Processes a video to ensure that it is in the correct format before returning it to the front end. Parameters: @@ -289,8 +274,8 @@ def postprocess( ) else: raise Exception(f"Cannot process type as video: {type(y)}") - - return processed_files + assert processed_files[0] + return VideoData(video=processed_files[0], subtitles=processed_files[1]) def _format_video(self, video: str | Path | None) -> FileData | None: """ @@ -317,11 +302,13 @@ def _format_video(self, video: str | Path | None) -> FileData | None: # For cases where the video is a URL and does not need to be converted to another format, we can just return the URL if is_url and not (conversion_needed): - return {"name": video, "data": None, "is_file": True} + return FileData(name=video, is_file=True) # For cases where the video needs to be converted to another format if is_url: - video = self.download_temp_copy_if_needed(video) + video = processing_utils.save_url_to_cache( + video, cache_dir=self.GRADIO_CACHE + ) if ( processing_utils.ffmpeg_installed() and not processing_utils.video_is_playable(video) @@ -347,14 +334,7 @@ def _format_video(self, video: str | Path | None) -> FileData | None: ff.run() video = output_file_name - video = self.make_temp_copy_if_needed(video) - - return { - "name": video, - "data": None, - "is_file": True, - "orig_name": Path(video).name, - } + return FileData(name=video, data=None, is_file=True, orig_name=Path(video).name) def _format_subtitle(self, subtitle: str | Path | None) -> FileData | None: """ @@ -394,14 +374,14 @@ def srt_to_vtt(srt_file_path, vtt_file_path): # HTML5 only support vtt format if Path(subtitle).suffix == ".srt": temp_file = tempfile.NamedTemporaryFile( - delete=False, suffix=".vtt", dir=self.DEFAULT_TEMP_DIR + delete=False, suffix=".vtt", dir=self.GRADIO_CACHE ) srt_to_vtt(subtitle, temp_file.name) subtitle = temp_file.name subtitle_data = client_utils.encode_url_or_file_to_base64(subtitle) - return {"name": None, "data": subtitle_data, "is_file": False} + return FileData(name=None, data=subtitle_data, is_file=False) def style(self, *, height: int | None = None, width: int | None = None, **kwargs): """ @@ -413,3 +393,6 @@ def style(self, *, height: int | None = None, width: int | None = None, **kwargs if width is not None: self.width = width return self + + def example_inputs(self) -> Any: + return 
"https://github.com/gradio-app/gradio/raw/main/demo/video_component/files/world.mp4" diff --git a/gradio/data_classes.py b/gradio/data_classes.py index 514bed2accca..33a342e67828 100644 --- a/gradio/data_classes.py +++ b/gradio/data_classes.py @@ -1,9 +1,16 @@ """Pydantic data models and other dataclasses. This is the only file that uses Optional[] typing syntax instead of | None syntax to work with pydantic""" +from __future__ import annotations + +import pathlib +import secrets +import shutil +from abc import ABC, abstractmethod from enum import Enum, auto -from typing import Any, Dict, List, Optional, Union +from typing import Any, List, Optional, Union -from pydantic import BaseModel +from gradio_client.utils import traverse +from pydantic import BaseModel, RootModel, ValidationError from typing_extensions import Literal @@ -17,7 +24,7 @@ class PredictBody(BaseModel): bool ] = False # Whether the data is a batch of samples (i.e. called from the queue if batch=True) or a single sample (i.e. called from the UI) request: Optional[ - Union[Dict, List[Dict]] + Union[dict, List[dict]] ] = None # dictionary of request headers, query parameters, url, etc. (used to to pass in request for queuing) @@ -67,3 +74,90 @@ class LogMessage(BaseModel): msg: str = "log" log: str level: Literal["info", "warning"] + + +class GradioBaseModel(ABC): + def copy_to_dir(self, dir: str | pathlib.Path) -> GradioDataModel: + assert isinstance(self, (BaseModel, RootModel)) + if isinstance(dir, str): + dir = pathlib.Path(dir) + + # TODO: Making sure path is unique should be done in caller + def unique_copy(obj: dict): + data = FileData(**obj) + return data._copy_to_dir( + str(pathlib.Path(dir / secrets.token_hex(10))) + ).model_dump() + + return self.__class__.from_json( + x=traverse( + self.model_dump(), + unique_copy, + FileData.is_file_data, + ) + ) + + @classmethod + @abstractmethod + def from_json(cls, x) -> GradioDataModel: + pass + + +class GradioModel(GradioBaseModel, BaseModel): + @classmethod + def from_json(cls, x) -> GradioModel: + return cls(**x) + + +class GradioRootModel(GradioBaseModel, RootModel): + @classmethod + def from_json(cls, x) -> GradioRootModel: + return cls(root=x) + + +GradioDataModel = Union[GradioModel, GradioRootModel] + + +class FileData(GradioModel): + name: Optional[str] = None + data: Optional[str] = None # base64 encoded data + size: Optional[int] = None # size in bytes + is_file: Optional[bool] = None + orig_name: Optional[str] = None # original filename + mime_type: Optional[str] = None + + @property + def is_none(self): + return all( + f is None + for f in [ + self.name, + self.data, + self.size, + self.is_file, + self.orig_name, + self.mime_type, + ] + ) + + @classmethod + def from_path(cls, path: str) -> FileData: + return cls(name=path, is_file=True) + + def _copy_to_dir(self, dir: str) -> FileData: + pathlib.Path(dir).mkdir(exist_ok=True) + new_obj = dict(self) + if self.is_file: + assert self.name + new_name = shutil.copy(self.name, dir) + new_obj["name"] = new_name + return self.__class__(**new_obj) + + @classmethod + def is_file_data(cls, obj: Any): + if isinstance(obj, dict): + try: + return not FileData(**obj).is_none + except (TypeError, ValidationError): + return False + return False diff --git a/gradio/events.py b/gradio/events.py index 2fb5fdcf56d4..48a84d88cec6 100644 --- a/gradio/events.py +++ b/gradio/events.py @@ -3,22 +3,20 @@ from __future__ import annotations -from functools import wraps +import dataclasses +import string +from functools import partial, 
wraps from typing import TYPE_CHECKING, Any, Callable, Literal, Sequence -from gradio_client.documentation import document, set_documentation_group +from gradio_client.documentation import document + +if TYPE_CHECKING: + from gradio.blocks import Block, Component -from gradio.blocks import Block from gradio.context import Context from gradio.deprecation import warn_deprecation -from gradio.helpers import EventData from gradio.utils import get_cancel_function -if TYPE_CHECKING: # Only import for type checking (is False at runtime). - from gradio.components import Component - -set_documentation_group("events") - def set_cancel_events( triggers: Sequence[EventListenerMethod], @@ -41,35 +39,35 @@ def set_cancel_events( outputs=None, queue=False, preprocess=False, + api_name=False, cancels=fn_indices_to_cancel, ) -class EventListener(Block): - def __init__(self: Any): - for event_listener_class in EventListener.__subclasses__(): - if isinstance(self, event_listener_class): - event_listener_class.__init__(self) - - class Dependency(dict): - def __init__(self, key_vals, dep_index, fn): + def __init__(self, trigger, key_vals, dep_index, fn): super().__init__(key_vals) self.fn = fn - self.then = EventListenerMethod( - None, - "then", - trigger_after=dep_index, - trigger_only_on_success=False, + self.then = partial( + EventListener( + "then", + trigger_after=dep_index, + trigger_only_on_success=False, + has_trigger=False, + ).listener, + trigger, ) """ Triggered after directly preceding event is completed, regardless of success or failure. """ - self.success = EventListenerMethod( - None, - "success", - trigger_after=dep_index, - trigger_only_on_success=True, + self.success = partial( + EventListener( + "success", + trigger_after=dep_index, + trigger_only_on_success=True, + has_trigger=False, + ).listener, + trigger, ) """ Triggered after directly preceding event is completed, if it was successful. @@ -79,138 +77,242 @@ def __call__(self, *args, **kwargs): return self.fn(*args, **kwargs) -class EventListenerMethod: +@document() +class EventData: """ - Triggered on an event deployment. + When a subclass of EventData is added as a type hint to an argument of an event listener method, this object will be passed as that argument. + It contains information about the event that triggered the listener, such the target object, and other data related to the specific event that are attributes of the subclass. + + Example: + table = gr.Dataframe([[1, 2, 3], [4, 5, 6]]) + gallery = gr.Gallery([("cat.jpg", "Cat"), ("dog.jpg", "Dog")]) + textbox = gr.Textbox("Hello World!") + + statement = gr.Textbox() + + def on_select(evt: gr.SelectData): # SelectData is a subclass of EventData + return f"You selected {evt.value} at {evt.index} from {evt.target}" + + table.select(on_select, None, statement) + gallery.select(on_select, None, statement) + textbox.select(on_select, None, statement) + Demos: gallery_selections, tictactoe """ + def __init__(self, target: Block | None, _data: Any): + """ + Parameters: + target: The target object that triggered the event. Can be used to distinguish if multiple components are bound to the same listener. + """ + self.target = target + self._data = _data + + +class SelectData(EventData): + def __init__(self, target: Block | None, data: Any): + super().__init__(target, data) + self.index: int | tuple[int, int] = data["index"] + """ + The index of the selected item. Is a tuple if the component is two dimensional or selection is a range. 
+ """ + self.value: Any = data["value"] + """ + The value of the selected item. + """ + self.selected: bool = data.get("selected", True) + """ + True if the item was selected, False if deselected. + """ + + +@dataclasses.dataclass +class EventListenerMethod: + block: Block | None + event_name: str + + +class EventListener(str): + def __new__(cls, event_name, *args, **kwargs): + return super().__new__(cls, event_name) + def __init__( self, - trigger: Block | None, event_name: str, - show_progress: Literal["full", "minimal", "hidden"] = "full", + has_trigger: bool = True, + config_data: Callable[..., dict[str, Any]] = lambda: {}, + show_progress: Literal["full", "minimal", "hidden"] | None = None, callback: Callable | None = None, trigger_after: int | None = None, trigger_only_on_success: bool = False, ): - self.trigger = trigger + super().__init__() + self.has_trigger = has_trigger + self.config_data = config_data self.event_name = event_name self.show_progress = show_progress - self.callback = callback self.trigger_after = trigger_after self.trigger_only_on_success = trigger_only_on_success + self.callback = callback + self.listener = self._setup( + event_name, + has_trigger, + show_progress, + callback, + trigger_after, + trigger_only_on_success, + ) - def __call__( - self, - fn: Callable | None | Literal["decorator"] = "decorator", - inputs: Component | Sequence[Component] | set[Component] | None = None, - outputs: Component | Sequence[Component] | None = None, - api_name: str | None | Literal[False] = None, - status_tracker: None = None, - scroll_to_output: bool = False, - show_progress: Literal["full", "minimal", "hidden"] | None = None, - queue: bool | None = None, - batch: bool = False, - max_batch_size: int = 4, - preprocess: bool = True, - postprocess: bool = True, - cancels: dict[str, Any] | list[dict[str, Any]] | None = None, - every: float | None = None, - _js: str | None = None, - ) -> Dependency: - """ - Parameters: - fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component. - inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list. - outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list. - api_name: Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be exposed in the api docs as an unnamed endpoint, although this behavior will be changed in Gradio 4.0. If set to a string, the endpoint will be exposed in the api docs with the given name. - status_tracker: Deprecated and has no effect. - scroll_to_output: If True, will scroll to output component on completion - show_progress: If True, will show progress animation while pending - queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app. - batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). 
The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component. - max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True) - preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component). - postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser. - cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish. - every: Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled. - """ - if fn == "decorator": - - def wrapper(func): - self.__call__( - func, - inputs, - outputs, - api_name, - status_tracker, - scroll_to_output, - show_progress, - queue, - batch, - max_batch_size, - preprocess, - postprocess, - cancels, - every, - _js, + @staticmethod + def _setup( + _event_name: str, + _has_trigger: bool, + _show_progress: Literal["full", "minimal", "hidden"] | None, + _callback: Callable | None, + _trigger_after: int | None, + _trigger_only_on_success: bool, + ): + def event_trigger( + block: Block | None, + fn: Callable | None, + inputs: Component | list[Component] | set[Component] | None = None, + outputs: Component | list[Component] | None = None, + api_name: str | None | Literal[False] = None, + status_tracker: None = None, + scroll_to_output: bool = False, + show_progress: Literal["full", "minimal", "hidden"] = "full", + queue: bool | None = None, + batch: bool = False, + max_batch_size: int = 4, + preprocess: bool = True, + postprocess: bool = True, + cancels: dict[str, Any] | list[dict[str, Any]] | None = None, + every: float | None = None, + _js: str | None = None, + ) -> Dependency: + """ + Parameters: + fn: the function to call when this event is triggered. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component. + inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list. + outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list. + api_name: Defines how the endpoint appears in the API docs. Can be a string, None, or False. If False, the endpoint will not be exposed in the api docs. If set to None, the endpoint will be given the name of the python function fn. If no fn is passed in, it will be given the name 'unnamed'. If set to a string, the endpoint will be exposed in the api docs with the given name. + status_tracker: Deprecated and has no effect. + scroll_to_output: If True, will scroll to output component on completion + show_progress: If True, will show progress animation while pending + queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. 
If None, will use the queue setting of the gradio app. + batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component. + max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True) + preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component). + postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser. + cancels: A list of other events to cancel when this listener is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. Functions that have not yet run (or generators that are iterating) will be cancelled, but functions that are currently running will be allowed to finish. + every: Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled. + """ + + if fn == "decorator": + + def wrapper(func): + event_trigger( + block, + func, + inputs, + outputs, + api_name, + status_tracker, + scroll_to_output, + show_progress, + queue, + batch, + max_batch_size, + preprocess, + postprocess, + cancels, + every, + _js, + ) + + @wraps(func) + def inner(*args, **kwargs): + return func(*args, **kwargs) + + return inner + + return Dependency(None, {}, None, wrapper) + + if status_tracker: + warn_deprecation( + "The 'status_tracker' parameter has been deprecated and has no effect." + ) + if _event_name == "stop": + warn_deprecation( + "The `stop` event on Video and Audio has been deprecated and will be remove in a future version. Use `ended` instead." + ) + if block and "stream" in block.events: + block.check_streamable() # type: ignore + if isinstance(show_progress, bool): + show_progress = "full" if show_progress else "hidden" + + if api_name is None: + if fn is not None: + if not hasattr(fn, "__name__"): + if hasattr(fn, "__class__") and hasattr( + fn.__class__, "__name__" + ): + name = fn.__class__.__name__ + else: + name = "unnamed" + else: + name = fn.__name__ + api_name = "".join( + [ + s + for s in name + if s not in set(string.punctuation) - {"-", "_"} + ] + ) + else: + # Don't document _js only events + api_name = False + + if Context.root_block is None: + raise AttributeError( + "Cannot call {self.event_name} outside of a gradio.Blocks context." ) - @wraps(func) - def inner(*args, **kwargs): - return func(*args, **kwargs) - - return inner - - return Dependency({}, None, wrapper) - - if status_tracker: - warn_deprecation( - "The 'status_tracker' parameter has been deprecated and has no effect." 
+ dep, dep_index = Context.root_block.set_event_trigger( + [EventListenerMethod(block if _has_trigger else None, _event_name)], + fn, + inputs, + outputs, + preprocess=preprocess, + postprocess=postprocess, + scroll_to_output=scroll_to_output, + show_progress=show_progress + if show_progress is not None + else _show_progress, + api_name=api_name, + js=_js, + queue=queue, + batch=batch, + max_batch_size=max_batch_size, + every=every, + trigger_after=_trigger_after, + trigger_only_on_success=_trigger_only_on_success, ) - if self.event_name == "stop": - warn_deprecation( - "The `stop` event on Video and Audio has been deprecated and will be remove in a future version. Use `ended` instead." + set_cancel_events( + [EventListenerMethod(block if _has_trigger else None, _event_name)], + cancels, ) + if _callback: + _callback(block) + return Dependency(block, dep, dep_index, fn) - if isinstance(self, Streamable): - self.check_streamable() - if isinstance(show_progress, bool): - show_progress = "full" if show_progress else "hidden" - - if Context.root_block is None: - raise AttributeError( - "Cannot call {self.event_name} outside of a gradio.Blocks context." - ) - - dep, dep_index = Context.root_block.set_event_trigger( - [self], - fn, - inputs, - outputs, - preprocess=preprocess, - postprocess=postprocess, - scroll_to_output=scroll_to_output, - show_progress=show_progress - if show_progress is not None - else self.show_progress, - api_name=api_name, - js=_js, - queue=queue, - batch=batch, - max_batch_size=max_batch_size, - every=every, - trigger_after=self.trigger_after, - trigger_only_on_success=self.trigger_only_on_success, - ) - set_cancel_events([self], cancels) - if self.callback: - self.callback() - return Dependency(dep, dep_index, fn) + event_trigger.event_name = _event_name + event_trigger.has_trigger = _has_trigger + return event_trigger +# TODO: Fix type def on( - triggers: Sequence[EventListenerMethod] | EventListenerMethod | None = None, + triggers: Sequence[Any] | Any | None = None, fn: Callable | None | Literal["decorator"] = "decorator", inputs: Component | list[Component] | set[Component] | None = None, outputs: Component | list[Component] | None = None, @@ -246,7 +348,7 @@ def on( """ from gradio.components.base import Component - if isinstance(triggers, EventListenerMethod): + if isinstance(triggers, EventListener): triggers = [triggers] if isinstance(inputs, Component): inputs = [inputs] @@ -278,13 +380,14 @@ def inner(*args, **kwargs): return inner - return Dependency({}, None, wrapper) + return Dependency(None, {}, None, wrapper) if Context.root_block is None: raise Exception("Cannot call on() outside of a gradio.Blocks context.") if triggers is None: - triggers = [input.change for input in inputs] if inputs is not None else [] - + triggers = [EventListenerMethod(input, "change") for input in inputs] if inputs is not None else [] # type: ignore + else: + triggers = [EventListenerMethod(t.__self__ if t.has_trigger else None, t.event_name) for t in triggers] # type: ignore dep, dep_index = Context.root_block.set_event_trigger( triggers, fn, @@ -302,216 +405,42 @@ def inner(*args, **kwargs): every=every, ) set_cancel_events(triggers, cancels) - return Dependency(dep, dep_index, fn) - - -@document("*change", inherit=True) -class Changeable(EventListener): - def __init__(self): - self.change = EventListenerMethod(self, "change") - """ - This listener is triggered when the component's value changes either because of user input (e.g. 
a user types in a textbox) OR because of a function update (e.g. an image receives a value from the output of an event trigger). - See `.input()` for a listener that is only triggered by user input. - """ - - -@document("*input", inherit=True) -class Inputable(EventListener): - def __init__(self): - self.input = EventListenerMethod(self, "input") - """ - This listener is triggered when the user changes the value of the component. - """ - - -@document("*click", inherit=True) -class Clickable(EventListener): - def __init__(self): - self.click = EventListenerMethod(self, "click") - """ - This listener is triggered when the component (e.g. a button) is clicked. - """ - - -@document("*submit", inherit=True) -class Submittable(EventListener): - def __init__(self): - self.submit = EventListenerMethod(self, "submit") - """ - This listener is triggered when the user presses the Enter key while the component (e.g. a textbox) is focused. - """ - - -@document("*edit", inherit=True) -class Editable(EventListener): - def __init__(self): - self.edit = EventListenerMethod(self, "edit") - """ - This listener is triggered when the user edits the component (e.g. image) using the - built-in editor. - """ - - -@document("*clear", inherit=True) -class Clearable(EventListener): - def __init__(self): - self.clear = EventListenerMethod(self, "clear") - """ - This listener is triggered when the user clears the component (e.g. image or audio) - using the X button for the component. - """ - - -@document("*play", "*pause", "*stop", "*end", inherit=True) -class Playable(EventListener): - def __init__(self): - self.play = EventListenerMethod(self, "play") - """ - This listener is triggered when the user plays the component (e.g. audio or video). - """ - - self.pause = EventListenerMethod(self, "pause") - """ - This listener is triggered when the media stops playing for any reason (e.g. audio or video). - """ - - self.stop = EventListenerMethod(self, "stop") - """ - This listener is triggered when the user reaches the end of the media track (e.g. audio or video). - """ - - self.end = EventListenerMethod(self, "end") - """ - This listener is triggered when the user reaches the end of the media track (e.g. audio or video). - """ - - -@document("*stream", inherit=True) -class Streamable(EventListener): - def __init__(self): - self.streaming: bool - self.stream = EventListenerMethod( - self, - "stream", - show_progress="hidden", - callback=lambda: setattr(self, "streaming", True), - ) - """ - This listener is triggered when the user streams the component (e.g. a live webcam - component). - """ - - def check_streamable(self): - pass - - -class StreamableOutput(EventListener): - def __init__(self): - self.streaming: bool - - def stream_output(self, y, output_id: str, first_chunk: bool) -> tuple[bytes, Any]: - raise NotImplementedError - - -@document("*start_recording", "*stop_recording", inherit=True) -class Recordable(EventListener): - def __init__(self): - self.start_recording = EventListenerMethod(self, "start_recording") - """ - This listener is triggered when the user starts recording with the component (e.g. audio or video). - """ - - self.stop_recording = EventListenerMethod(self, "stop_recording") - """ - This listener is triggered when the user stops recording with the component (e.g. audio or video). 
- """ - - -@document("*focus", "*blur", inherit=True) -class Focusable(EventListener): - def __init__(self): - self.focus = EventListenerMethod(self, "focus") - """ - This listener is triggered when the component is focused (e.g. when the user clicks inside a textbox). - """ - - self.blur = EventListenerMethod(self, "blur") - """ - This listener is triggered when the component's is unfocused/blurred (e.g. when the user clicks outside of a textbox). - """ - - -@document("*upload", inherit=True) -class Uploadable(EventListener): - def __init__(self): - self.upload = EventListenerMethod(self, "upload") - """ - This listener is triggered when the user uploads a file into the component (e.g. when the user uploads a video into a video component). - """ - - -@document("*release", inherit=True) -class Releaseable(EventListener): - def __init__(self): - self.release = EventListenerMethod(self, "release") - """ - This listener is triggered when the user releases the mouse on this component (e.g. when the user releases the slider). - """ - - -@document("*select", inherit=True) -class Selectable(EventListener): - def __init__(self): - self.selectable: bool = False - self.select = EventListenerMethod( - self, "select", callback=lambda: setattr(self, "selectable", True) - ) - """ - This listener is triggered when the user selects from within the Component. - This event has EventData of type gradio.SelectData that carries information, accessible through SelectData.index and SelectData.value. - See EventData documentation on how to use this event data. - """ - - def get_config(self): - config = super().get_config() - config["selectable"] = self.selectable - return config - - -class SelectData(EventData): - def __init__(self, target: Block | None, data: Any): - super().__init__(target, data) - self.index: int | tuple[int, int] = data["index"] - """ - The index of the selected item. Is a tuple if the component is two dimensional or selection is a range. - """ - self.value: Any = data["value"] - """ - The value of the selected item. - """ - self.selected: bool = data.get("selected", True) - """ - True if the item was selected, False if deselected. - """ - - -@document("*like", inherit=True) -class Likeable(EventListener): - def __init__(self): - self.likeable: bool = False - self.like = EventListenerMethod( - self, "like", callback=lambda: setattr(self, "likeable", True) - ) - """ - This listener is triggered when the user likes/dislikes from within the Component. - This event has EventData of type gradio.LikeData that carries information, accessible through LikeData.index and LikeData.value. - See EventData documentation on how to use this event data. 
- """ - - def get_config(self): - config = super().get_config() - config["likeable"] = self.likeable - return config + return Dependency(None, dep, dep_index, fn) + + +class Events: + change = "change" + input = "input" + click = "click" + submit = "submit" + edit = "edit" + clear = "clear" + play = "play" + pause = "pause" + stop = "stop" + end = "end" + start_recording = "start_recording" + stop_recording = "stop_recording" + focus = "focus" + blur = "blur" + upload = "upload" + release = "release" + select = EventListener( + "select", + config_data=lambda: {"selectable": False}, + callback=lambda block: setattr(block, "selectable", True), + ) + stream = EventListener( + "stream", + show_progress="hidden", + config_data=lambda: {"streamable": False}, + callback=lambda block: setattr(block, "streaming", True), + ) + like = EventListener( + "like", + config_data=lambda: {"likeable": False}, + callback=lambda block: setattr(block, "likeable", True), + ) class LikeData(EventData): diff --git a/gradio/exceptions.py b/gradio/exceptions.py index b8ed0989ae06..9667e2c9faed 100644 --- a/gradio/exceptions.py +++ b/gradio/exceptions.py @@ -81,3 +81,7 @@ def __init__(self, message: str = "Error raised."): def __str__(self): return repr(self.message) + + +class ComponentDefinitionError(NotImplementedError): + pass diff --git a/gradio/flagging.py b/gradio/flagging.py index a226fb8d50e4..5d2644f06d87 100644 --- a/gradio/flagging.py +++ b/gradio/flagging.py @@ -21,7 +21,7 @@ from gradio.deprecation import warn_deprecation if TYPE_CHECKING: - from gradio.components import IOComponent + from gradio.components import Component set_documentation_group("flagging") @@ -32,7 +32,7 @@ class FlaggingCallback(ABC): """ @abstractmethod - def setup(self, components: list[IOComponent], flagging_dir: str): + def setup(self, components: list[Component], flagging_dir: str): """ This method should be overridden and ensure that everything is set up correctly for flag(). This method gets called once at the beginning of the Interface.launch() method. 
@@ -80,7 +80,7 @@ def image_classifier(inp): def __init__(self): pass - def setup(self, components: list[IOComponent], flagging_dir: str | Path): + def setup(self, components: list[Component], flagging_dir: str | Path): self.components = components self.flagging_dir = flagging_dir os.makedirs(flagging_dir, exist_ok=True) @@ -99,11 +99,11 @@ def flag( save_dir = Path( flagging_dir ) / client_utils.strip_invalid_filename_characters(component.label or "") + save_dir.mkdir(exist_ok=True) csv_data.append( - component.deserialize( + component.flag( sample, save_dir, - None, ) ) @@ -135,7 +135,7 @@ def __init__(self): def setup( self, - components: list[IOComponent], + components: list[Component], flagging_dir: str | Path, ): self.components = components @@ -167,11 +167,12 @@ def flag( ) / client_utils.strip_invalid_filename_characters( getattr(component, "label", None) or f"component {idx}" ) + save_dir.mkdir(exist_ok=True) if utils.is_update(sample): csv_data.append(str(sample)) else: csv_data.append( - component.deserialize(sample, save_dir=save_dir) + component.flag(sample, flag_dir=save_dir) if sample is not None else "" ) @@ -234,7 +235,7 @@ def __init__( self.info_filename = info_filename self.separate_dirs = separate_dirs - def setup(self, components: list[IOComponent], flagging_dir: str): + def setup(self, components: list[Component], flagging_dir: str): """ Params: flagging_dir (str): local directory where the dataset is cloned, @@ -425,7 +426,8 @@ def _deserialize_components( # Get deserialized object (will save sample to disk if applicable -file, audio, image,...-) label = component.label or "" save_dir = data_dir / client_utils.strip_invalid_filename_characters(label) - deserialized = component.deserialize(sample, save_dir, None) + save_dir.mkdir(exist_ok=True, parents=True) + deserialized = component.flag(sample, save_dir) # Add deserialized object to row features[label] = {"dtype": "string", "_type": "Value"} diff --git a/gradio/helpers.py b/gradio/helpers.py index 794e0a98c998..893a789260f7 100644 --- a/gradio/helpers.py +++ b/gradio/helpers.py @@ -25,12 +25,13 @@ from gradio import components, oauth, processing_utils, routes, utils, wasm_utils from gradio.context import Context, LocalContext +from gradio.data_classes import GradioModel, GradioRootModel +from gradio.events import EventData from gradio.exceptions import Error from gradio.flagging import CSVLogger if TYPE_CHECKING: # Only import for type checking (to avoid circular imports). 
- from gradio.blocks import Block - from gradio.components import IOComponent + from gradio.components import Component CACHED_FOLDER = "gradio_cached_examples" LOG_FILE = "log.csv" @@ -40,8 +41,8 @@ def create_examples( examples: list[Any] | list[list[Any]] | str, - inputs: IOComponent | list[IOComponent], - outputs: IOComponent | list[IOComponent] | None = None, + inputs: Component | list[Component], + outputs: Component | list[Component] | None = None, fn: Callable | None = None, cache_examples: bool = False, examples_per_page: int = 10, @@ -91,8 +92,8 @@ class Examples: def __init__( self, examples: list[Any] | list[list[Any]] | str, - inputs: IOComponent | list[IOComponent], - outputs: IOComponent | list[IOComponent] | None = None, + inputs: Component | list[Component], + outputs: Component | list[Component] | None = None, fn: Callable | None = None, cache_examples: bool = False, examples_per_page: int = 10, @@ -206,13 +207,19 @@ def __init__( self.batch = batch with utils.set_directory(working_directory): - self.processed_examples = [ - [ - component.postprocess(sample) - for component, sample in zip(inputs, example) - ] - for example in examples - ] + self.processed_examples = [] + for example in examples: + sub = [] + for component, sample in zip(inputs, example): + prediction_value = component.postprocess(sample) + if isinstance(prediction_value, (GradioRootModel, GradioModel)): + prediction_value = prediction_value.model_dump() + prediction_value = processing_utils.move_files_to_cache( + prediction_value, component + ) + sub.append(prediction_value) + self.processed_examples.append(sub) + self.non_none_processed_examples = [ [ex for (ex, keep) in zip(example, input_has_examples) if keep] for example in self.processed_examples @@ -410,23 +417,21 @@ def load_from_cache(self, example_id: int) -> list[Any]: output.append(value_as_dict) except (ValueError, TypeError, SyntaxError, AssertionError): output.append( - component.serialize( - value_to_use, self.cached_folder, allow_links=True + component.read_from_flag( + value_to_use, + self.cached_folder, ) ) return output def merge_generated_values_into_output( - components: list[IOComponent], generated_values: list, output: list + components: list[Component], generated_values: list, output: list ): - from gradio.events import StreamableOutput + from gradio.components.base import StreamingOutput for output_index, output_component in enumerate(components): - if ( - isinstance(output_component, StreamableOutput) - and output_component.streaming - ): + if isinstance(output_component, StreamingOutput) and output_component.streaming: binary_chunks = [] for i, chunk in enumerate(generated_values): if len(components) > 1: @@ -1059,37 +1064,6 @@ def _animate(_): return output_mp4.name -@document() -class EventData: - """ - When a subclass of EventData is added as a type hint to an argument of an event listener method, this object will be passed as that argument. - It contains information about the event that triggered the listener, such the target object, and other data related to the specific event that are attributes of the subclass. 
- - Example: - table = gr.Dataframe([[1, 2, 3], [4, 5, 6]]) - gallery = gr.Gallery([("cat.jpg", "Cat"), ("dog.jpg", "Dog")]) - textbox = gr.Textbox("Hello World!") - - statement = gr.Textbox() - - def on_select(evt: gr.SelectData): # SelectData is a subclass of EventData - return f"You selected {evt.value} at {evt.index} from {evt.target}" - - table.select(on_select, None, statement) - gallery.select(on_select, None, statement) - textbox.select(on_select, None, statement) - Demos: gallery_selections, tictactoe - """ - - def __init__(self, target: Block | None, _data: Any): - """ - Parameters: - target: The target object that triggered the event. Can be used to distinguish if multiple components are bound to the same listener. - """ - self.target = target - self._data = _data - - def log_message(message: str, level: Literal["info", "warning"] = "info"): from gradio.context import LocalContext diff --git a/gradio/inputs.py b/gradio/inputs.py deleted file mode 100644 index 9345530649a0..000000000000 --- a/gradio/inputs.py +++ /dev/null @@ -1,451 +0,0 @@ -# type: ignore -""" -This module defines various classes that can serve as the `input` to an interface. Each class must inherit from -`InputComponent`, and each class must define a path to its template. All of the subclasses of `InputComponent` are -automatically added to a registry, which allows them to be easily referenced in other parts of the code. -""" - -from __future__ import annotations - -from typing import Any, Optional - -from gradio import components -from gradio.deprecation import warn_deprecation - - -def warn_inputs_deprecation(): - warn_deprecation( - "Usage of gradio.inputs is deprecated, and will not be supported in the future, please import your component from gradio.components", - ) - - -class Textbox(components.Textbox): - def __init__( - self, - lines: int = 1, - placeholder: Optional[str] = None, - default: str = "", - numeric: Optional[bool] = False, - type: Optional[str] = "text", - label: Optional[str] = None, - optional: bool = False, - ): - warn_inputs_deprecation() - super().__init__( - value=default, - lines=lines, - placeholder=placeholder, - label=label, - numeric=numeric, - type=type, - optional=optional, - ) - - -class Number(components.Number): - """ - Component creates a field for user to enter numeric input. Provides a number as an argument to the wrapped function. - Input type: float - """ - - def __init__( - self, - default: Optional[float] = None, - label: Optional[str] = None, - optional: bool = False, - ): - """ - Parameters: - default (float): default value. - label (str): component name in interface. - optional (bool): If True, the interface can be submitted with no value for this component. - """ - warn_inputs_deprecation() - super().__init__(value=default, label=label, optional=optional) - - -class Slider(components.Slider): - """ - Component creates a slider that ranges from `minimum` to `maximum`. Provides number as an argument to the wrapped function. - Input type: float - """ - - def __init__( - self, - minimum: float = 0, - maximum: float = 100, - step: Optional[float] = None, - default: Optional[float] = None, - label: Optional[str] = None, - optional: bool = False, - ): - """ - Parameters: - minimum (float): minimum value for slider. - maximum (float): maximum value for slider. - step (float): increment between slider values. - default (float): default value. - label (str): component name in interface. - optional (bool): this parameter is ignored. 
- """ - warn_inputs_deprecation() - - super().__init__( - value=default, - minimum=minimum, - maximum=maximum, - step=step, - label=label, - optional=optional, - ) - - -class Checkbox(components.Checkbox): - """ - Component creates a checkbox that can be set to `True` or `False`. Provides a boolean as an argument to the wrapped function. - Input type: bool - """ - - def __init__( - self, - default: bool = False, - label: Optional[str] = None, - optional: bool = False, - ): - """ - Parameters: - label (str): component name in interface. - default (bool): if True, checked by default. - optional (bool): this parameter is ignored. - """ - warn_inputs_deprecation() - super().__init__(value=default, label=label, optional=optional) - - -class CheckboxGroup(components.CheckboxGroup): - """ - Component creates a set of checkboxes of which a subset can be selected. Provides a list of strings representing the selected choices as an argument to the wrapped function. - Input type: Union[List[str], List[int]] - """ - - def __init__( - self, - choices: list[str], - default: list[str] | None = None, - type: str = "value", - label: Optional[str] = None, - optional: bool = False, - ): - """ - Parameters: - choices (List[str]): list of options to select from. - default (List[str]): default selected list of options. - type (str): Type of value to be returned by component. "value" returns the list of strings of the choices selected, "index" returns the list of indices of the choices selected. - label (str): component name in interface. - optional (bool): this parameter is ignored. - """ - if default is None: - default = [] - warn_inputs_deprecation() - super().__init__( - value=default, - choices=choices, - type=type, - label=label, - optional=optional, - ) - - -class Radio(components.Radio): - """ - Component creates a set of radio buttons of which only one can be selected. Provides string representing selected choice as an argument to the wrapped function. - Input type: Union[str, int] - """ - - def __init__( - self, - choices: list[str], - type: str = "value", - default: Optional[str] = None, - label: Optional[str] = None, - optional: bool = False, - ): - """ - Parameters: - choices (List[str]): list of options to select from. - type (str): Type of value to be returned by component. "value" returns the string of the choice selected, "index" returns the index of the choice selected. - default (str): the button selected by default. If None, no button is selected by default. - label (str): component name in interface. - optional (bool): this parameter is ignored. - """ - warn_inputs_deprecation() - super().__init__( - choices=choices, - type=type, - value=default, - label=label, - optional=optional, - ) - - -class Dropdown(components.Dropdown): - """ - Component creates a dropdown of which only one can be selected. Provides string representing selected choice as an argument to the wrapped function. - Input type: Union[str, int] - """ - - def __init__( - self, - choices: list[str], - type: str = "value", - default: Optional[str] = None, - label: Optional[str] = None, - optional: bool = False, - ): - """ - Parameters: - choices (List[str]): list of options to select from. - type (str): Type of value to be returned by component. "value" returns the string of the choice selected, "index" returns the index of the choice selected. - default (str): default value selected in dropdown. If None, no value is selected by default. - label (str): component name in interface. - optional (bool): this parameter is ignored. 
- """ - warn_inputs_deprecation() - super().__init__( - choices=choices, - type=type, - value=default, - label=label, - optional=optional, - ) - - -class Image(components.Image): - """ - Component creates an image upload box with editing capabilities. - Input type: Union[numpy.array, PIL.Image, file-object] - """ - - def __init__( - self, - shape: tuple[int, int] = None, - image_mode: str = "RGB", - invert_colors: bool = False, - source: str = "upload", - tool: str = "editor", - type: str = "numpy", - label: str = None, - optional: bool = False, - ): - """ - Parameters: - shape (Tuple[int, int]): (width, height) shape to crop and resize image to; if None, matches input image size. - image_mode (str): How to process the uploaded image. Accepts any of the PIL image modes, e.g. "RGB" for color images, "RGBA" to include the transparency mask, "L" for black-and-white images. - invert_colors (bool): whether to invert the image as a preprocessing step. - source (str): Source of image. "upload" creates a box where user can drop an image file, "webcam" allows user to take snapshot from their webcam, "canvas" defaults to a white image that can be edited and drawn upon with tools. - tool (str): Tools used for editing. "editor" allows a full screen editor, "select" provides a cropping and zoom tool. - type (str): Type of value to be returned by component. "numpy" returns a numpy array with shape (height, width, 3) and values from 0 to 255, "pil" returns a PIL image object, "file" returns a temporary file object whose path can be retrieved by file_obj.name, "filepath" returns the path directly. - label (str): component name in interface. - optional (bool): If True, the interface can be submitted with no uploaded image, in which case the input value is None. - """ - warn_inputs_deprecation() - super().__init__( - shape=shape, - image_mode=image_mode, - invert_colors=invert_colors, - source=source, - tool=tool, - type=type, - label=label, - optional=optional, - ) - - -class Video(components.Video): - """ - Component creates a video file upload that is converted to a file path. - - Input type: filepath - """ - - def __init__( - self, - type: Optional[str] = None, - source: str = "upload", - label: Optional[str] = None, - optional: bool = False, - ): - """ - Parameters: - type (str): Type of video format to be returned by component, such as 'avi' or 'mp4'. If set to None, video will keep uploaded format. - source (str): Source of video. "upload" creates a box where user can drop an video file, "webcam" allows user to record a video from their webcam. - label (str): component name in interface. - optional (bool): If True, the interface can be submitted with no uploaded video, in which case the input value is None. - """ - warn_inputs_deprecation() - super().__init__(format=type, source=source, label=label, optional=optional) - - -class Audio(components.Audio): - """ - Component accepts audio input files. - Input type: Union[Tuple[int, numpy.array], file-object, numpy.array] - """ - - def __init__( - self, - source: str = "upload", - type: str = "numpy", - label: str = None, - optional: bool = False, - ): - """ - Parameters: - source (str): Source of audio. "upload" creates a box where user can drop an audio file, "microphone" creates a microphone input. - type (str): Type of value to be returned by component. 
"numpy" returns a 2-set tuple with an integer sample_rate and the data numpy.array of shape (samples, 2), "file" returns a temporary file object whose path can be retrieved by file_obj.name, "filepath" returns the path directly. - label (str): component name in interface. - optional (bool): If True, the interface can be submitted with no uploaded audio, in which case the input value is None. - """ - warn_inputs_deprecation() - super().__init__(source=source, type=type, label=label, optional=optional) - - -class File(components.File): - """ - Component accepts generic file uploads. - Input type: Union[file-object, bytes, List[Union[file-object, bytes]]] - """ - - def __init__( - self, - file_count: str = "single", - type: str = "file", - label: Optional[str] = None, - keep_filename: bool = True, - optional: bool = False, - ): - """ - Parameters: - file_count (str): if single, allows user to upload one file. If "multiple", user uploads multiple files. If "directory", user uploads all files in selected directory. Return type will be list for each file in case of "multiple" or "directory". - type (str): Type of value to be returned by component. "file" returns a temporary file object whose path can be retrieved by file_obj.name, "binary" returns an bytes object. - label (str): component name in interface. - keep_filename (bool): DEPRECATED. Original filename always kept. - optional (bool): If True, the interface can be submitted with no uploaded image, in which case the input value is None. - """ - warn_inputs_deprecation() - super().__init__( - file_count=file_count, - type=type, - label=label, - keep_filename=keep_filename, - optional=optional, - ) - - -class Dataframe(components.Dataframe): - """ - Component accepts 2D input through a spreadsheet interface. - Input type: Union[pandas.DataFrame, numpy.array, List[Union[str, float]], List[List[Union[str, float]]]] - """ - - def __init__( - self, - headers: Optional[list[str]] = None, - row_count: int = 3, - col_count: Optional[int] = 3, - datatype: str | list[str] = "str", - col_width: int | list[int] = None, - default: Optional[list[list[Any]]] = None, - type: str = "pandas", - label: Optional[str] = None, - optional: bool = False, - ): - """ - Parameters: - headers (List[str]): Header names to dataframe. If None, no headers are shown. - row_count (int): Limit number of rows for input. - col_count (int): Limit number of columns for input. If equal to 1, return data will be one-dimensional. Ignored if `headers` is provided. - datatype (Union[str, List[str]]): Datatype of values in sheet. Can be provided per column as a list of strings, or for the entire sheet as a single string. Valid datatypes are "str", "number", "bool", and "date". - col_width (Union[int, List[int]]): Width of columns in pixels. Can be provided as single value or list of values per column. - default (List[List[Any]]): Default value - type (str): Type of value to be returned by component. "pandas" for pandas dataframe, "numpy" for numpy array, or "array" for a Python array. - label (str): component name in interface. - optional (bool): this parameter is ignored. - """ - warn_inputs_deprecation() - super().__init__( - value=default, - headers=headers, - row_count=row_count, - col_count=col_count, - datatype=datatype, - col_width=col_width, - type=type, - label=label, - optional=optional, - ) - - -class Timeseries(components.Timeseries): - """ - Component accepts pandas.DataFrame uploaded as a timeseries csv file. 
- Input type: pandas.DataFrame - """ - - def __init__( - self, - x: Optional[str] = None, - y: str | list[str] = None, - label: Optional[str] = None, - optional: bool = False, - ): - """ - Parameters: - x (str): Column name of x (time) series. None if csv has no headers, in which case first column is x series. - y (Union[str, List[str]]): Column name of y series, or list of column names if multiple series. None if csv has no headers, in which case every column after first is a y series. - label (str): component name in interface. - optional (bool): If True, the interface can be submitted with no uploaded csv file, in which case the input value is None. - """ - warn_inputs_deprecation() - super().__init__(x=x, y=y, label=label, optional=optional) - - -class State(components.State): - """ - Special hidden component that stores state across runs of the interface. - Input type: Any - """ - - def __init__( - self, - label: str = None, - default: Any = None, - ): - """ - Parameters: - label (str): component name in interface (not used). - default (Any): the initial value of the state. - optional (bool): this parameter is ignored. - """ - warn_inputs_deprecation() - super().__init__(value=default, label=label) - - -class Image3D(components.Model3D): - """ - Used for 3D image model output. - Input type: File object of type (.obj, glb, or .gltf) - """ - - def __init__( - self, - label: Optional[str] = None, - optional: bool = False, - ): - """ - Parameters: - label (str): component name in interface. - optional (bool): If True, the interface can be submitted with no uploaded image, in which case the input value is None. - """ - warn_inputs_deprecation() - super().__init__(label=label, optional=optional) diff --git a/gradio/interface.py b/gradio/interface.py index c4534e35a197..1681b75d5620 100644 --- a/gradio/interface.py +++ b/gradio/interface.py @@ -13,21 +13,20 @@ from gradio_client.documentation import document, set_documentation_group -from gradio import Examples, external, interpretation, utils +from gradio import Examples, external, utils from gradio.blocks import Blocks from gradio.components import ( Button, ClearButton, + Component, DuplicateButton, - Interpretation, - IOComponent, Markdown, State, get_component_instance, ) from gradio.data_classes import InterfaceTypes from gradio.deprecation import warn_deprecation -from gradio.events import Changeable, Streamable, Submittable, on +from gradio.events import Events, on from gradio.exceptions import RenderError from gradio.flagging import CSVLogger, FlaggingCallback, FlagMethod from gradio.layouts import Column, Row, Tab, Tabs @@ -39,8 +38,6 @@ if TYPE_CHECKING: # Only import for type checking (is False at runtime). 
from transformers.pipelines.base import Pipeline - from gradio.events import EventListenerMethod - @document("launch", "load", "from_pipeline", "integrate", "queue") class Interface(Blocks): @@ -125,14 +122,12 @@ def from_pipeline(cls, pipeline: Pipeline, **kwargs) -> Interface: def __init__( self, fn: Callable, - inputs: str | IOComponent | list[str | IOComponent] | None, - outputs: str | IOComponent | list[str | IOComponent] | None, + inputs: str | Component | list[str | Component] | None, + outputs: str | Component | list[str | Component] | None, examples: list[Any] | list[list[Any]] | str | None = None, cache_examples: bool | None = None, examples_per_page: int = 10, live: bool = False, - interpretation: Callable | str | None = None, - num_shap: float = 2.0, title: str | None = None, description: str | None = None, article: str | None = None, @@ -142,7 +137,7 @@ def __init__( allow_flagging: str | None = None, flagging_options: list[str] | list[tuple[str, str]] | None = None, flagging_dir: str = "flagged", - flagging_callback: FlaggingCallback = CSVLogger(), + flagging_callback: FlaggingCallback | None = None, analytics_enabled: bool | None = None, batch: bool = False, max_batch_size: int = 4, @@ -160,8 +155,6 @@ def __init__( cache_examples: If True, caches examples in the server for fast runtime in examples. If `fn` is a generator function, then the last yielded value will be used as the output. The default option in HuggingFace Spaces is True. The default option elsewhere is False. examples_per_page: If examples are provided, how many to display per page. live: whether the interface should automatically rerun if any of the inputs change. - interpretation: function that provides interpretation explaining prediction output. Pass "default" to use simple built-in interpreter, "shap" to use a built-in shapley-based interpreter, or your own custom interpretation function. For more information on the different interpretation methods, see the Advanced Interface Features guide. - num_shap: a multiplier that determines how many examples are computed for shap-based interpretation. Increasing this value will increase shap runtime, but improve results. Only applies if interpretation is "shap". title: a title for the interface; if provided, appears above the input and output components in large font. Also used as the tab title when opened in a browser window. description: a description for the interface; if provided, appears above the input and output components and beneath the title in regular font. Accepts Markdown and HTML content. article: an expanded article explaining the interface; if provided, appears below the input and output components in regular font. Accepts Markdown and HTML content. @@ -205,8 +198,8 @@ def __init__( inputs = [] self.interface_type = InterfaceTypes.OUTPUT_ONLY - assert isinstance(inputs, (str, list, IOComponent)) - assert isinstance(outputs, (str, list, IOComponent)) + assert isinstance(inputs, (str, list, Component)) + assert isinstance(outputs, (str, list, Component)) if not isinstance(inputs, list): inputs = [inputs] @@ -258,7 +251,7 @@ def __init__( ] for component in self.input_components + self.output_components: - if not (isinstance(component, IOComponent)): + if not (isinstance(component, Component)): raise ValueError( f"{component} is not a valid input/output component for Interface." 
) @@ -275,23 +268,11 @@ def __init__( InterfaceTypes.OUTPUT_ONLY, ]: for o in self.output_components: - assert isinstance(o, IOComponent) + assert isinstance(o, Component) if o.interactive is None: # Unless explicitly otherwise specified, force output components to # be non-interactive o.interactive = False - if ( - interpretation is None - or isinstance(interpretation, list) - or callable(interpretation) - ): - self.interpretation = interpretation - elif isinstance(interpretation, str): - self.interpretation = [ - interpretation.lower() for _ in self.input_components - ] - else: - raise ValueError("Invalid value for parameter: interpretation") self.api_mode = _api_mode self.fn = fn @@ -309,7 +290,6 @@ def __init__( self.thumbnail = thumbnail self.examples = examples - self.num_shap = num_shap self.examples_per_page = examples_per_page self.simple_server = None @@ -359,8 +339,11 @@ def __init__( "flagging_options must be a list of strings or list of (string, string) tuples." ) + if not flagging_callback: + flagging_callback = CSVLogger() self.flagging_callback = flagging_callback self.flagging_dir = flagging_dir + self.batch = batch self.max_batch_size = max_batch_size self.allow_duplication = allow_duplication @@ -380,11 +363,11 @@ def __init__( if utils.is_special_typed_parameter(param_name, param_types): param_names.remove(param_name) for component, param_name in zip(self.input_components, param_names): - assert isinstance(component, IOComponent) + assert isinstance(component, Component) if component.label is None: component.label = param_name for i, component in enumerate(self.output_components): - assert isinstance(component, IOComponent) + assert isinstance(component, Component) if component.label is None: if len(self.output_components) == 1: component.label = "output" @@ -415,7 +398,6 @@ def __init__( None, None, ) - interpretation_btn, interpretation_set = None, None input_component_column, interpret_component_column = None, None with Row(equal_height=False): @@ -430,8 +412,6 @@ def __init__( stop_btn, flag_btns, input_component_column, - interpret_component_column, - interpretation_set, ) = self.render_input_column() if self.interface_type in [ InterfaceTypes.STANDARD, @@ -443,7 +423,6 @@ def __init__( duplicate_btn, stop_btn_2_out, flag_btns_out, - interpretation_btn, ) = self.render_output_column(submit_btn) submit_btn = submit_btn or submit_btn_out clear_btn = clear_btn or clear_btn_2_out @@ -459,12 +438,6 @@ def __init__( ) if duplicate_btn is not None: duplicate_btn.activate() - self.attach_interpretation_events( - interpretation_btn, - interpretation_set, - input_component_column, - interpret_component_column, - ) self.attach_flagging_events(flag_btns, clear_btn) self.render_examples() @@ -491,23 +464,14 @@ def render_input_column( Button | None, list[Button] | None, Column, - Column | None, - list[Interpretation] | None, ]: submit_btn, clear_btn, stop_btn, flag_btns = None, None, None, None - interpret_component_column, interpretation_set = None, None with Column(variant="panel"): input_component_column = Column() with input_component_column: for component in self.input_components: component.render() - if self.interpretation: - interpret_component_column = Column(visible=False) - interpretation_set = [] - with interpret_component_column: - for component in self.input_components: - interpretation_set.append(Interpretation(component)) with Row(): if self.interface_type in [ InterfaceTypes.STANDARD, @@ -543,8 +507,6 @@ def render_input_column( stop_btn, flag_btns, 
input_component_column, - interpret_component_column, - interpretation_set, ) def render_output_column( @@ -553,14 +515,12 @@ def render_output_column( ) -> tuple[ Button | None, ClearButton | None, - DuplicateButton, + DuplicateButton | None, Button | None, list | None, - Button | None, ]: submit_btn = submit_btn_in - interpretation_btn, clear_btn, duplicate_btn, flag_btns, stop_btn = ( - None, + clear_btn, duplicate_btn, flag_btns, stop_btn = ( None, None, None, @@ -592,9 +552,6 @@ def render_output_column( raise RenderError("Submit button not rendered") flag_btns = [submit_btn] - if self.interpretation: - interpretation_btn = Button("Interpret") - if self.allow_duplication: duplicate_btn = DuplicateButton(scale=1, size="lg", _activate=False) @@ -604,7 +561,6 @@ def render_output_column( duplicate_btn, stop_btn, flag_btns, - interpretation_btn, ) def render_article(self): @@ -630,12 +586,12 @@ def attach_submit_events(self, submit_btn: Button | None, stop_btn: Button | Non max_batch_size=self.max_batch_size, ) else: - events: list[EventListenerMethod] = [] + events: list[Callable] = [] for component in self.input_components: - if isinstance(component, Streamable) and component.streaming: - events.append(component.stream) - elif isinstance(component, Changeable): - events.append(component.change) + if component.has_event("stream") and component.streaming: # type: ignore + events.append(component.stream) # type: ignore + elif component.has_event("change"): + events.append(component.change) # type: ignore on( events, self.fn, @@ -652,9 +608,9 @@ def attach_submit_events(self, submit_btn: Button | None, stop_btn: Button | Non extra_output = [] triggers = [submit_btn.click] + [ - component.submit + component.submit # type: ignore for component in self.input_components - if isinstance(component, Submittable) + if component.has_event(Events.submit) ] if stop_btn: @@ -697,6 +653,7 @@ def cleanup(): outputs=[submit_btn, stop_btn], cancels=predict_event, queue=False, + api_name=False, ) else: on( @@ -723,8 +680,7 @@ def attach_clear_events( None, [], ( - ([input_component_column] if input_component_column else []) - + ([interpret_component_column] if self.interpretation else []) + [input_component_column] if input_component_column else [] ), # type: ignore _js=f"""() => {json.dumps( ( @@ -737,26 +693,10 @@ def attach_clear_events( ] else [] ) - + ([{'variant': None, 'visible': False, '__type__': 'update'}] if self.interpretation else []) )} """, ) - def attach_interpretation_events( - self, - interpretation_btn: Button | None, - interpretation_set: list[Interpretation] | None, - input_component_column: Column | None, - interpret_component_column: Column | None, - ): - if interpretation_btn: - interpretation_btn.click( - self.interpret_func, - inputs=self.input_components + self.output_components, - outputs=(interpretation_set or []) + [input_component_column, interpret_component_column], # type: ignore - preprocess=False, - ) - def attach_flagging_events( self, flag_btns: list[Button] | None, clear_btn: ClearButton ): @@ -797,6 +737,7 @@ def attach_flagging_events( None, flag_btn, queue=False, + api_name=False, ) flag_btn.click( flag_method, @@ -804,12 +745,10 @@ def attach_flagging_events( outputs=flag_btn, preprocess=False, queue=False, + api_name=False, ) clear_btn.click( - flag_method.reset, - None, - flag_btn, - queue=False, + flag_method.reset, None, flag_btn, queue=False, api_name=False ) def render_examples(self): @@ -845,20 +784,6 @@ def __repr__(self): repr += f"\n|-{component}" return 
repr - async def interpret_func(self, *args): - return await self.interpret(list(args)) + [ - Column(visible=False), - Column(visible=True), - ] - - async def interpret(self, raw_input: list[Any]) -> list[Any]: - return [ - {"original": raw_value, "interpretation": interpretation} - for interpretation, raw_value in zip( - (await interpretation.run_interpret(self, raw_input))[0], raw_input - ) - ] - def test_launch(self) -> None: """ Deprecated. diff --git a/gradio/interpretation.py b/gradio/interpretation.py deleted file mode 100644 index 3731a9d38153..000000000000 --- a/gradio/interpretation.py +++ /dev/null @@ -1,329 +0,0 @@ -"""Contains classes and methods related to interpretation for components in Gradio.""" - -from __future__ import annotations - -import copy -import math -from abc import ABC, abstractmethod -from typing import TYPE_CHECKING, Any - -import numpy as np -from gradio_client import utils as client_utils - -from gradio import components - -if TYPE_CHECKING: # Only import for type checking (is False at runtime). - from gradio import Interface - - -class Interpretable(ABC): # noqa: B024 - def __init__(self) -> None: - self.set_interpret_parameters() - - def set_interpret_parameters(self): # noqa: B027 - """ - Set any parameters for interpretation. Properties can be set here to be - used in get_interpretation_neighbors and get_interpretation_scores. - """ - pass - - def get_interpretation_scores( - self, x: Any, neighbors: list[Any] | None, scores: list[float], **kwargs - ) -> list: - """ - Arrange the output values from the neighbors into interpretation scores for the interface to render. - Parameters: - x: Input to interface - neighbors: Neighboring values to input x used for interpretation. - scores: Output value corresponding to each neighbor in neighbors - Returns: - Arrangement of interpretation scores for interfaces to render. - """ - return scores - - -class TokenInterpretable(Interpretable, ABC): - @abstractmethod - def tokenize(self, x: Any) -> tuple[list, list, None]: - """ - Interprets an input data point x by splitting it into a list of tokens (e.g - a string into words or an image into super-pixels). - """ - return [], [], None - - @abstractmethod - def get_masked_inputs(self, tokens: list, binary_mask_matrix: list[list]) -> list: - return [] - - -class NeighborInterpretable(Interpretable, ABC): - @abstractmethod - def get_interpretation_neighbors(self, x: Any) -> tuple[list, dict]: - """ - Generates values similar to input to be used to interpret the significance of the input in the final output. - Parameters: - x: Input to interface - Returns: (neighbor_values, interpret_kwargs, interpret_by_removal) - neighbor_values: Neighboring values to input x to compute for interpretation - interpret_kwargs: Keyword arguments to be passed to get_interpretation_scores - """ - return [], {} - - -async def run_interpret(interface: Interface, raw_input: list): - """ - Runs the interpretation command for the machine learning model. Handles both the "default" out-of-the-box - interpretation for a certain set of UI component types, as well as the custom interpretation case. - Parameters: - raw_input: a list of raw inputs to apply the interpretation(s) on. 
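Aside (illustrative, not part of the patch): this deletion removes the whole interpretation feature, which was driven from the `interpretation` and `num_shap` arguments of `gr.Interface` that the earlier hunks drop from the signature. A hedged sketch of the retired 3.x-style call site, with a made-up toy classifier standing in for a real model:

import gradio as gr

def classify(text: str) -> dict:
    # toy scores so the sketch is runnable; not a real model
    score = min(len(text) / 40, 1.0)
    return {"long": score, "short": 1.0 - score}

demo = gr.Interface(
    fn=classify,
    inputs=gr.Textbox(),
    outputs=gr.Label(),
    interpretation="default",  # removed by this change; "shap" and num_shap are gone as well
)
# demo.launch()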
- """ - if isinstance(interface.interpretation, list): # Either "default" or "shap" - processed_input = [ - input_component.preprocess(raw_input[i]) - for i, input_component in enumerate(interface.input_components) - ] - original_output = await interface.call_function(0, processed_input) - original_output = original_output["prediction"] - - if len(interface.output_components) == 1: - original_output = [original_output] - - scores, alternative_outputs = [], [] - - for i, (x, interp) in enumerate(zip(raw_input, interface.interpretation)): - if interp == "default": - input_component = interface.input_components[i] - neighbor_raw_input = list(raw_input) - if isinstance(input_component, TokenInterpretable): - tokens, neighbor_values, masks = input_component.tokenize(x) - interface_scores = [] - alternative_output = [] - for neighbor_input in neighbor_values: - neighbor_raw_input[i] = neighbor_input - processed_neighbor_input = [ - input_component.preprocess(neighbor_raw_input[i]) - for i, input_component in enumerate( - interface.input_components - ) - ] - - neighbor_output = await interface.call_function( - 0, processed_neighbor_input - ) - neighbor_output = neighbor_output["prediction"] - if len(interface.output_components) == 1: - neighbor_output = [neighbor_output] - processed_neighbor_output = [ - output_component.postprocess(neighbor_output[i]) - for i, output_component in enumerate( - interface.output_components - ) - ] - - alternative_output.append(processed_neighbor_output) - interface_scores.append( - quantify_difference_in_label( - interface, original_output, neighbor_output - ) - ) - alternative_outputs.append(alternative_output) - scores.append( - input_component.get_interpretation_scores( - raw_input[i], - neighbor_values, - interface_scores, - masks=masks, - tokens=tokens, - ) - ) - elif isinstance(input_component, NeighborInterpretable): - ( - neighbor_values, - interpret_kwargs, - ) = input_component.get_interpretation_neighbors( - x - ) # type: ignore - interface_scores = [] - alternative_output = [] - for neighbor_input in neighbor_values: - neighbor_raw_input[i] = neighbor_input - processed_neighbor_input = [ - input_component.preprocess(neighbor_raw_input[i]) - for i, input_component in enumerate( - interface.input_components - ) - ] - neighbor_output = await interface.call_function( - 0, processed_neighbor_input - ) - neighbor_output = neighbor_output["prediction"] - if len(interface.output_components) == 1: - neighbor_output = [neighbor_output] - processed_neighbor_output = [ - output_component.postprocess(neighbor_output[i]) - for i, output_component in enumerate( - interface.output_components - ) - ] - - alternative_output.append(processed_neighbor_output) - interface_scores.append( - quantify_difference_in_label( - interface, original_output, neighbor_output - ) - ) - alternative_outputs.append(alternative_output) - interface_scores = [-score for score in interface_scores] - scores.append( - input_component.get_interpretation_scores( - raw_input[i], - neighbor_values, - interface_scores, - **interpret_kwargs, - ) - ) - else: - raise ValueError( - f"Component {input_component} does not support interpretation" - ) - elif interp == "shap" or interp == "shapley": - try: - import shap # type: ignore - except (ImportError, ModuleNotFoundError) as err: - raise ValueError( - "The package `shap` is required for this interpretation method. 
Try: `pip install shap`" - ) from err - input_component = interface.input_components[i] - if not isinstance(input_component, TokenInterpretable): - raise ValueError( - f"Input component {input_component} does not support `shap` interpretation" - ) - - tokens, _, masks = input_component.tokenize(x) - - # construct a masked version of the input - def get_masked_prediction(binary_mask): - assert isinstance(input_component, TokenInterpretable) - masked_xs = input_component.get_masked_inputs(tokens, binary_mask) - preds = [] - for masked_x in masked_xs: - processed_masked_input = copy.deepcopy(processed_input) - processed_masked_input[i] = input_component.preprocess(masked_x) - new_output = client_utils.synchronize_async( - interface.call_function, 0, processed_masked_input - ) - new_output = new_output["prediction"] - if len(interface.output_components) == 1: - new_output = [new_output] - pred = get_regression_or_classification_value( - interface, original_output, new_output - ) - preds.append(pred) - return np.array(preds) - - num_total_segments = len(tokens) - explainer = shap.KernelExplainer( - get_masked_prediction, np.zeros((1, num_total_segments)) - ) - shap_values = explainer.shap_values( - np.ones((1, num_total_segments)), - nsamples=int(interface.num_shap * num_total_segments), - silent=True, - ) - if shap_values is None: - raise ValueError("SHAP values could not be calculated") - scores.append( - input_component.get_interpretation_scores( - raw_input[i], - None, - shap_values[0].tolist(), - masks=masks, - tokens=tokens, - ) - ) - alternative_outputs.append([]) - elif interp is None: - scores.append(None) - alternative_outputs.append([]) - else: - raise ValueError(f"Unknown interpretation method: {interp}") - return scores, alternative_outputs - elif interface.interpretation: # custom interpretation function - processed_input = [ - input_component.preprocess(raw_input[i]) - for i, input_component in enumerate(interface.input_components) - ] - interpreter = interface.interpretation - interpretation = interpreter(*processed_input) - if len(raw_input) == 1: - interpretation = [interpretation] - return interpretation, [] - else: - raise ValueError("No interpretation method specified.") - - -def diff(original: Any, perturbed: Any) -> int | float: - try: # try computing numerical difference - score = float(original) - float(perturbed) - except ValueError: # otherwise, look at strict difference in label - score = int(original != perturbed) - return score - - -def quantify_difference_in_label( - interface: Interface, original_output: list, perturbed_output: list -) -> int | float: - output_component = interface.output_components[0] - post_original_output = output_component.postprocess(original_output[0]) - post_perturbed_output = output_component.postprocess(perturbed_output[0]) - - if isinstance(output_component, components.Label): - original_label = post_original_output["label"] - perturbed_label = post_perturbed_output["label"] - - # Handle different return types of Label interface - if "confidences" in post_original_output: - original_confidence = original_output[0][original_label] - perturbed_confidence = perturbed_output[0][original_label] - score = original_confidence - perturbed_confidence - else: - score = diff(original_label, perturbed_label) - return score - - elif isinstance(output_component, components.Number): - score = diff(post_original_output, post_perturbed_output) - return score - - else: - raise ValueError( - f"This interpretation method doesn't support the Output 
component: {output_component}" - ) - - -def get_regression_or_classification_value( - interface: Interface, original_output: list, perturbed_output: list -) -> int | float: - """Used to combine regression/classification for Shap interpretation method.""" - output_component = interface.output_components[0] - post_original_output = output_component.postprocess(original_output[0]) - post_perturbed_output = output_component.postprocess(perturbed_output[0]) - - if isinstance(output_component, components.Label): - original_label = post_original_output["label"] - perturbed_label = post_perturbed_output["label"] - - # Handle different return types of Label interface - if "confidences" in post_original_output: - if math.isnan(perturbed_output[0][original_label]): - return 0 - return perturbed_output[0][original_label] - else: - score = diff( - perturbed_label, original_label - ) # Intentionally inverted order of arguments. - return score - - else: - raise ValueError( - f"This interpretation method doesn't support the Output component: {output_component}" - ) diff --git a/gradio/layouts.py b/gradio/layouts.py deleted file mode 100644 index f4d4374aca36..000000000000 --- a/gradio/layouts.py +++ /dev/null @@ -1,373 +0,0 @@ -from __future__ import annotations - -import warnings -from typing import TYPE_CHECKING, Literal - -from gradio_client.documentation import document, set_documentation_group - -from gradio.blocks import BlockContext, Updateable -from gradio.deprecation import warn_deprecation, warn_style_method_deprecation -from gradio.events import Changeable, Selectable - -if TYPE_CHECKING: - from gradio.blocks import Block - -set_documentation_group("layout") - - -@document() -class Row(Updateable, BlockContext): - """ - Row is a layout element within Blocks that renders all children horizontally. - Example: - with gr.Blocks() as demo: - with gr.Row(): - gr.Image("lion.jpg", scale=2) - gr.Image("tiger.jpg", scale=1) - demo.launch() - Guides: controlling-layout - """ - - def __init__( - self, - *, - variant: Literal["default", "panel", "compact"] = "default", - visible: bool = True, - elem_id: str | None = None, - elem_classes: list[str] | str | None = None, - equal_height: bool = True, - **kwargs, - ): - """ - Parameters: - variant: row type, 'default' (no background), 'panel' (gray background color and rounded corners), or 'compact' (rounded corners and no internal gap). - visible: If False, row will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - elem_classes: An optional string or list of strings that are assigned as the class of this component in the HTML DOM. Can be used for targeting CSS styles. - equal_height: If True, makes every child element have equal height - """ - self.variant = variant - self.equal_height = equal_height - if variant == "compact": - self.allow_expected_parents = False - BlockContext.__init__( - self, visible=visible, elem_id=elem_id, elem_classes=elem_classes, **kwargs - ) - - @staticmethod - def update( - visible: bool | None = None, - ): - return { - "visible": visible, - "__type__": "update", - } - - def style( - self, - *, - equal_height: bool | None = None, - **kwargs, - ): - """ - Styles the Row. 
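Aside (illustrative, not part of the patch): the `style()` methods kept on Row and Box only emit `warn_style_method_deprecation()` before applying their arguments; the supported spelling passes the same options to the constructor. A minimal sketch using only the constructor parameters shown in this file:

import gradio as gr

with gr.Blocks() as demo:
    with gr.Row(equal_height=False):  # instead of gr.Row().style(equal_height=False)
        gr.Textbox(label="first")
        gr.Textbox(label="last")
# demo.launch()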
- Parameters: - equal_height: If True, makes every child element have equal height - """ - warn_style_method_deprecation() - if equal_height is not None: - self.equal_height = equal_height - return self - - -@document() -class Column(Updateable, BlockContext): - """ - Column is a layout element within Blocks that renders all children vertically. The widths of columns can be set through the `scale` and `min_width` parameters. - If a certain scale results in a column narrower than min_width, the min_width parameter will win. - Example: - with gr.Blocks() as demo: - with gr.Row(): - with gr.Column(scale=1): - text1 = gr.Textbox() - text2 = gr.Textbox() - with gr.Column(scale=4): - btn1 = gr.Button("Button 1") - btn2 = gr.Button("Button 2") - Guides: controlling-layout - """ - - def __init__( - self, - *, - scale: int = 1, - min_width: int = 320, - variant: Literal["default", "panel", "compact"] = "default", - visible: bool = True, - elem_id: str | None = None, - elem_classes: list[str] | str | None = None, - **kwargs, - ): - """ - Parameters: - scale: relative width compared to adjacent Columns. For example, if Column A has scale=2, and Column B has scale=1, A will be twice as wide as B. - min_width: minimum pixel width of Column, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in a column narrower than min_width, the min_width parameter will be respected first. - variant: column type, 'default' (no background), 'panel' (gray background color and rounded corners), or 'compact' (rounded corners and no internal gap). - visible: If False, column will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - elem_classes: An optional string or list of strings that are assigned as the class of this component in the HTML DOM. Can be used for targeting CSS styles. - """ - if scale != round(scale): - warn_deprecation( - f"'scale' value should be an integer. Using {scale} will cause issues." - ) - - self.scale = scale - self.min_width = min_width - self.variant = variant - if variant == "compact": - self.allow_expected_parents = False - BlockContext.__init__( - self, visible=visible, elem_id=elem_id, elem_classes=elem_classes, **kwargs - ) - - @staticmethod - def update( - variant: str | None = None, - visible: bool | None = None, - ): - return { - "variant": variant, - "visible": visible, - "__type__": "update", - } - - -class Tabs(Updateable, BlockContext, Changeable, Selectable): - """ - Tabs is a layout element within Blocks that can contain multiple "Tab" Components. - """ - - def __init__( - self, - *, - selected: int | str | None = None, - visible: bool = True, - elem_id: str | None = None, - elem_classes: list[str] | str | None = None, - **kwargs, - ): - """ - Parameters: - selected: The currently selected tab. Must correspond to an id passed to the one of the child TabItems. Defaults to the first TabItem. - visible: If False, Tabs will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - elem_classes: An optional string or list of strings that are assigned as the class of this component in the HTML DOM. Can be used for targeting CSS styles. 
- """ - BlockContext.__init__( - self, visible=visible, elem_id=elem_id, elem_classes=elem_classes, **kwargs - ) - Changeable.__init__(self) - Selectable.__init__(self) - self.selected = selected - - @staticmethod - def update( - selected: int | str | None = None, - ): - return { - "selected": selected, - "__type__": "update", - } - - -@document() -class Tab(Updateable, BlockContext, Selectable): - """ - Tab (or its alias TabItem) is a layout element. Components defined within the Tab will be visible when this tab is selected tab. - Example: - with gr.Blocks() as demo: - with gr.Tab("Lion"): - gr.Image("lion.jpg") - gr.Button("New Lion") - with gr.Tab("Tiger"): - gr.Image("tiger.jpg") - gr.Button("New Tiger") - Guides: controlling-layout - """ - - def __init__( - self, - label: str, - *, - id: int | str | None = None, - elem_id: str | None = None, - elem_classes: list[str] | str | None = None, - **kwargs, - ): - """ - Parameters: - label: The visual label for the tab - id: An optional identifier for the tab, required if you wish to control the selected tab from a predict function. - elem_id: An optional string that is assigned as the id of the
<div>
containing the contents of the Tab layout. The same string followed by "-button" is attached to the Tab button. Can be used for targeting CSS styles. - elem_classes: An optional string or list of strings that are assigned as the class of this component in the HTML DOM. Can be used for targeting CSS styles. - """ - BlockContext.__init__( - self, elem_id=elem_id, elem_classes=elem_classes, **kwargs - ) - Selectable.__init__(self) - self.label = label - self.id = id - - def get_expected_parent(self) -> type[Tabs]: - return Tabs - - def get_block_name(self): - return "tabitem" - - -TabItem = Tab - - -@document() -class Group(Updateable, BlockContext): - """ - Group is a layout element within Blocks which groups together children so that - they do not have any padding or margin between them. - Example: - with gr.Group(): - gr.Textbox(label="First") - gr.Textbox(label="Last") - """ - - def __init__( - self, - *, - visible: bool = True, - elem_id: str | None = None, - elem_classes: list[str] | str | None = None, - **kwargs, - ): - """ - Parameters: - visible: If False, group will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - elem_classes: An optional string or list of strings that are assigned as the class of this component in the HTML DOM. Can be used for targeting CSS styles. - """ - BlockContext.__init__( - self, visible=visible, elem_id=elem_id, elem_classes=elem_classes, **kwargs - ) - - @staticmethod - def update( - visible: bool | None = None, - ): - return { - "visible": visible, - "__type__": "update", - } - - -class Box(Updateable, BlockContext): - """ - DEPRECATED. - Box is a a layout element which places children in a box with rounded corners and - some padding around them. - Example: - with gr.Box(): - gr.Textbox(label="First") - gr.Textbox(label="Last") - """ - - def __init__( - self, - *, - visible: bool = True, - elem_id: str | None = None, - **kwargs, - ): - """ - Parameters: - visible: If False, box will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - """ - warnings.warn("gr.Box is deprecated. Use gr.Group instead.", DeprecationWarning) - BlockContext.__init__(self, visible=visible, elem_id=elem_id, **kwargs) - - @staticmethod - def update( - visible: bool | None = None, - ): - return { - "visible": visible, - "__type__": "update", - } - - def style(self, **kwargs): - warn_style_method_deprecation() - return self - - -class Form(Updateable, BlockContext): - def __init__(self, *, scale: int = 0, min_width: int = 0, **kwargs): - """ - Parameters: - scale: relative width compared to adjacent Columns. For example, if Column A has scale=2, and Column B has scale=1, A will be twice as wide as B. - min_width: minimum pixel width of Column, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in a column narrower than min_width, the min_width parameter will be respected first. 
- """ - self.scale = scale - self.min_width = min_width - BlockContext.__init__(self, **kwargs) - - def add_child(self, child: Block): - if isinstance(self.parent, Row): - scale = getattr(child, "scale", None) - self.scale += 1 if scale is None else scale - self.min_width += getattr(child, "min_width", 0) or 0 - BlockContext.add_child(self, child) - - -@document() -class Accordion(Updateable, BlockContext): - """ - Accordion is a layout element which can be toggled to show/hide the contained content. - Example: - with gr.Accordion("See Details"): - gr.Markdown("lorem ipsum") - """ - - def __init__( - self, - label, - *, - open: bool = True, - visible: bool = True, - elem_id: str | None = None, - elem_classes: list[str] | str | None = None, - **kwargs, - ): - """ - Parameters: - label: name of accordion section. - open: if True, accordion is open by default. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - elem_classes: An optional string or list of strings that are assigned as the class of this component in the HTML DOM. Can be used for targeting CSS styles. - """ - self.label = label - self.open = open - BlockContext.__init__( - self, visible=visible, elem_id=elem_id, elem_classes=elem_classes, **kwargs - ) - - @staticmethod - def update( - open: bool | None = None, - label: str | None = None, - visible: bool | None = None, - ): - return { - "visible": visible, - "label": label, - "open": open, - "__type__": "update", - } diff --git a/gradio/layouts/__init__.py b/gradio/layouts/__init__.py new file mode 100644 index 000000000000..f57d339cceac --- /dev/null +++ b/gradio/layouts/__init__.py @@ -0,0 +1,19 @@ +from .accordion import Accordion +from .box import Box +from .column import Column +from .form import Form +from .group import Group +from .row import Row +from .tabs import Tab, TabItem, Tabs + +__all__ = [ + "Accordion", + "Box", + "Column", + "Form", + "Row", + "Group", + "Tabs", + "Tab", + "TabItem", +] diff --git a/gradio/layouts/accordion.py b/gradio/layouts/accordion.py new file mode 100644 index 000000000000..23f7d6003349 --- /dev/null +++ b/gradio/layouts/accordion.py @@ -0,0 +1,61 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from gradio_client.documentation import document, set_documentation_group + +from gradio.blocks import BlockContext +from gradio.component_meta import ComponentMeta + +if TYPE_CHECKING: + pass + +set_documentation_group("layout") + + +@document() +class Accordion(BlockContext, metaclass=ComponentMeta): + """ + Accordion is a layout element which can be toggled to show/hide the contained content. + Example: + with gr.Accordion("See Details"): + gr.Markdown("lorem ipsum") + """ + + EVENTS = [] + + def __init__( + self, + label, + *, + open: bool = True, + visible: bool = True, + elem_id: str | None = None, + elem_classes: list[str] | str | None = None, + **kwargs, + ): + """ + Parameters: + label: name of accordion section. + open: if True, accordion is open by default. + elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. + elem_classes: An optional string or list of strings that are assigned as the class of this component in the HTML DOM. Can be used for targeting CSS styles. 
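Aside (illustrative, not part of the patch): the monolithic gradio/layouts.py deleted above is split into the gradio/layouts/ package that follows, and the new __init__.py re-exports every layout class, so existing imports keep working. A small sketch against the unchanged public API; the component choices are illustrative:

import gradio as gr
from gradio.layouts import Accordion, Row  # same import path as before the split

with gr.Blocks() as demo:
    with Row():
        with Accordion("Advanced options", open=False):
            gr.Slider(0, 10, value=3, label="beam width")
        gr.Textbox(label="prompt")
# demo.launch()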
+ """ + self.label = label + self.open = open + BlockContext.__init__( + self, visible=visible, elem_id=elem_id, elem_classes=elem_classes, **kwargs + ) + + @staticmethod + def update( + open: bool | None = None, + label: str | None = None, + visible: bool | None = None, + ): + return { + "visible": visible, + "label": label, + "open": open, + "__type__": "update", + } diff --git a/gradio/layouts/box.py b/gradio/layouts/box.py new file mode 100644 index 000000000000..af0f62425daa --- /dev/null +++ b/gradio/layouts/box.py @@ -0,0 +1,49 @@ +from __future__ import annotations + +import warnings + +from gradio.blocks import BlockContext +from gradio.component_meta import ComponentMeta +from gradio.deprecation import warn_style_method_deprecation + + +class Box(BlockContext, metaclass=ComponentMeta): + """ + DEPRECATED. + Box is a a layout element which places children in a box with rounded corners and + some padding around them. + Example: + with gr.Box(): + gr.Textbox(label="First") + gr.Textbox(label="Last") + """ + + EVENTS = [] + + def __init__( + self, + *, + visible: bool = True, + elem_id: str | None = None, + **kwargs, + ): + """ + Parameters: + visible: If False, box will be hidden. + elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. + """ + warnings.warn("gr.Box is deprecated. Use gr.Group instead.", DeprecationWarning) + BlockContext.__init__(self, visible=visible, elem_id=elem_id, **kwargs) + + @staticmethod + def update( + visible: bool | None = None, + ): + return { + "visible": visible, + "__type__": "update", + } + + def style(self, **kwargs): + warn_style_method_deprecation() + return self diff --git a/gradio/layouts/column.py b/gradio/layouts/column.py new file mode 100644 index 000000000000..d06be4863f60 --- /dev/null +++ b/gradio/layouts/column.py @@ -0,0 +1,76 @@ +from __future__ import annotations + +from typing import Literal + +from gradio_client.documentation import document, set_documentation_group + +from gradio.blocks import BlockContext +from gradio.component_meta import ComponentMeta +from gradio.deprecation import warn_deprecation + +set_documentation_group("layout") + + +@document() +class Column(BlockContext, metaclass=ComponentMeta): + """ + Column is a layout element within Blocks that renders all children vertically. The widths of columns can be set through the `scale` and `min_width` parameters. + If a certain scale results in a column narrower than min_width, the min_width parameter will win. + Example: + with gr.Blocks() as demo: + with gr.Row(): + with gr.Column(scale=1): + text1 = gr.Textbox() + text2 = gr.Textbox() + with gr.Column(scale=4): + btn1 = gr.Button("Button 1") + btn2 = gr.Button("Button 2") + Guides: controlling-layout + """ + + EVENTS = ["baz"] + + def __init__( + self, + *, + scale: int = 1, + min_width: int = 320, + variant: Literal["default", "panel", "compact"] = "default", + visible: bool = True, + elem_id: str | None = None, + elem_classes: list[str] | str | None = None, + **kwargs, + ): + """ + Parameters: + scale: relative width compared to adjacent Columns. For example, if Column A has scale=2, and Column B has scale=1, A will be twice as wide as B. + min_width: minimum pixel width of Column, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in a column narrower than min_width, the min_width parameter will be respected first. 
+ variant: column type, 'default' (no background), 'panel' (gray background color and rounded corners), or 'compact' (rounded corners and no internal gap). + visible: If False, column will be hidden. + elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. + elem_classes: An optional string or list of strings that are assigned as the class of this component in the HTML DOM. Can be used for targeting CSS styles. + """ + if scale != round(scale): + warn_deprecation( + f"'scale' value should be an integer. Using {scale} will cause issues." + ) + + self.scale = scale + self.min_width = min_width + self.variant = variant + if variant == "compact": + self.allow_expected_parents = False + BlockContext.__init__( + self, visible=visible, elem_id=elem_id, elem_classes=elem_classes, **kwargs + ) + + @staticmethod + def update( + variant: str | None = None, + visible: bool | None = None, + ): + return { + "variant": variant, + "visible": visible, + "__type__": "update", + } diff --git a/gradio/layouts/form.py b/gradio/layouts/form.py new file mode 100644 index 000000000000..f998ff3c4013 --- /dev/null +++ b/gradio/layouts/form.py @@ -0,0 +1,35 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from gradio_client.documentation import set_documentation_group + +from gradio.blocks import BlockContext +from gradio.component_meta import ComponentMeta +from gradio.layouts.row import Row + +if TYPE_CHECKING: + from gradio.blocks import Block + +set_documentation_group("layout") + + +class Form(BlockContext, metaclass=ComponentMeta): + EVENTS = [] + + def __init__(self, *, scale: int = 0, min_width: int = 0, **kwargs): + """ + Parameters: + scale: relative width compared to adjacent Columns. For example, if Column A has scale=2, and Column B has scale=1, A will be twice as wide as B. + min_width: minimum pixel width of Column, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in a column narrower than min_width, the min_width parameter will be respected first. + """ + self.scale = scale + self.min_width = min_width + BlockContext.__init__(self, **kwargs) + + def add_child(self, child: Block): + if isinstance(self.parent, Row): + scale = getattr(child, "scale", None) + self.scale += 1 if scale is None else scale + self.min_width += getattr(child, "min_width", 0) or 0 + BlockContext.add_child(self, child) diff --git a/gradio/layouts/group.py b/gradio/layouts/group.py new file mode 100644 index 000000000000..4237cf769fd4 --- /dev/null +++ b/gradio/layouts/group.py @@ -0,0 +1,49 @@ +from __future__ import annotations + +from gradio_client.documentation import document, set_documentation_group + +from gradio.blocks import BlockContext +from gradio.component_meta import ComponentMeta + +set_documentation_group("layout") + + +@document() +class Group(BlockContext, metaclass=ComponentMeta): + """ + Group is a layout element within Blocks which groups together children so that + they do not have any padding or margin between them. + Example: + with gr.Group(): + gr.Textbox(label="First") + gr.Textbox(label="Last") + """ + + EVENTS = [] + + def __init__( + self, + *, + visible: bool = True, + elem_id: str | None = None, + elem_classes: list[str] | str | None = None, + **kwargs, + ): + """ + Parameters: + visible: If False, group will be hidden. + elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. 
+ elem_classes: An optional string or list of strings that are assigned as the class of this component in the HTML DOM. Can be used for targeting CSS styles. + """ + BlockContext.__init__( + self, visible=visible, elem_id=elem_id, elem_classes=elem_classes, **kwargs + ) + + @staticmethod + def update( + visible: bool | None = None, + ): + return { + "visible": visible, + "__type__": "update", + } diff --git a/gradio/layouts/row.py b/gradio/layouts/row.py new file mode 100644 index 000000000000..8240be216184 --- /dev/null +++ b/gradio/layouts/row.py @@ -0,0 +1,78 @@ +from __future__ import annotations + +from typing import Literal + +from gradio_client.documentation import document, set_documentation_group + +from gradio.blocks import BlockContext +from gradio.component_meta import ComponentMeta +from gradio.deprecation import warn_style_method_deprecation + +set_documentation_group("layout") + + +@document() +class Row(BlockContext, metaclass=ComponentMeta): + """ + Row is a layout element within Blocks that renders all children horizontally. + Example: + with gr.Blocks() as demo: + with gr.Row(): + gr.Image("lion.jpg", scale=2) + gr.Image("tiger.jpg", scale=1) + demo.launch() + Guides: controlling-layout + """ + + EVENTS = [] + + def __init__( + self, + *, + variant: Literal["default", "panel", "compact"] = "default", + visible: bool = True, + elem_id: str | None = None, + elem_classes: list[str] | str | None = None, + equal_height: bool = True, + **kwargs, + ): + """ + Parameters: + variant: row type, 'default' (no background), 'panel' (gray background color and rounded corners), or 'compact' (rounded corners and no internal gap). + visible: If False, row will be hidden. + elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. + elem_classes: An optional string or list of strings that are assigned as the class of this component in the HTML DOM. Can be used for targeting CSS styles. + equal_height: If True, makes every child element have equal height + """ + self.variant = variant + self.equal_height = equal_height + if variant == "compact": + self.allow_expected_parents = False + BlockContext.__init__( + self, visible=visible, elem_id=elem_id, elem_classes=elem_classes, **kwargs + ) + + @staticmethod + def update( + visible: bool | None = None, + ): + return { + "visible": visible, + "__type__": "update", + } + + def style( + self, + *, + equal_height: bool | None = None, + **kwargs, + ): + """ + Styles the Row. + Parameters: + equal_height: If True, makes every child element have equal height + """ + warn_style_method_deprecation() + if equal_height is not None: + self.equal_height = equal_height + return self diff --git a/gradio/layouts/tabs.py b/gradio/layouts/tabs.py new file mode 100644 index 000000000000..1eb883873dde --- /dev/null +++ b/gradio/layouts/tabs.py @@ -0,0 +1,96 @@ +from __future__ import annotations + +from gradio_client.documentation import document, set_documentation_group + +from gradio.blocks import BlockContext +from gradio.component_meta import ComponentMeta +from gradio.events import Events + +set_documentation_group("layout") + + +class Tabs(BlockContext, metaclass=ComponentMeta): + """ + Tabs is a layout element within Blocks that can contain multiple "Tab" Components. 
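Aside (illustrative, not part of the patch): with the ComponentMeta metaclass used by these new layout modules, a class declares its supported events in an EVENTS attribute (for Tabs, change and select, as the hunk just below shows) and the matching listener methods are generated from that list. A hedged sketch of wiring the select listener; the handler and tab labels are invented, and evt.value is assumed to carry the selected tab's label:

import gradio as gr

with gr.Blocks() as demo:
    with gr.Tabs() as tabs:
        with gr.Tab("Lion", id="lion"):
            gr.Markdown("Lion facts go here.")
        with gr.Tab("Tiger", id="tiger"):
            gr.Markdown("Tiger facts go here.")
    picked = gr.Textbox(label="last selected tab")

    def record_selection(evt: gr.SelectData) -> str:
        # evt.value is assumed to be the label of the tab that was clicked
        return str(evt.value)

    tabs.select(record_selection, None, picked)
# demo.launch()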
+ """ + + EVENTS = [Events.change, Events.select] + + def __init__( + self, + *, + selected: int | str | None = None, + visible: bool = True, + elem_id: str | None = None, + elem_classes: list[str] | str | None = None, + **kwargs, + ): + """ + Parameters: + selected: The currently selected tab. Must correspond to an id passed to the one of the child TabItems. Defaults to the first TabItem. + visible: If False, Tabs will be hidden. + elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. + elem_classes: An optional string or list of strings that are assigned as the class of this component in the HTML DOM. Can be used for targeting CSS styles. + """ + BlockContext.__init__( + self, visible=visible, elem_id=elem_id, elem_classes=elem_classes, **kwargs + ) + self.selected = selected + + @staticmethod + def update( + selected: int | str | None = None, + ): + return { + "selected": selected, + "__type__": "update", + } + + +@document() +class Tab(BlockContext, metaclass=ComponentMeta): + """ + Tab (or its alias TabItem) is a layout element. Components defined within the Tab will be visible when this tab is selected tab. + Example: + with gr.Blocks() as demo: + with gr.Tab("Lion"): + gr.Image("lion.jpg") + gr.Button("New Lion") + with gr.Tab("Tiger"): + gr.Image("tiger.jpg") + gr.Button("New Tiger") + Guides: controlling-layout + """ + + EVENTS = [Events.select] + + def __init__( + self, + label: str, + *, + id: int | str | None = None, + elem_id: str | None = None, + elem_classes: list[str] | str | None = None, + **kwargs, + ): + """ + Parameters: + label: The visual label for the tab + id: An optional identifier for the tab, required if you wish to control the selected tab from a predict function. + elem_id: An optional string that is assigned as the id of the
<div>
containing the contents of the Tab layout. The same string followed by "-button" is attached to the Tab button. Can be used for targeting CSS styles. + elem_classes: An optional string or list of strings that are assigned as the class of this component in the HTML DOM. Can be used for targeting CSS styles. + """ + BlockContext.__init__( + self, elem_id=elem_id, elem_classes=elem_classes, **kwargs + ) + self.label = label + self.id = id + + def get_expected_parent(self) -> type[Tabs]: + return Tabs + + def get_block_name(self): + return "tabitem" + + +TabItem = Tab diff --git a/gradio/oauth.py b/gradio/oauth.py index ccb63a79b758..78053a9dee89 100644 --- a/gradio/oauth.py +++ b/gradio/oauth.py @@ -86,12 +86,12 @@ async def oauth_login(request: fastapi.Request): if ".hf.space" in redirect_uri: # In Space, FastAPI redirect as http but we want https redirect_uri = redirect_uri.replace("http://", "https://") - return await oauth.huggingface.authorize_redirect(request, redirect_uri) + return await oauth.huggingface.authorize_redirect(request, redirect_uri) # type: ignore @app.get("/login/callback") async def oauth_redirect_callback(request: fastapi.Request) -> RedirectResponse: """Endpoint that handles the OAuth callback.""" - token = await oauth.huggingface.authorize_access_token(request) + token = await oauth.huggingface.authorize_access_token(request) # type: ignore request.session["oauth_profile"] = token["userinfo"] request.session["oauth_token"] = token return RedirectResponse("/") diff --git a/gradio/outputs.py b/gradio/outputs.py deleted file mode 100644 index b6d2d20c8f5e..000000000000 --- a/gradio/outputs.py +++ /dev/null @@ -1,313 +0,0 @@ -# type: ignore -""" -This module defines various classes that can serve as the `output` to an interface. Each class must inherit from -`OutputComponent`, and each class must define a path to its template. All of the subclasses of `OutputComponent` are -automatically added to a registry, which allows them to be easily referenced in other parts of the code. -""" - -from __future__ import annotations - -from typing import Optional - -from gradio import components -from gradio.deprecation import warn_deprecation - - -def warn_outputs_deprecation(): - warn_deprecation( - "Usage of gradio.outputs is deprecated, and will not be supported in the future, " - "please import your components from gradio.components", - ) - - -class Textbox(components.Textbox): - def __init__( - self, - type: str = "text", - label: Optional[str] = None, - ): - warn_outputs_deprecation() - super().__init__(label=label, type=type) - - -class Image(components.Image): - """ - Component displays an output image. - Output type: Union[numpy.array, PIL.Image, str, matplotlib.pyplot, Tuple[Union[numpy.array, PIL.Image, str], List[Tuple[str, float, float, float, float]]]] - """ - - def __init__( - self, type: str = "auto", plot: bool = False, label: Optional[str] = None - ): - """ - Parameters: - type (str): Type of value to be passed to component. "numpy" expects a numpy array with shape (height, width, 3), "pil" expects a PIL image object, "file" expects a file path to the saved image or a remote URL, "plot" expects a matplotlib.pyplot object, "auto" detects return type. - plot (bool): DEPRECATED. Whether to expect a plot to be returned by the function. - label (str): component name in interface. - """ - warn_outputs_deprecation() - if plot: - type = "plot" - super().__init__(type=type, label=label) - - -class Video(components.Video): - """ - Used for video output. 
- Output type: filepath - """ - - def __init__(self, type: Optional[str] = None, label: Optional[str] = None): - """ - Parameters: - type (str): Type of video format to be passed to component, such as 'avi' or 'mp4'. Use 'mp4' to ensure browser playability. If set to None, video will keep returned format. - label (str): component name in interface. - """ - warn_outputs_deprecation() - super().__init__(format=type, label=label) - - -class Audio(components.Audio): - """ - Creates an audio player that plays the output audio. - Output type: Union[Tuple[int, numpy.array], str] - """ - - def __init__(self, type: str = "auto", label: Optional[str] = None): - """ - Parameters: - type (str): Type of value to be passed to component. "numpy" returns a 2-set tuple with an integer sample_rate and the data as 16-bit int numpy.array of shape (samples, 2), "file" returns a temporary file path to the saved wav audio file, "auto" detects return type. - label (str): component name in interface. - """ - warn_outputs_deprecation() - super().__init__(type=type, label=label) - - -class File(components.File): - """ - Used for file output. - Output type: Union[file-like, str] - """ - - def __init__(self, label: Optional[str] = None): - """ - Parameters: - label (str): component name in interface. - """ - warn_outputs_deprecation() - super().__init__(label=label) - - -class Dataframe(components.Dataframe): - """ - Component displays 2D output through a spreadsheet interface. - Output type: Union[pandas.DataFrame, numpy.array, List[Union[str, float]], List[List[Union[str, float]]]] - """ - - def __init__( - self, - headers: Optional[list[str]] = None, - max_rows: Optional[int] = 20, - max_cols: Optional[int] = None, - overflow_row_behaviour: str = "paginate", - type: str = "auto", - label: Optional[str] = None, - ): - """ - Parameters: - headers (List[str]): Header names to dataframe. Only applicable if type is "numpy" or "array". - max_rows (int): Maximum number of rows to display at once. Set to None for infinite. - max_cols (int): Maximum number of columns to display at once. Set to None for infinite. - overflow_row_behaviour (str): If set to "paginate", will create pages for overflow rows. If set to "show_ends", will show initial and final rows and truncate middle rows. - type (str): Type of value to be passed to component. "pandas" for pandas dataframe, "numpy" for numpy array, or "array" for Python array, "auto" detects return type. - label (str): component name in interface. - """ - warn_outputs_deprecation() - super().__init__( - headers=headers, - type=type, - label=label, - max_rows=max_rows, - max_cols=max_cols, - overflow_row_behaviour=overflow_row_behaviour, - ) - - -class Timeseries(components.Timeseries): - """ - Component accepts pandas.DataFrame. - Output type: pandas.DataFrame - """ - - def __init__( - self, x: str = None, y: str | list[str] = None, label: Optional[str] = None - ): - """ - Parameters: - x (str): Column name of x (time) series. None if csv has no headers, in which case first column is x series. - y (Union[str, List[str]]): Column name of y series, or list of column names if multiple series. None if csv has no headers, in which case every column after first is a y series. - label (str): component name in interface. - """ - warn_outputs_deprecation() - super().__init__(x=x, y=y, label=label) - - -class State(components.State): - """ - Special hidden component that stores state across runs of the interface. 
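Aside (illustrative, not part of the patch): the underlying components.State stays available; only the deprecated outputs.State wrapper here is going away. A minimal sketch of plain gr.State usage, with an invented click counter as the stored value:

import gradio as gr

def count(clicks_so_far: int):
    # first return value updates the stored state, second feeds the visible output
    clicks_so_far += 1
    return clicks_so_far, clicks_so_far

with gr.Blocks() as demo:
    clicks = gr.State(0)
    shown = gr.Number(label="clicks so far")
    gr.Button("Click me").click(count, clicks, [clicks, shown])
# demo.launch()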
- Output type: Any - """ - - def __init__(self, label: Optional[str] = None): - """ - Parameters: - label (str): component name in interface (not used). - """ - warn_outputs_deprecation() - super().__init__(label=label) - - -class Label(components.Label): - """ - Component outputs a classification label, along with confidence scores of top categories if provided. Confidence scores are represented as a dictionary mapping labels to scores between 0 and 1. - Output type: Union[Dict[str, float], str, int, float] - """ - - def __init__( - self, - num_top_classes: Optional[int] = None, - type: str = "auto", - label: Optional[str] = None, - ): - """ - Parameters: - num_top_classes (int): number of most confident classes to show. - type (str): Type of value to be passed to component. "value" expects a single out label, "confidences" expects a dictionary mapping labels to confidence scores, "auto" detects return type. - label (str): component name in interface. - """ - warn_outputs_deprecation() - super().__init__(num_top_classes=num_top_classes, type=type, label=label) - - -class KeyValues: - """ - Component displays a table representing values for multiple fields. - Output type: Union[Dict, List[Tuple[str, Union[str, int, float]]]] - """ - - def __init__(self, value: str = " ", *, label: Optional[str] = None, **kwargs): - """ - Parameters: - value (str): IGNORED - label (str): component name in interface. - """ - raise DeprecationWarning( - "The KeyValues component is deprecated. Please use the DataFrame or JSON " - "components instead." - ) - - -class HighlightedText(components.HighlightedText): - """ - Component creates text that contains spans that are highlighted by category or numerical value. - Output is represent as a list of Tuple pairs, where the first element represents the span of text represented by the tuple, and the second element represents the category or value of the text. - Output type: List[Tuple[str, Union[float, str]]] - """ - - def __init__( - self, - color_map: dict[str, str] = None, - label: Optional[str] = None, - show_legend: bool = False, - ): - """ - Parameters: - color_map (Dict[str, str]): Map between category and respective colors - label (str): component name in interface. - show_legend (bool): whether to show span categories in a separate legend or inline. - """ - warn_outputs_deprecation() - super().__init__(color_map=color_map, label=label, show_legend=show_legend) - - -class JSON(components.JSON): - """ - Used for JSON output. Expects a JSON string or a Python object that is JSON serializable. - Output type: Union[str, Any] - """ - - def __init__(self, label: Optional[str] = None): - """ - Parameters: - label (str): component name in interface. - """ - warn_outputs_deprecation() - super().__init__(label=label) - - -class HTML(components.HTML): - """ - Used for HTML output. Expects an HTML valid string. - Output type: str - """ - - def __init__(self, label: Optional[str] = None): - """ - Parameters: - label (str): component name in interface. - """ - super().__init__(label=label) - - -class Carousel(components.Carousel): - """ - Component displays a set of output components that can be scrolled through. - """ - - def __init__( - self, - components: components.Component | list[components.Component], - label: Optional[str] = None, - ): - """ - Parameters: - components (Union[List[Component], Component]): Classes of component(s) that will be scrolled through. - label (str): component name in interface. 
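Aside (illustrative, not part of the patch): like the gradio.inputs shims earlier in this patch, the deleted gradio.outputs module essentially only warned and forwarded to gradio.components. A minimal sketch of the modern spelling, with a placeholder sentiment function that is not from the PR:

import gradio as gr

def sentiment(text: str) -> dict:
    # placeholder scores; a real model would go here
    positive = 0.9 if "good" in text.lower() else 0.2
    return {"positive": positive, "negative": 1.0 - positive}

demo = gr.Interface(
    fn=sentiment,
    inputs=gr.Textbox(),
    outputs=gr.Label(num_top_classes=2),  # instead of gradio.outputs.Label(...)
)
# demo.launch()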
- """ - warn_outputs_deprecation() - super().__init__(components=components, label=label) - - -class Chatbot(components.Chatbot): - """ - Component displays a chatbot output showing both user submitted messages and responses - Output type: List[Tuple[str, str]] - """ - - def __init__(self, label: Optional[str] = None): - """ - Parameters: - label (str): component name in interface (not used). - """ - warn_outputs_deprecation() - super().__init__(label=label) - - -class Image3D(components.Model3D): - """ - Used for 3D image model output. - Input type: File object of type (.obj, glb, or .gltf) - """ - - def __init__( - self, - clear_color=None, - label: Optional[str] = None, - ): - """ - Parameters: - label (str): component name in interface. - optional (bool): If True, the interface can be submitted with no uploaded image, in which case the input value is None. - """ - warn_outputs_deprecation() - super().__init__(clear_color=clear_color, label=label) diff --git a/gradio/package.json b/gradio/package.json index f22c0f1faef2..f9788268fe32 100644 --- a/gradio/package.json +++ b/gradio/package.json @@ -1,6 +1,6 @@ { "name": "gradio", - "version": "3.47.1", + "version": "3.45.0-beta.11", "description": "", "python": "true" } diff --git a/gradio/processing_utils.py b/gradio/processing_utils.py index d2fd6292cffd..342dd0b12fd8 100644 --- a/gradio/processing_utils.py +++ b/gradio/processing_utils.py @@ -1,25 +1,33 @@ from __future__ import annotations import base64 +import hashlib import json import logging import os +import secrets import shutil import subprocess import tempfile +import urllib.request import warnings from io import BytesIO from pathlib import Path +from typing import TYPE_CHECKING, Any, Literal +import aiofiles +import anyio import numpy as np +import requests +from anyio import CapacityLimiter +from fastapi import UploadFile from gradio_client import utils as client_utils from PIL import Image, ImageOps, PngImagePlugin from gradio import wasm_utils +from gradio.data_classes import FileData -if not wasm_utils.IS_WASM: - # TODO: Support ffmpeg on Wasm - from ffmpy import FFmpeg, FFprobe, FFRuntimeError +from .utils import abspath, is_in_or_equal with warnings.catch_warnings(): warnings.simplefilter("ignore") # Ignore pydub warning if ffmpeg is not installed @@ -27,6 +35,9 @@ log = logging.getLogger(__name__) +if TYPE_CHECKING: + from gradio.components.base import Component + ######################### # GENERAL ######################### @@ -108,6 +119,217 @@ def encode_array_to_base64(image_array): return "data:image/png;base64," + base64_str +def hash_file(file_path: str | Path, chunk_num_blocks: int = 128) -> str: + sha1 = hashlib.sha1() + with open(file_path, "rb") as f: + for chunk in iter(lambda: f.read(chunk_num_blocks * sha1.block_size), b""): + sha1.update(chunk) + return sha1.hexdigest() + + +def hash_url(url: str, chunk_num_blocks: int = 128) -> str: + sha1 = hashlib.sha1() + remote = urllib.request.urlopen(url) + max_file_size = 100 * 1024 * 1024 # 100MB + total_read = 0 + while True: + data = remote.read(chunk_num_blocks * sha1.block_size) + total_read += chunk_num_blocks * sha1.block_size + if not data or total_read > max_file_size: + break + sha1.update(data) + return sha1.hexdigest() + + +def hash_bytes(bytes: bytes): + sha1 = hashlib.sha1() + sha1.update(bytes) + return sha1.hexdigest() + + +def hash_base64(base64_encoding: str, chunk_num_blocks: int = 128) -> str: + sha1 = hashlib.sha1() + for i in range(0, len(base64_encoding), chunk_num_blocks * sha1.block_size): 
+ data = base64_encoding[i : i + chunk_num_blocks * sha1.block_size] + sha1.update(data.encode("utf-8")) + return sha1.hexdigest() + + +def save_pil_to_cache( + img: Image.Image, cache_dir: str, format: Literal["png", "jpg"] = "png" +) -> str: + bytes_data = encode_pil_to_bytes(img, format) + temp_dir = Path(cache_dir) / hash_bytes(bytes_data) + temp_dir.mkdir(exist_ok=True, parents=True) + filename = str((temp_dir / f"image.{format}").resolve()) + img.save(filename, pnginfo=get_pil_metadata(img)) + return filename + + +def save_img_array_to_cache( + arr: np.ndarray, cache_dir: str, format: Literal["png", "jpg"] = "png" +) -> str: + pil_image = Image.fromarray(_convert(arr, np.uint8, force_copy=False)) + return save_pil_to_cache(pil_image, cache_dir, format=format) + + +def save_audio_to_cache( + data: np.ndarray, sample_rate: int, format: str, cache_dir: str +) -> str: + temp_dir = Path(cache_dir) / hash_bytes(data.tobytes()) + temp_dir.mkdir(exist_ok=True, parents=True) + filename = str((temp_dir / f"audio.{format}").resolve()) + audio_to_file(sample_rate, data, filename, format=format) + return filename + + +def save_bytes_to_cache(data: bytes, file_name: str, cache_dir: str) -> str: + path = Path(cache_dir) / hash_bytes(data) + path.mkdir(exist_ok=True, parents=True) + path = path / Path(file_name).name + path.write_bytes(data) + return str(path.resolve()) + + +def save_file_to_cache(file_path: str | Path, cache_dir: str) -> str: + """Returns a temporary file path for a copy of the given file path if it does + not already exist. Otherwise returns the path to the existing temp file.""" + temp_dir = hash_file(file_path) + temp_dir = Path(cache_dir) / temp_dir + temp_dir.mkdir(exist_ok=True, parents=True) + + name = client_utils.strip_invalid_filename_characters(Path(file_path).name) + full_temp_file_path = str(abspath(temp_dir / name)) + + if not Path(full_temp_file_path).exists(): + shutil.copy2(file_path, full_temp_file_path) + + return full_temp_file_path + + +async def save_uploaded_file( + file: UploadFile, upload_dir: str, limiter: CapacityLimiter | None = None +) -> str: + temp_dir = secrets.token_hex( + 20 + ) # Since the full file is being uploaded anyways, there is no benefit to hashing the file. + temp_dir = Path(upload_dir) / temp_dir + temp_dir.mkdir(exist_ok=True, parents=True) + + sha1 = hashlib.sha1() + + if file.filename: + file_name = Path(file.filename).name + name = client_utils.strip_invalid_filename_characters(file_name) + else: + name = f"tmp{secrets.token_hex(5)}" + + full_temp_file_path = str(abspath(temp_dir / name)) + + async with aiofiles.open(full_temp_file_path, "wb") as output_file: + while True: + content = await file.read(100 * 1024 * 1024) + if not content: + break + sha1.update(content) + await output_file.write(content) + + directory = Path(upload_dir) / sha1.hexdigest() + directory.mkdir(exist_ok=True, parents=True) + dest = (directory / name).resolve() + + await anyio.to_thread.run_sync( + shutil.move, full_temp_file_path, dest, limiter=limiter + ) + + return str(dest) + + +def save_url_to_cache(url: str, cache_dir: str) -> str: + """Downloads a file and makes a temporary file path for a copy if does not already + exist. 
Otherwise returns the path to the existing temp file.""" + temp_dir = hash_url(url) + temp_dir = Path(cache_dir) / temp_dir + temp_dir.mkdir(exist_ok=True, parents=True) + + name = client_utils.strip_invalid_filename_characters(Path(url).name) + full_temp_file_path = str(abspath(temp_dir / name)) + + if not Path(full_temp_file_path).exists(): + with requests.get(url, stream=True) as r, open(full_temp_file_path, "wb") as f: + shutil.copyfileobj(r.raw, f) + + return full_temp_file_path + + +def save_base64_to_cache( + base64_encoding: str, cache_dir: str, file_name: str | None = None +) -> str: + """Converts a base64 encoding to a file and returns the path to the file if + the file doesn't already exist. Otherwise returns the path to the existing file. + """ + temp_dir = hash_base64(base64_encoding) + temp_dir = Path(cache_dir) / temp_dir + temp_dir.mkdir(exist_ok=True, parents=True) + + guess_extension = client_utils.get_extension(base64_encoding) + if file_name: + file_name = client_utils.strip_invalid_filename_characters(file_name) + elif guess_extension: + file_name = f"file.{guess_extension}" + else: + file_name = "file" + + full_temp_file_path = str(abspath(temp_dir / file_name)) # type: ignore + + if not Path(full_temp_file_path).exists(): + data, _ = client_utils.decode_base64_to_binary(base64_encoding) + with open(full_temp_file_path, "wb") as fb: + fb.write(data) + + return full_temp_file_path + + +def move_files_to_cache(data: Any, block: Component): + """Move files to cache and replace the file path with the cache path. + + Runs after postprocess and before preprocess. + + Args: + data: The input or output data for a component. + block: The component + """ + + def _move_to_cache(d: dict): + payload = FileData(**d) + if payload.name and ( + client_utils.is_http_url_like(payload.name) + or not is_in_or_equal(payload.name, block.GRADIO_CACHE) + ): + if payload.is_file: + if client_utils.is_http_url_like(payload.name): + temp_file_path = save_url_to_cache( + payload.name, cache_dir=block.GRADIO_CACHE + ) + else: + temp_file_path = save_file_to_cache( + payload.name, cache_dir=block.GRADIO_CACHE + ) + else: + assert payload.data + temp_file_path = save_base64_to_cache( + payload.data, + file_name=payload.name, + cache_dir=block.GRADIO_CACHE, + ) + payload.is_file = True + block.temp_files.add(temp_file_path) + payload.name = temp_file_path + return payload.model_dump() + + return client_utils.traverse(data, _move_to_cache, client_utils.is_file_obj) + + def resize_and_crop(img, size, crop_type="center"): """ Resize and crop an image to fit the specified size. @@ -505,6 +727,8 @@ def video_is_playable(video_filepath: str) -> bool: .webm -> vp9 .ogg -> theora """ + from ffmpy import FFprobe, FFRuntimeError + try: container = Path(video_filepath).suffix.lower() probe = FFprobe( @@ -526,6 +750,8 @@ def video_is_playable(video_filepath: str) -> bool: def convert_video_to_playable_mp4(video_path: str) -> str: """Convert the video to mp4. 
If something goes wrong return the original video.""" + from ffmpy import FFmpeg, FFRuntimeError + try: with tempfile.NamedTemporaryFile(delete=False) as tmp_file: output_path = Path(video_path).with_suffix(".mp4") diff --git a/gradio/routes.py b/gradio/routes.py index 9f110b957886..dc2e0c21fbac 100644 --- a/gradio/routes.py +++ b/gradio/routes.py @@ -46,7 +46,7 @@ import gradio import gradio.ranged_response as ranged_response -from gradio import route_utils, utils, wasm_utils +from gradio import processing_utils, route_utils, utils, wasm_utils from gradio.context import Context from gradio.data_classes import ComponentServerBody, PredictBody, ResetBody from gradio.deprecation import warn_deprecation @@ -126,7 +126,7 @@ def __init__(self, **kwargs): self.queue_token = secrets.token_urlsafe(32) self.startup_events_triggered = False self.uploaded_file_dir = os.environ.get("GRADIO_TEMP_DIR") or str( - Path(tempfile.gettempdir()) / "gradio" + (Path(tempfile.gettempdir()) / "gradio").resolve() ) self.change_event: None | threading.Event = None self._asyncio_tasks: list[asyncio.Task] = [] @@ -351,8 +351,8 @@ def main(request: fastapi.Request, user: str = Depends(get_current_user)): @app.get("/info/", dependencies=[Depends(login_check)]) @app.get("/info", dependencies=[Depends(login_check)]) def api_info(serialize: bool = True): - config = app.get_blocks().config - return gradio.blocks.get_api_info(config, serialize) # type: ignore + # config = app.get_blocks().get_api_info() + return app.get_blocks().get_api_info() # type: ignore @app.get("/config/", dependencies=[Depends(login_check)]) @app.get("/config", dependencies=[Depends(login_check)]) @@ -371,6 +371,32 @@ def static_resource(path: str): static_file = safe_join(STATIC_PATH_LIB, path) return FileResponse(static_file) + @app.get("/custom_component/{id}/{type}/{file_name}") + def custom_component_path(id: str, type: str, file_name: str): + config = app.get_blocks().config + components = config["components"] + location = next( + (item for item in components if item["component_class_id"] == id), None + ) + + if location is None: + raise HTTPException(status_code=404, detail="Component not found.") + + component_instance = app.get_blocks().get_component(location["id"]) + + module_name = component_instance.__class__.__module__ + module_path = sys.modules[module_name].__file__ + + if module_path is None or component_instance is None: + raise HTTPException(status_code=404, detail="Component not found.") + + return FileResponse( + safe_join( + str(Path(module_path).parent), + f"{component_instance.__class__.TEMPLATE_DIR}/{type}/{file_name}", + ) + ) + @app.get("/assets/{path:path}") def build_resource(path: str): build_file = safe_join(BUILD_PATH_LIB, path) @@ -632,11 +658,10 @@ async def upload_file( files: List[UploadFile] = File(...), ): output_files = [] - file_manager = gradio.File() for input_file in files: output_files.append( - await file_manager.save_uploaded_file( - input_file, app.uploaded_file_dir + await processing_utils.save_uploaded_file( + input_file, app.uploaded_file_dir, app.get_blocks().limiter ) ) return output_files diff --git a/gradio/utils.py b/gradio/utils.py index a7356397faec..20494fccdd95 100644 --- a/gradio/utils.py +++ b/gradio/utils.py @@ -37,7 +37,6 @@ import anyio import matplotlib import requests -from gradio_client.serializing import Serializable from typing_extensions import ParamSpec import gradio @@ -45,7 +44,7 @@ from gradio.strings import en if TYPE_CHECKING: # Only import for type checking (is False at 
runtime). - from gradio.blocks import Block, BlockContext, Blocks + from gradio.blocks import BlockContext, Blocks from gradio.components import Component from gradio.routes import App, Request @@ -148,7 +147,7 @@ def watchfn(reloader: SourceFileReloader): # The thread running watchfn will be the thread reloading # the app. So we need to modify this thread_data attr here # so that subsequent calls to reload don't launch the app - from gradio.reload import reload_thread + from gradio.cli.commands.reload import reload_thread reload_thread.running_reload = True @@ -174,10 +173,13 @@ def iter_py_files() -> Iterator[Path]: module = None reload_dirs = [Path(dir_) for dir_ in reloader.watch_dirs] + import sys + + for dir_ in reload_dirs: + sys.path.insert(0, str(dir_)) + mtimes = {} while reloader.should_watch(): - import sys - changed = get_changes() if changed: print(f"Changes detected in: {changed}") @@ -867,7 +869,7 @@ def tex2svg(formula, *args): fig = plt.figure(figsize=(0.01, 0.01)) fig.text(0, 0, rf"${formula}$", fontsize=fontsize) output = BytesIO() - fig.savefig( + fig.savefig( # type: ignore output, dpi=dpi, transparent=True, @@ -928,41 +930,6 @@ def is_in_or_equal(path_1: str | Path, path_2: str | Path): return True -def get_serializer_name(block: Block) -> str | None: - if not hasattr(block, "serialize"): - return None - - def get_class_that_defined_method(meth: Callable): - # Adapted from: https://stackoverflow.com/a/25959545/5209347 - if isinstance(meth, functools.partial): - return get_class_that_defined_method(meth.func) - if inspect.ismethod(meth) or ( - inspect.isbuiltin(meth) - and getattr(meth, "__self__", None) is not None - and getattr(meth.__self__, "__class__", None) - ): - for cls in inspect.getmro(meth.__self__.__class__): - # Find the first serializer defined in gradio_client that - if issubclass(cls, Serializable) and "gradio_client" in cls.__module__: - return cls - if meth.__name__ in cls.__dict__: - return cls - meth = getattr(meth, "__func__", meth) # fallback to __qualname__ parsing - if inspect.isfunction(meth): - cls = getattr( - inspect.getmodule(meth), - meth.__qualname__.split(".", 1)[0].rsplit(".", 1)[0], - None, - ) - if isinstance(cls, type): - return cls - return getattr(meth, "__objclass__", None) - - cls = get_class_that_defined_method(block.serialize) # type: ignore - if cls: - return cls.__name__ - - HTML_TAG_RE = re.compile("<.*?>") diff --git a/js/_spaces-test/src/lib/EndpointInputs.svelte b/js/_spaces-test/src/lib/EndpointInputs.svelte index ae9833040f8a..6b0f5097381a 100644 --- a/js/_spaces-test/src/lib/EndpointInputs.svelte +++ b/js/_spaces-test/src/lib/EndpointInputs.svelte @@ -4,8 +4,6 @@ */ export let app_info; - $: console.log(app_info); - /** * @type any[] */ diff --git a/js/_spaces-test/src/routes/+layout.svelte b/js/_spaces-test/src/routes/+layout.svelte index 361ac4ccca02..733c54dae15b 100644 --- a/js/_spaces-test/src/routes/+layout.svelte +++ b/js/_spaces-test/src/routes/+layout.svelte @@ -4,8 +4,6 @@ import { page } from "$app/stores"; import { afterNavigate } from "$app/navigation"; - $: console.log($page); - const links = [ ["/embeds", "Embeds"], ["/client-browser", "Client-Browser"], diff --git a/js/_spaces-test/src/routes/client-browser/+page.svelte b/js/_spaces-test/src/routes/client-browser/+page.svelte index 55e8bd101f44..3cb8d503462b 100644 --- a/js/_spaces-test/src/routes/client-browser/+page.svelte +++ b/js/_spaces-test/src/routes/client-browser/+page.svelte @@ -47,8 +47,6 @@ hf_token: hf_token }); - console.log(app.config); 
- const { named_endpoints, unnamed_endpoints } = await app.view_api(); named = Object.keys(named_endpoints); @@ -66,8 +64,6 @@ ]; if (!_endpoint_info) return; - console.log(_endpoint_info); - app_info = _endpoint_info; active_endpoint = type === "unnamed" ? parseInt(_endpoint) : _endpoint; } @@ -93,8 +89,6 @@ .on("status", (_status) => { status = _status.stage; }); - // console.log(res); - // response_data = res; } function cancel() { diff --git a/js/accordion/static/StaticAccordion.svelte b/js/accordion/static/StaticAccordion.svelte index b5858de3389e..dd69e9165c70 100644 --- a/js/accordion/static/StaticAccordion.svelte +++ b/js/accordion/static/StaticAccordion.svelte @@ -5,6 +5,7 @@ import type { LoadingStatus } from "@gradio/statustracker"; import Column from "@gradio/column"; + import type { Gradio } from "js/utils/src"; export let label: string; export let elem_id: string; @@ -12,10 +13,15 @@ export let visible = true; export let open = true; export let loading_status: LoadingStatus; + export let gradio: Gradio; - + diff --git a/js/annotatedimage/static/AnnotatedImage.svelte b/js/annotatedimage/static/AnnotatedImage.svelte index f0fa9855d4e2..44f5cde62dae 100644 --- a/js/annotatedimage/static/AnnotatedImage.svelte +++ b/js/annotatedimage/static/AnnotatedImage.svelte @@ -6,14 +6,27 @@ import { StatusTracker } from "@gradio/statustracker"; import type { LoadingStatus } from "@gradio/statustracker"; import { type FileData, normalise_file } from "@gradio/upload"; - import { _ } from "svelte-i18n"; + export let elem_id = ""; export let elem_classes: string[] = []; export let visible = true; - export let value: [FileData, [FileData, string][]] | null; - let old_value: [FileData, [FileData, string][]] | null; - let _value: [FileData, [FileData, string][]] | null; - export let label = $_("annotated_image.annotated_image"); + export let value: { + image: FileData; + annotations: { image: FileData; label: string }[] | []; + } | null = null; + let old_value: { + image: FileData; + annotations: { image: FileData; label: string }[] | []; + } | null = null; + let _value: { + image: FileData; + annotations: { image: FileData; label: string }[]; + } | null = null; + export let gradio: Gradio<{ + change: undefined; + select: SelectData; + }>; + export let label = gradio.i18n("annotated_image.annotated_image"); export let show_label = true; export let show_legend = true; export let height: number | undefined; @@ -26,10 +39,6 @@ export let root_url: string; let active: string | null = null; export let loading_status: LoadingStatus; - export let gradio: Gradio<{ - change: undefined; - select: SelectData; - }>; $: { if (value !== old_value) { @@ -37,13 +46,13 @@ gradio.dispatch("change"); } if (value) { - _value = [ - normalise_file(value[0], root, root_url) as FileData, - value[1].map(([file, _label]) => [ - normalise_file(file, root, root_url) as FileData, - _label, - ]), - ]; + _value = { + image: normalise_file(value.image, root, root_url) as FileData, + annotations: value.annotations.map((ann) => ({ + image: normalise_file(ann.image, root, root_url) as FileData, + label: ann.label + })) + }; } else { _value = null; } @@ -55,7 +64,7 @@ active = null; } - function handle_click(i: number): void { + function handle_click(i: number, value: string): void { gradio.dispatch("select", { value: label, index: i, @@ -75,8 +84,16 @@ {scale} {min_width} > - - + +
{#if _value == null} @@ -86,41 +103,41 @@ uploaded file - {#each _value ? _value[1] : [] as [file, label], i} + {#each _value ? _value?.annotations : [] as ann, i} segmentation mask identifying {label} within the uploaded file {/each}
{#if show_legend && _value}
- {#each _value[1] as [_, label], i} + {#each _value.annotations as ann, i} {/each}
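A minimal TypeScript sketch of the object-based value the reworked AnnotatedImage component consumes after this change, replacing the previous [FileData, [FileData, string][]] tuple form. The SimpleFileData interface below is a simplified stand-in for FileData from @gradio/upload, and the file names are placeholder values, not taken from the diff:

// Simplified stand-in for @gradio/upload's FileData; only the fields
// needed for this illustration are declared here.
interface SimpleFileData {
	name: string;
	is_file?: boolean;
	data?: string;
}

// New payload shape: a base image plus named { image, label } annotation
// entries, or null when the component has no value.
type AnnotatedImageValue = {
	image: SimpleFileData;
	annotations: { image: SimpleFileData; label: string }[];
} | null;

// Placeholder value matching the props destructured in the Svelte diff
// above (value.image, value.annotations[i].image, value.annotations[i].label).
const example_value: AnnotatedImageValue = {
	image: { name: "base.png", is_file: true },
	annotations: [
		{ image: { name: "mask_0.png", is_file: true }, label: "cat" },
		{ image: { name: "mask_1.png", is_file: true }, label: "dog" }
	]
};

Switching from positional tuples to named image/annotations fields lets the component read value.image and ann.label directly instead of indexing value[0] and value[1], which is what the updated reactive block above does before passing each entry through normalise_file.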
diff --git a/js/app/build_plugins.ts b/js/app/build_plugins.ts index 76bcd4086cde..87c1db3313e1 100644 --- a/js/app/build_plugins.ts +++ b/js/app/build_plugins.ts @@ -2,7 +2,7 @@ import type { Plugin } from "vite"; import { parse, HTMLElement } from "node-html-parser"; import { join } from "path"; -import { writeFileSync } from "fs"; +import { writeFileSync, cpSync } from "fs"; export function inject_ejs(): Plugin { return { @@ -89,6 +89,30 @@ export function generate_cdn_entry({ }; } +const RE_SVELTE_IMPORT = + /import\s+([\w*{},\s]+)\s+from\s+['"](svelte|svelte\/internal)['"]/g; + +export function generate_dev_entry({ enable }: { enable: boolean }): Plugin { + return { + name: "generate-dev-entry", + transform(code, id) { + if (!enable) return; + + const new_code = code.replace(RE_SVELTE_IMPORT, (str, $1, $2) => { + return `const ${$1.replace( + " as ", + ": " + )} = window.__gradio__svelte__internal;`; + }); + + return { + code: new_code, + map: null + }; + } + }; +} + function make_entry(script: string): string { const make_script = ` function make_script(src) { @@ -177,3 +201,137 @@ export function handle_ce_css(): Plugin { } }; } + +// generate component importsy + +import * as url from "url"; +const __filename = url.fileURLToPath(import.meta.url); +const __dirname = url.fileURLToPath(new URL(".", import.meta.url)); + +import { readdirSync, existsSync, readFileSync, statSync } from "fs"; + +function get_export_path( + path: string, + root: string, + pkg_json: Record +): string | undefined { + if (!pkg_json.exports) return undefined; + const _path = join(root, "..", `${pkg_json.exports[`./${path}`]}`); + + return existsSync(_path) ? _path : undefined; +} + +function generate_component_imports(): string { + const components = readdirSync(join(__dirname, "..")) + .map((dir) => { + if (!statSync(join(__dirname, "..", dir)).isDirectory()) return undefined; + + const package_json_path = join(__dirname, "..", dir, "package.json"); + if (existsSync(package_json_path)) { + const package_json = JSON.parse( + readFileSync(package_json_path, "utf8") + ); + + const interactive = get_export_path( + "interactive", + package_json_path, + package_json + ); + const example = get_export_path( + "example", + package_json_path, + package_json + ); + const static_dir = get_export_path( + "static", + package_json_path, + package_json + ); + + if (!interactive && !example && !static_dir) return undefined; + + return { + name: package_json.name, + interactive, + example, + static: static_dir + }; + } + return undefined; + }) + .filter((x) => x !== undefined); + + const imports = components.reduce((acc, component) => { + if (!component) return acc; + + const interactive = component.interactive + ? `interactive: () => import("${component.name}/interactive"),\n` + : ""; + const example = component.example + ? 
`example: () => import("${component.name}/example"),\n` + : ""; + return `${acc}"${component.name.replace("@gradio/", "")}": { + ${interactive} + ${example} + static: () => import("${component.name}/static") + },\n`; + }, ""); + + return imports; +} + +function load_virtual_component_loader(): string { + const loader_path = join(__dirname, "component_loader.js"); + const component_map = ` +const component_map = { + ${generate_component_imports()} +}; +`; + + return `${component_map}\n\n${readFileSync(loader_path, "utf8")}`; +} + +export function inject_component_loader(): Plugin { + const v_id = "virtual:component-loader"; + const resolved_v_id = "\0" + v_id; + + return { + name: "inject-component-loader", + enforce: "pre", + resolveId(id: string) { + if (id === v_id) return resolved_v_id; + }, + load(id: string) { + if (id === resolved_v_id) { + return load_virtual_component_loader(); + } + } + }; +} + +export function resolve_svelte(enable: boolean): Plugin { + return { + enforce: "pre", + name: "resolve-svelte", + async resolveId(id: string) { + if ( + (enable && id === "./svelte/svelte.js") || + id === "svelte" || + id === "svelte/internal" + ) { + const mod = join( + __dirname, + "..", + "..", + "gradio", + "templates", + "frontend", + "assets", + "svelte", + "svelte.js" + ); + return { id: mod, external: "absolute" }; + } + } + }; +} diff --git a/js/app/component_loader.js b/js/app/component_loader.js new file mode 100644 index 000000000000..949c9b1f4d73 --- /dev/null +++ b/js/app/component_loader.js @@ -0,0 +1,55 @@ +// @ts-nocheck + +export async function load_component(api_url, name, mode, id) { + const comps = window.__GRADIO__CC__; + + const _component_map = { + // eslint-disable-next-line no-undef + ...component_map, + ...(!comps ? {} : comps) + }; + try { + //@ts-ignore + const c = await ( + _component_map?.[id]?.[mode] || // for dev mode custom components + _component_map?.[name]?.[mode] || + _component_map?.[name]?.["static"] + )(); + return { + name, + component: c + }; + } catch (e) { + try { + await load_css(`${api_url}/custom_component/${id}/${mode}/style.css`); + const c = await import( + /* @vite-ignore */ `${api_url}/custom_component/${id}/${mode}/index.js` + ); + return { + name, + component: c + }; + } catch (e) { + if (mode === "example") { + return { + name, + component: await import("@gradio/fallback/example") + }; + } + console.error(`failed to load: ${name}`); + console.error(e); + throw e; + } + } +} + +function load_css(url) { + return new Promise((resolve, reject) => { + const link = document.createElement("link"); + link.rel = "stylesheet"; + link.href = url; + document.head.appendChild(link); + link.onload = () => resolve(); + link.onerror = () => reject(); + }); +} diff --git a/js/app/index.html b/js/app/index.html index 05f1095a1e28..ba14140f7a37 100644 --- a/js/app/index.html +++ b/js/app/index.html @@ -34,7 +34,7 @@ /> - - -{#if value} - -{/if} diff --git a/js/app/src/components/Interpretation/InterpretationComponents/Audio.svelte b/js/app/src/components/Interpretation/InterpretationComponents/Audio.svelte deleted file mode 100644 index 164fd827db8c..000000000000 --- a/js/app/src/components/Interpretation/InterpretationComponents/Audio.svelte +++ /dev/null @@ -1,29 +0,0 @@ - - -
- {label} -
- {#each interpretation as interpret_value} -
- {/each} -
-
- - diff --git a/js/app/src/components/Interpretation/InterpretationComponents/Checkbox.svelte b/js/app/src/components/Interpretation/InterpretationComponents/Checkbox.svelte deleted file mode 100644 index 80ff970080e2..000000000000 --- a/js/app/src/components/Interpretation/InterpretationComponents/Checkbox.svelte +++ /dev/null @@ -1,88 +0,0 @@ - - -
- {label} - -
- - diff --git a/js/app/src/components/Interpretation/InterpretationComponents/CheckboxGroup.svelte b/js/app/src/components/Interpretation/InterpretationComponents/CheckboxGroup.svelte deleted file mode 100644 index 80c71c49295e..000000000000 --- a/js/app/src/components/Interpretation/InterpretationComponents/CheckboxGroup.svelte +++ /dev/null @@ -1,107 +0,0 @@ - - -
- {label} - {#each choices as choice, i} - - {/each} -
- - diff --git a/js/app/src/components/Interpretation/InterpretationComponents/Dropdown.svelte b/js/app/src/components/Interpretation/InterpretationComponents/Dropdown.svelte deleted file mode 100644 index 1878b74d9640..000000000000 --- a/js/app/src/components/Interpretation/InterpretationComponents/Dropdown.svelte +++ /dev/null @@ -1,49 +0,0 @@ - - -
- {label} - -
- - diff --git a/js/app/src/components/Interpretation/InterpretationComponents/Image.svelte b/js/app/src/components/Interpretation/InterpretationComponents/Image.svelte deleted file mode 100644 index d50202ba10d8..000000000000 --- a/js/app/src/components/Interpretation/InterpretationComponents/Image.svelte +++ /dev/null @@ -1,106 +0,0 @@ - - -
- {label} -
-
- -
- uploaded input -
-
- - diff --git a/js/app/src/components/Interpretation/InterpretationComponents/Number.svelte b/js/app/src/components/Interpretation/InterpretationComponents/Number.svelte deleted file mode 100644 index b62c361d3cdf..000000000000 --- a/js/app/src/components/Interpretation/InterpretationComponents/Number.svelte +++ /dev/null @@ -1,40 +0,0 @@ - - -
- {label} -
- {#each interpretation as interpret_value} -
- {interpret_value[0]} -
- {/each} -
-
- - diff --git a/js/app/src/components/Interpretation/InterpretationComponents/Radio.svelte b/js/app/src/components/Interpretation/InterpretationComponents/Radio.svelte deleted file mode 100644 index 942cfbe94b46..000000000000 --- a/js/app/src/components/Interpretation/InterpretationComponents/Radio.svelte +++ /dev/null @@ -1,57 +0,0 @@ - - -
- {label} - {#each choices as choice, i} - - {/each} -
- - diff --git a/js/app/src/components/Interpretation/InterpretationComponents/Slider.svelte b/js/app/src/components/Interpretation/InterpretationComponents/Slider.svelte deleted file mode 100644 index bb07a9fed206..000000000000 --- a/js/app/src/components/Interpretation/InterpretationComponents/Slider.svelte +++ /dev/null @@ -1,79 +0,0 @@ - - -
- {label} - -
- {#each interpretation as interpret_value} -
- {/each} -
-
- {original} -
-
- - diff --git a/js/app/src/components/Interpretation/InterpretationComponents/Textbox.svelte b/js/app/src/components/Interpretation/InterpretationComponents/Textbox.svelte deleted file mode 100644 index ba4f4822f07a..000000000000 --- a/js/app/src/components/Interpretation/InterpretationComponents/Textbox.svelte +++ /dev/null @@ -1,31 +0,0 @@ - - -
- {label} - {#each interpretation as [text, saliency]} - - {text} - - {/each} -
- - diff --git a/js/app/src/components/Interpretation/directory.ts b/js/app/src/components/Interpretation/directory.ts deleted file mode 100644 index 020865fbea01..000000000000 --- a/js/app/src/components/Interpretation/directory.ts +++ /dev/null @@ -1,21 +0,0 @@ -import InterpretationNumber from "./InterpretationComponents/Number.svelte"; -import InterpretationDropdown from "./InterpretationComponents/Dropdown.svelte"; -import InterpretationCheckbox from "./InterpretationComponents/Checkbox.svelte"; -import InterpretationCheckboxGroup from "./InterpretationComponents/CheckboxGroup.svelte"; -import InterpretationSlider from "./InterpretationComponents/Slider.svelte"; -import InterpretationRadio from "./InterpretationComponents/Radio.svelte"; -import InterpretationImage from "./InterpretationComponents/Image.svelte"; -import InterpretationAudio from "./InterpretationComponents/Audio.svelte"; -import InterpretationTextbox from "./InterpretationComponents/Textbox.svelte"; - -export const component_map = { - audio: InterpretationAudio, - dropdown: InterpretationDropdown, - checkbox: InterpretationCheckbox, - checkboxgroup: InterpretationCheckboxGroup, - number: InterpretationNumber, - slider: InterpretationSlider, - radio: InterpretationRadio, - image: InterpretationImage, - textbox: InterpretationTextbox -}; diff --git a/js/app/src/components/Interpretation/index.ts b/js/app/src/components/Interpretation/index.ts deleted file mode 100644 index 1fb581699c9d..000000000000 --- a/js/app/src/components/Interpretation/index.ts +++ /dev/null @@ -1 +0,0 @@ -export { default } from "./Interpretation.svelte"; diff --git a/js/app/src/components/Interpretation/utils.ts b/js/app/src/components/Interpretation/utils.ts deleted file mode 100644 index 70110d6a04d1..000000000000 --- a/js/app/src/components/Interpretation/utils.ts +++ /dev/null @@ -1,59 +0,0 @@ -export const getSaliencyColor = (value: number): string => { - var color: [number, number, number] | null = null; - if (value < 0) { - color = [52, 152, 219]; - } else { - color = [231, 76, 60]; - } - return colorToString(interpolate(Math.abs(value), [255, 255, 255], color)); -}; - -const interpolate = ( - val: number, - rgb1: [number, number, number], - rgb2: [number, number, number] -): [number, number, number] => { - if (val > 1) { - val = 1; - } - val = Math.sqrt(val); - var rgb: [number, number, number] = [0, 0, 0]; - var i; - for (i = 0; i < 3; i++) { - rgb[i] = Math.round(rgb1[i] * (1.0 - val) + rgb2[i] * val); - } - return rgb; -}; - -const colorToString = (rgb: [number, number, number]): string => { - return "rgb(" + rgb[0] + ", " + rgb[1] + ", " + rgb[2] + ")"; -}; - -export function getObjectFitSize( - contains: boolean /* true = contain, false = cover */, - containerWidth: number, - containerHeight: number, - width: number, - height: number -): { width: number; height: number; x: number; y: number } { - var doRatio = width / height; - var cRatio = containerWidth / containerHeight; - var targetWidth = 0; - var targetHeight = 0; - var test = contains ? 
doRatio > cRatio : doRatio < cRatio; - - if (test) { - targetWidth = containerWidth; - targetHeight = targetWidth / doRatio; - } else { - targetHeight = containerHeight; - targetWidth = targetHeight * doRatio; - } - - return { - width: targetWidth, - height: targetHeight, - x: (containerWidth - targetWidth) / 2, - y: (containerHeight - targetHeight) / 2 - }; -} diff --git a/js/app/src/components/State/index.ts b/js/app/src/components/State/index.ts deleted file mode 100644 index 0d9ecfae7647..000000000000 --- a/js/app/src/components/State/index.ts +++ /dev/null @@ -1 +0,0 @@ -export { default } from "@gradio/state"; diff --git a/js/app/src/components/directory.ts b/js/app/src/components/directory.ts deleted file mode 100644 index 8c115d473e1a..000000000000 --- a/js/app/src/components/directory.ts +++ /dev/null @@ -1,142 +0,0 @@ -export const component_map = { - accordion: { - static: () => import("@gradio/accordion/static") - }, - annotatedimage: { - static: () => import("@gradio/annotatedimage/static") - }, - audio: { - static: () => import("@gradio/audio/static"), - interactive: () => import("@gradio/audio/interactive") - }, - box: { - static: () => import("@gradio/box/static") - }, - button: { - static: () => import("@gradio/button/static") - }, - chatbot: { - static: () => import("@gradio/chatbot/static") - }, - checkbox: { - static: () => import("@gradio/checkbox/static"), - interactive: () => import("@gradio/checkbox/interactive") - }, - checkboxgroup: { - static: () => import("@gradio/checkboxgroup/static"), - interactive: () => import("@gradio/checkboxgroup/interactive") - }, - code: { - static: () => import("@gradio/code/static"), - interactive: () => import("@gradio/code/interactive") - }, - colorpicker: { - static: () => import("@gradio/colorpicker/static"), - interactive: () => import("@gradio/colorpicker/interactive") - }, - column: { - static: () => import("@gradio/column/static") - }, - dataframe: { - static: () => import("@gradio/dataframe/static"), - interactive: () => import("@gradio/dataframe/interactive") - }, - dataset: { - static: () => import("./Dataset") - }, - dropdown: { - static: () => import("@gradio/dropdown/static"), - interactive: () => import("@gradio/dropdown/interactive") - }, - file: { - static: () => import("@gradio/file/static"), - interactive: () => import("@gradio/file/interactive") - }, - form: { - static: () => import("@gradio/form/static") - }, - gallery: { - static: () => import("@gradio/gallery/static") - }, - group: { - static: () => import("@gradio/group/static") - }, - highlightedtext: { - static: () => import("@gradio/highlightedtext/static"), - interactive: () => import("@gradio/highlightedtext/interactive") - }, - fileexplorer: { - static: () => import("@gradio/fileexplorer/static"), - interactive: () => import("@gradio/fileexplorer/interactive") - }, - html: { - static: () => import("@gradio/html/static") - }, - image: { - static: () => import("@gradio/image/static"), - interactive: () => import("@gradio/image/interactive") - }, - interpretation: { - static: () => import("./Interpretation"), - interactive: () => import("./Interpretation") - }, - json: { - static: () => import("@gradio/json/static") - }, - label: { - static: () => import("@gradio/label/static") - }, - markdown: { - static: () => import("@gradio/markdown/static") - }, - model3d: { - static: () => import("@gradio/model3d/static"), - interactive: () => import("@gradio/model3d/interactive") - }, - number: { - static: () => import("@gradio/number/static"), - interactive: () => 
import("@gradio/number/interactive") - }, - plot: { - static: () => import("@gradio/plot/static") - }, - radio: { - static: () => import("@gradio/radio/static"), - interactive: () => import("@gradio/radio/interactive") - }, - row: { - static: () => import("@gradio/row/static") - }, - slider: { - static: () => import("@gradio/slider/static"), - interactive: () => import("@gradio/slider/interactive") - }, - state: { - static: () => import("./State") - }, - statustracker: { - static: () => import("@gradio/statustracker/static") - }, - tabs: { - static: () => import("@gradio/tabs/static") - }, - tabitem: { - static: () => import("@gradio/tabitem/static") - }, - textbox: { - static: () => import("@gradio/textbox/static"), - interactive: () => import("@gradio/textbox/interactive") - }, - timeseries: { - static: () => import("@gradio/timeseries/static"), - interactive: () => import("@gradio/timeseries/interactive") - }, - uploadbutton: { - static: () => import("@gradio/uploadbutton/static"), - interactive: () => import("@gradio/uploadbutton/interactive") - }, - video: { - static: () => import("@gradio/video/static"), - interactive: () => import("@gradio/video/interactive") - } -}; diff --git a/js/app/src/components/utils/helpers.test.ts b/js/app/src/components/utils/helpers.test.ts deleted file mode 100644 index d0204989786d..000000000000 --- a/js/app/src/components/utils/helpers.test.ts +++ /dev/null @@ -1,93 +0,0 @@ -import { assert, describe, test } from "vitest"; -import { prettyBytes, deepCopy, randInt, getNextColor } from "./helpers"; - -describe("prettyBytes", () => { - test("handle B", () => { - assert.equal(prettyBytes(10), "10.0 B"); - }); - - test("handles KB", () => { - assert.equal(prettyBytes(1_300), "1.3 KB"); - }); - - test("handles MB", () => { - assert.equal(prettyBytes(1_300_000), "1.2 MB"); - }); - - test("handles GB", () => { - assert.equal(prettyBytes(1_300_000_123), "1.2 GB"); - }); - - test("handles PB", () => { - assert.equal(prettyBytes(1_300_000_123_000), "1.2 PB"); - }); -}); - -describe("deepCopy", () => { - test("handle arrays", () => { - const array = [1, 2, 3]; - const copy = deepCopy(array); - assert.ok(array !== copy); - assert.deepEqual(array, copy); - }); - - test("handle objects", () => { - const obj = { a: 1, b: 2, c: 3 }; - const copy = deepCopy(obj); - assert.ok(obj !== copy); - assert.deepEqual(obj, copy); - }); - - test("handle complex structures", () => { - const obj = { - a: 1, - b: { - a: 1, - b: { - a: 1, - b: { a: 1, b: 2, c: 3 }, - c: [ - 1, - 2, - { a: 1, b: { a: 1, b: { a: 1, b: 2, c: 3 }, c: [1, 2, 3] } } - ] - }, - c: 3 - }, - c: 3 - }; - const copy = deepCopy(obj); - assert.ok(obj !== copy); - assert.deepEqual(obj, copy); - }); -}); - -describe("randInt", () => { - test("returns a random number", () => { - assert.typeOf(randInt(0, 10), "number"); - }); - - test("respects min and max", () => { - const n = randInt(0, 10); - assert.ok(n >= 0 && n <= 10); - }); - - test("respects min and max when negative", () => { - const n = randInt(-100, -10); - assert.ok(n >= -100 && n <= -10); - }); -}); - -describe("getNextColor", () => { - test("returns a color", () => { - assert.equal(getNextColor(0), "rgba(255, 99, 132, 1)"); - }); - - test("returns a color when index is very high", () => { - assert.ok( - getNextColor(999999999).match( - /rgba\([0-9]{1,3}, [0-9]{1,3}, [0-9]{1,3}, [0-9]\)/ - ) - ); - }); -}); diff --git a/js/app/src/components/utils/helpers.ts b/js/app/src/components/utils/helpers.ts deleted file mode 100644 index 99cd6f3d3b6c..000000000000 
--- a/js/app/src/components/utils/helpers.ts +++ /dev/null @@ -1,71 +0,0 @@ -// import mime from "mime-types"; - -export const playable = (): boolean => { - // let video_element = document.createElement("video"); - // let mime_type = mime.lookup(filename); - // return video_element.canPlayType(mime_type) != ""; - return true; // FIX BEFORE COMMIT - mime import causing issues -}; - -export const deepCopy = (obj: T): T => { - return JSON.parse(JSON.stringify(obj)); -}; - -export function randInt(min: number, max: number): number { - return Math.floor(Math.random() * (max - min) + min); -} - -export const getNextColor = (index: number, alpha = 1): string => { - let default_colors = [ - [255, 99, 132], - [54, 162, 235], - [240, 176, 26], - [153, 102, 255], - [75, 192, 192], - [255, 159, 64], - [194, 88, 74], - [44, 102, 219], - [44, 163, 23], - [191, 46, 217], - [160, 162, 162], - [163, 151, 27] - ]; - if (index < default_colors.length) { - var color_set = default_colors[index]; - } else { - var color_set = [randInt(64, 196), randInt(64, 196), randInt(64, 196)]; - } - return ( - "rgba(" + - color_set[0] + - ", " + - color_set[1] + - ", " + - color_set[2] + - ", " + - alpha + - ")" - ); -}; - -export const prettyBytes = (bytes: number): string => { - let units = ["B", "KB", "MB", "GB", "PB"]; - let i = 0; - while (bytes > 1024) { - bytes /= 1024; - i++; - } - let unit = units[i]; - return bytes.toFixed(1) + " " + unit; -}; - -export const prettySI = (num: number): string => { - let units = ["", "k", "M", "G", "T", "P", "E", "Z"]; - let i = 0; - while (num > 1000 && i < units.length - 1) { - num /= 1000; - i++; - } - let unit = units[i]; - return (Number.isInteger(num) ? num : num.toFixed(1)) + unit; -}; diff --git a/js/app/src/gradio_helper.ts b/js/app/src/gradio_helper.ts index 8963a5ddda1b..19a43957b702 100644 --- a/js/app/src/gradio_helper.ts +++ b/js/app/src/gradio_helper.ts @@ -1,26 +1,33 @@ -// import { _ } from "svelte-i18n"; +import { format } from "svelte-i18n"; +import { get } from "svelte/store"; -export class Gradio> { +const x = get(format); + +export type I18nFormatter = typeof x; +export class Gradio = Record> { #id: number; theme: string; version: string; - // i18n: typeof _; + i18n: typeof x; #el: HTMLElement; root: string; + autoscroll: boolean; constructor( id: number, el: HTMLElement, theme: string, version: string, - root: string + root: string, + autoscroll: boolean ) { this.#id = id; this.theme = theme; this.version = version; this.#el = el; - // this.i18n = _; + this.i18n = get(format); this.root = root; + this.autoscroll = autoscroll; } dispatch(event_name: E, data?: T[E]): void { diff --git a/js/app/src/main.ts b/js/app/src/main.ts index 4f4caadd13f5..b2bd4a91041a 100644 --- a/js/app/src/main.ts +++ b/js/app/src/main.ts @@ -1,8 +1,15 @@ -import "@gradio/theme"; +import "@gradio/theme/src/reset.css"; +import "@gradio/theme/src/global.css"; +import "@gradio/theme/src/pollen.css"; +import "@gradio/theme/src/typography.css"; import { client, upload_files } from "@gradio/client"; import { mount_css } from "./css"; -import Index from "./Index.svelte"; -import type { ThemeMode } from "./components/types"; +import type Index from "./Index.svelte"; + +import type { ThemeMode } from "./types"; + +//@ts-ignore +import * as svelte from "./svelte/svelte.js"; declare let BUILD_MODE: string; declare let GRADIO_VERSION: string; @@ -13,7 +20,24 @@ let FONTS: string | []; FONTS = "__FONTS_CSS__"; +//@ts-ignore +let IndexComponent; function create_custom_element(): void { + const o = { + 
SvelteComponent: svelte.SvelteComponent + }; + for (const key in svelte) { + if (key === "SvelteComponent") continue; + if (key === "SvelteComponentDev") { + //@ts-ignore + o[key] = o["SvelteComponent"]; + } else { + //@ts-ignore + o[key] = svelte[key]; + } + } + //@ts-ignore + window.__gradio__svelte__internal = o; class GradioApp extends HTMLElement { control_page_title: string | null; initial_height: string; @@ -49,6 +73,7 @@ function create_custom_element(): void { } async connectedCallback(): Promise { + IndexComponent = (await import("./Index.svelte")).default; this.loading = true; if (this.app) { @@ -73,7 +98,7 @@ function create_custom_element(): void { observer.observe(this, { childList: true }); - this.app = new Index({ + this.app = new IndexComponent({ target: this, props: { // embed source @@ -140,7 +165,7 @@ function create_custom_element(): void { this.src = new_val; } - this.app = new Index({ + this.app = new IndexComponent({ target: this, props: { // embed source diff --git a/js/app/src/stores.ts b/js/app/src/stores.ts index 4642ff5b0466..a411249b4ed8 100644 --- a/js/app/src/stores.ts +++ b/js/app/src/stores.ts @@ -163,4 +163,3 @@ export function create_loading_status_store(): LoadingStatusStore { } export type LoadingStatusType = ReturnType; -export const app_state = writable({ autoscroll: false }); diff --git a/js/app/src/components/types.ts b/js/app/src/types.ts similarity index 89% rename from js/app/src/components/types.ts rename to js/app/src/types.ts index e9c6d62b0003..dae5a95a4696 100644 --- a/js/app/src/components/types.ts +++ b/js/app/src/types.ts @@ -1,12 +1,14 @@ import type { ComponentType } from "svelte"; import type { SvelteComponent } from "svelte"; -import type { component_map } from "./directory"; - -type ComponentMap = typeof component_map; +interface ComponentImport { + interactive: SvelteComponent; + static: SvelteComponent; + example: SvelteComponent; +} export interface ComponentMeta { - type: keyof ComponentMap; + type: string; id: number; has_modes: boolean; props: Record & { mode: "interactive" | "static" }; @@ -15,6 +17,7 @@ export interface ComponentMeta { documentation?: Documentation; children?: ComponentMeta[]; value?: any; + component_class_id: string; } export interface DependencyTypes { diff --git a/js/app/src/vite-env-override.d.ts b/js/app/src/vite-env-override.d.ts index 5610442ae24d..4a525fa83272 100644 --- a/js/app/src/vite-env-override.d.ts +++ b/js/app/src/vite-env-override.d.ts @@ -4,3 +4,16 @@ declare module "*.whl" { const content: string; export default content; } + +// virtual module component type definition +declare module "virtual:component-loader" { + export function load_component( + api_url: string, + name: string, + mode: "interactive" | "static" | "example", + id: string + ): Promise<{ + name: ComponentMeta["type"]; + component: LoadedComponent; + }>; +} diff --git a/js/app/test/components.test.ts b/js/app/test/components.test.ts index 06add2b61ed2..a92c90f74951 100644 --- a/js/app/test/components.test.ts +++ b/js/app/test/components.test.ts @@ -30,7 +30,7 @@ import StaticNumber from "@gradio/number/static"; import StaticRadio from "@gradio/radio/static"; import StaticSlider from "@gradio/slider/static"; import StaticTextbox from "@gradio/textbox/static"; -import StaticTimeSeries from "@gradio/timeseries/static"; +// import StaticTimeSeries from "@gradio/timeseries/static"; import StaticUploadButton from "@gradio/uploadbutton/static"; import StaticVideo from "@gradio/video/static"; @@ -46,7 +46,7 @@ import 
InteractiveNumber from "@gradio/number/interactive"; import InteractiveRadio from "@gradio/radio/interactive"; import InteractiveSlider from "@gradio/slider/interactive"; import InteractiveTextbox from "@gradio/textbox/interactive"; -import InteractiveTimeSeries from "@gradio/timeseries/interactive"; +// import InteractiveTimeSeries from "@gradio/timeseries/interactive"; import InteractiveUploadButton from "@gradio/uploadbutton/interactive"; import InteractiveVideo from "@gradio/video/interactive"; import { LoadingStatus } from "@gradio/statustracker"; @@ -118,8 +118,8 @@ const components = [ ["StaticSlider", StaticSlider, {}], ["InteractiveTextbox", InteractiveTextbox, { container: false }], ["StaticTextbox", StaticTextbox, { container: false }], - ["InteractiveTimeSeries", InteractiveTimeSeries, {}], - ["StaticTimeSeries", StaticTimeSeries, {}], + // ["InteractiveTimeSeries", InteractiveTimeSeries, {}], + // ["StaticTimeSeries", StaticTimeSeries, {}], ["InteractiveUploadButton", InteractiveUploadButton, {}], ["StaticUploadButton", StaticUploadButton, {}], ["InteractiveVideo", InteractiveVideo, {}], diff --git a/js/app/vite.config.ts b/js/app/vite.config.ts index 69e5bf952636..0cda2500c7af 100644 --- a/js/app/vite.config.ts +++ b/js/app/vite.config.ts @@ -11,20 +11,29 @@ import { resolve } from "path"; const version_path = resolve(__dirname, "../../gradio/package.json"); const theme_token_path = resolve(__dirname, "../theme/src/tokens.css"); -const version_raw = JSON.parse(readFileSync(version_path, { encoding: "utf-8" })).version.trim(); +const version_raw = JSON.parse( + readFileSync(version_path, { encoding: "utf-8" }) +).version.trim(); const version = version_raw.replace(/\./g, "-"); const client_version_path = resolve( __dirname, "../../client/python/gradio_client/package.json" ); -const client_version_raw = JSON.parse(readFileSync(client_version_path, { encoding: "utf-8" })).version.trim(); +const client_version_raw = JSON.parse( + readFileSync(client_version_path, { + encoding: "utf-8" + }) +).version.trim(); import { inject_ejs, patch_dynamic_import, generate_cdn_entry, - handle_ce_css + generate_dev_entry, + handle_ce_css, + inject_component_loader, + resolve_svelte } from "./build_plugins"; const GRADIO_VERSION = process.env.GRADIO_VERSION || "asd_stub_asd"; @@ -36,6 +45,11 @@ const TEST_MODE = process.env.TEST_MODE || "jsdom"; //@ts-ignore export default defineConfig(({ mode }) => { + const targets = { + "production:cdn": "../../gradio/templates/cdn", + "production:local": "../../gradio/templates/frontend", + "dev:custom": "../../gradio/templates/frontend" + }; const CDN_URL = mode === "production:cdn" ? CDN : "/"; const production = mode === "production:cdn" || @@ -57,35 +71,39 @@ export default defineConfig(({ mode }) => { sourcemap: true, target: "esnext", minify: production, - outDir: is_lite - ? resolve(__dirname, "../lite/dist") - : `../../gradio/templates/${is_cdn ? "cdn" : "frontend"}`, + outDir: is_lite ? resolve(__dirname, "../lite/dist") : targets[mode], // To build Gradio-lite as a library, we can't use the library mode // like `lib: is_lite && {}` // because it inevitably enables inlining of all the static file assets, // while we need to disable inlining for the wheel files to pass their URLs to `micropip.install()`. // So we build it as an app and only use the bundled JS and CSS files as library assets, ignoring the HTML file. // See also `lite.ts` about it. 
- rollupOptions: is_lite && { - input: "./lite.html", - output: { - // To use it as a library, we don't add the hash to the file name. - entryFileNames: "lite.js", - assetFileNames: (file) => { - if (file.name?.endsWith(".whl")) { - // Python wheel files must follow the naming rules to be installed, so adding a hash to the name is not allowed. - return `assets/[name].[ext]`; - } - if (file.name === "lite.css") { + rollupOptions: is_lite + ? { + input: "./lite.html", + output: { // To use it as a library, we don't add the hash to the file name. - return `[name].[ext]`; - } else { - return `assets/[name]-[hash].[ext]`; + entryFileNames: "lite.js", + assetFileNames: (file) => { + if (file.name?.endsWith(".whl")) { + // Python wheel files must follow the naming rules to be installed, so adding a hash to the name is not allowed. + return `assets/[name].[ext]`; + } + if (file.name === "lite.css") { + // To use it as a library, we don't add the hash to the file name. + return `[name].[ext]`; + } else { + return `assets/[name]-[hash].[ext]`; + } + } } - } - } - } + } + : { + external: ["./svelte/svelte.js"], + makeAbsoluteExternalsRelative: false + } }, + define: { BUILD_MODE: production ? JSON.stringify("prod") : JSON.stringify("dev"), BACKEND_URL: production @@ -117,10 +135,13 @@ export default defineConfig(({ mode }) => { } }, plugins: [ + resolve_svelte(mode === "development"), + svelte({ inspector: true, compilerOptions: { - dev: !production + dev: true, + discloseVersion: false }, hot: !process.env.VITEST && !production, preprocess: sveltePreprocess({ @@ -132,6 +153,7 @@ export default defineConfig(({ mode }) => { } }) }), + generate_dev_entry({ enable: mode !== "development" }), inject_ejs(), patch_dynamic_import({ mode: is_cdn ? "cdn" : "local", @@ -139,7 +161,8 @@ export default defineConfig(({ mode }) => { cdn_url: CDN_URL }), generate_cdn_entry({ enable: is_cdn, cdn_url: CDN_URL }), - handle_ce_css() + handle_ce_css(), + inject_component_loader() ], test: { setupFiles: [resolve(__dirname, "../../.config/setup_vite_tests.ts")], @@ -148,6 +171,7 @@ export default defineConfig(({ mode }) => { TEST_MODE === "node" ? ["**/*.node-test.{js,mjs,cjs,ts,mts,cts,jsx,tsx}"] : ["**/*.test.{js,mjs,cjs,ts,mts,cts,jsx,tsx}"], + exclude: ["**/node_modules/**", "**/gradio/gradio/**"], globals: true }, resolve: { diff --git a/js/atoms/package.json b/js/atoms/package.json index ceedb93e9cfa..316f9d340cda 100644 --- a/js/atoms/package.json +++ b/js/atoms/package.json @@ -6,7 +6,6 @@ "main": "src/index.ts", "author": "", "license": "ISC", - "private": true, "dependencies": { "@gradio/utils": "workspace:^", "@gradio/icons": "workspace:^" diff --git a/js/atoms/src/ShareButton.svelte b/js/atoms/src/ShareButton.svelte index da5be62f049a..450e02a8621a 100644 --- a/js/atoms/src/ShareButton.svelte +++ b/js/atoms/src/ShareButton.svelte @@ -4,7 +4,7 @@ import { createEventDispatcher } from "svelte"; import type { ShareData } from "@gradio/utils"; import { ShareError } from "@gradio/utils"; - import { _ } from "svelte-i18n"; + import type { I18nFormatter } from "@gradio/utils"; const dispatch = createEventDispatcher<{ share: ShareData; @@ -13,12 +13,13 @@ export let formatter: (arg0: any) => Promise; export let value: any; + export let i18n: I18nFormatter; let pending = false; { try { diff --git a/js/atoms/src/UploadText.svelte b/js/atoms/src/UploadText.svelte index 9925959d04cf..fc32638ba1b1 100644 --- a/js/atoms/src/UploadText.svelte +++ b/js/atoms/src/UploadText.svelte @@ -1,7 +1,8 @@
- {$_(defs[type])} - - {$_("common.or")} - - {$_("upload_text.click_to_upload")} + {i18n(defs[type])} + - {i18n("common.or")} - + {i18n("upload_text.click_to_upload")}
+ \ No newline at end of file diff --git a/js/gallery/static/StaticGallery.svelte b/js/gallery/static/StaticGallery.svelte index cf07e19143e9..6b11348da68a 100644 --- a/js/gallery/static/StaticGallery.svelte +++ b/js/gallery/static/StaticGallery.svelte @@ -14,8 +14,10 @@ export let elem_id = ""; export let elem_classes: string[] = []; export let visible = true; - export let value: (FileData | string | [FileData | string, string])[] | null = - null; + export let value: + | { image: FileData; caption: string | null }[] + | null + | null = null; export let container = true; export let scale: number | null = null; export let min_width: number | undefined = undefined; @@ -49,7 +51,11 @@ allow_overflow={false} height={typeof height === "number" ? height : undefined} > - + gradio.dispatch("change", value)} on:select={(e) => gradio.dispatch("select", e.detail)} @@ -69,5 +75,6 @@ bind:selected_index {show_share_button} {show_download_button} + i18n={gradio.i18n} /> diff --git a/js/group/CHANGELOG.md b/js/group/CHANGELOG.md new file mode 100644 index 000000000000..d232c8ea37b3 --- /dev/null +++ b/js/group/CHANGELOG.md @@ -0,0 +1,7 @@ +# @gradio/group + +## 0.0.2-beta.0 + +### Features + +- [#5648](https://github.com/gradio-app/gradio/pull/5648) [`c573e2339`](https://github.com/gradio-app/gradio/commit/c573e2339b86c85b378dc349de5e9223a3c3b04a) - Publish all components to npm. Thanks [@freddyaboulton](https://github.com/freddyaboulton)! \ No newline at end of file diff --git a/js/group/package.json b/js/group/package.json index 27ae8e1b5971..15ef952a2726 100644 --- a/js/group/package.json +++ b/js/group/package.json @@ -1,12 +1,12 @@ { "name": "@gradio/group", - "version": "0.0.1", + "version": "0.0.2-beta.0", "description": "Gradio UI packages", "type": "module", "main": "./static/index.ts", "author": "", "license": "ISC", - "private": true, + "private": false, "main_changeset": true, "exports": { ".": "./static/index.ts", diff --git a/js/highlightedtext/HighlightedText.stories.svelte b/js/highlightedtext/HighlightedText.stories.svelte index 84bb7dbb3ad4..0023329f391c 100644 --- a/js/highlightedtext/HighlightedText.stories.svelte +++ b/js/highlightedtext/HighlightedText.stories.svelte @@ -1,7 +1,7 @@ diff --git a/js/highlightedtext/interactive/Highlightedtext.svelte b/js/highlightedtext/interactive/Highlightedtext.svelte index fc11837cc6bd..3d4986240b43 100644 --- a/js/highlightedtext/interactive/Highlightedtext.svelte +++ b/js/highlightedtext/interactive/Highlightedtext.svelte @@ -6,7 +6,10 @@ import { correct_color_map, merge_elements } from "../utils"; import LabelInput from "./LabelInput.svelte"; - export let value: [string, string | number | null][] = []; + export let value: { + token: string; + class_or_confidence: string | number | null; + }[] = []; export let show_legend = false; export let color_map: Record = {}; export let selectable = false; @@ -37,32 +40,40 @@ if ( selection?.toString() && activeElementIndex !== -1 && - value[activeElementIndex][0].toString().includes(selection.toString()) + value[activeElementIndex].token.toString().includes(selection.toString()) ) { const tempFlag = Symbol(); - const str = value[activeElementIndex][0]; + const str = value[activeElementIndex].token; const [before, selected, after] = [ str.substring(0, startIndex), str.substring(startIndex, endIndex), - str.substring(endIndex), + str.substring(endIndex) ]; - let tempValue: [string, string | number | null, symbol?][] = [ + let tempValue: { + token: string; + class_or_confidence: string | number | null; + 
flag?: symbol; + }[] = [ ...value.slice(0, activeElementIndex), - [before, null], - [selected, mode === "scores" ? 1 : "label", tempFlag], // add a temp flag to the new highlighted text element - [after, null], - ...value.slice(activeElementIndex + 1), + { token: before, class_or_confidence: null }, + { + token: selected, + class_or_confidence: mode === "scores" ? 1 : "label", + flag: tempFlag + }, // add a temp flag to the new highlighted text element + { token: after, class_or_confidence: null }, + ...value.slice(activeElementIndex + 1) ]; // store the index of the new highlighted text element and remove the flag - labelToEdit = tempValue.findIndex(([_, __, flag]) => flag === tempFlag); - tempValue[labelToEdit].pop(); + labelToEdit = tempValue.findIndex(({ flag }) => flag === tempFlag); + // tempValue[labelToEdit].pop(); // remove elements with empty labels - tempValue = tempValue.filter((item) => item[0].trim() !== ""); - value = tempValue as [string, string | number | null][]; + tempValue = tempValue.filter((item) => item.token.trim() !== ""); + value = tempValue.map(({ flag, ...rest }) => rest); handleValueChange(); document.getElementById(`label-input-${labelToEdit}`)?.focus(); @@ -80,8 +91,8 @@ } function removeHighlightedText(index: number): void { - if (index < 0 || index >= value.length) return; - value[index][1] = null; + if (!value || index < 0 || index >= value.length) return; + value[index].class_or_confidence = null; value = merge_elements(value, "equal"); handleValueChange(); window.getSelection()?.empty(); @@ -105,13 +116,13 @@ color_map = {}; } if (value.length > 0) { - for (let [_, label] of value) { - if (label !== null) { - if (typeof label === "string") { + for (let entry of value) { + if (entry.class_or_confidence !== null) { + if (typeof entry.class_or_confidence === "string") { mode = "categories"; - if (!(label in color_map)) { + if (!(entry.class_or_confidence in color_map)) { let color = get_next_color(Object.keys(color_map).length); - color_map[label] = color; + color_map[entry.class_or_confidence] = color; } } else { mode = "scores"; @@ -149,11 +160,11 @@ function handleSelect( i: number, text: string, - category: string | number | null + class_or_confidence: string | number | null ): void { dispatch("select", { index: i, - value: [text, category], + value: [text, class_or_confidence] }); } @@ -162,23 +173,23 @@ {#if mode === "categories"} {#if show_legend}
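The selection handler above splits the token under the cursor into before, selected, and after pieces, tags the new piece with a temporary `Symbol`, filters out empty tokens, and only then recovers the index of the flagged piece and strips the flag. A minimal standalone sketch of that idea in plain TypeScript; `HighlightEntry` and `splitSelection` are illustrative names, not exports of the component:

```ts
// Sketch only: mirrors the split-and-flag approach in the handler above.
// HighlightEntry follows the new object shape; splitSelection is a
// hypothetical helper, not something exported by the component.
interface HighlightEntry {
	token: string;
	class_or_confidence: string | number | null;
}

function splitSelection(
	value: HighlightEntry[],
	index: number,
	start: number,
	end: number,
	label: string | number
): { entries: HighlightEntry[]; new_index: number } {
	const flag = Symbol(); // temporary marker, stripped before returning
	const str = value[index].token;
	const pieces: (HighlightEntry & { flag?: symbol })[] = [
		{ token: str.slice(0, start), class_or_confidence: null },
		{ token: str.slice(start, end), class_or_confidence: label, flag },
		{ token: str.slice(end), class_or_confidence: null }
	];
	// Splice the pieces in, drop empty tokens, then find the flagged piece
	// so its index survives the filtering step.
	const merged: (HighlightEntry & { flag?: symbol })[] = [
		...value.slice(0, index),
		...pieces,
		...value.slice(index + 1)
	].filter((e) => e.token.trim() !== "");
	const new_index = merged.findIndex((e) => e.flag === flag);
	return {
		entries: merged.map(({ flag: _f, ...rest }) => rest),
		new_index
	};
}
```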
{#if _color_map} - {#each Object.entries(_color_map) as [category, color], i} + {#each Object.entries(_color_map) as [class_or_confidence, color], i}
handle_mouseover(category)} - on:focus={() => handle_mouseover(category)} + on:mouseover={() => handle_mouseover(class_or_confidence)} + on:focus={() => handle_mouseover(class_or_confidence)} on:mouseout={() => handle_mouseout()} on:blur={() => handle_mouseout()} - class="category-label" + class="class_or_confidence-label" style={"background-color:" + color.secondary} > - {category} + {class_or_confidence}
{/each} {/if} @@ -186,33 +197,33 @@ {/if}
- {#each value as [text, category], i} - {#each splitTextByNewline(text) as line, j} + {#each value as { token, class_or_confidence }, i} + {#each splitTextByNewline(token) as line, j} {#if line.trim() !== ""} - + { - if (category !== null) { - handleSelect(i, text, category); + if (class_or_confidence !== null) { + handleSelect(i, token, class_or_confidence); } }} on:keydown={(e) => { - if (category !== null) { + if (class_or_confidence !== null) { labelToEdit = i; - handleSelect(i, text, category); + handleSelect(i, token, class_or_confidence); } else { handleKeydownSelection(e); } @@ -221,7 +232,7 @@ on:mouseover={() => (activeElementIndex = i)} > handleKeydownSelection(e)} @@ -230,37 +241,37 @@ on:click={() => (labelToEdit = i)} tabindex="0">{line} - {#if !show_legend && category !== null && labelToEdit !== i} + {#if !show_legend && class_or_confidence !== null && labelToEdit !== i} (labelToEdit = i)} on:keydown={() => (labelToEdit = i)} > - {category} + {class_or_confidence} {/if} - {#if labelToEdit === i && category !== null} + {#if labelToEdit === i && class_or_confidence !== null}   {/if} - {#if category !== null} + {#if class_or_confidence !== null} {/if} - {#if j < splitTextByNewline(text).length - 1} + {#if j < splitTextByNewline(token).length - 1}
{/if} {/each} @@ -293,15 +304,19 @@ {/if}
- {#each value as [text, _score], i} - {@const score = typeof _score === "string" ? parseInt(_score) : _score} + {#each value as { token, class_or_confidence }, i} + {@const score = + typeof class_or_confidence === "string" + ? parseInt(class_or_confidence) + : class_or_confidence} (activeElementIndex = i)} on:focus={() => (activeElementIndex = i)} on:click={() => (labelToEdit = i)} @@ -316,22 +331,22 @@ : "239, 68, 60," + score) + ")"} > - {text} - {#if _score && labelToEdit === i} + {token} + {#if class_or_confidence && labelToEdit === i} {/if} - {#if _score && activeElementIndex === i} + {#if class_or_confidence && activeElementIndex === i} ; export let elem_id = ""; export let elem_classes: string[] = []; export let visible = true; - export let value: [string, string | number | null][]; + export let value: { + token: string; + class_or_confidence: string | number | null; + }[] = []; export let mode: "static" | "interactive"; export let show_legend: boolean; export let color_map: Record = {}; - export let label = $_("highlighted_text.highlighted_text"); + export let label = gradio.i18n("highlighted_text.highlighted_text"); export let container = true; export let scale: number | null = null; export let min_width: number | undefined = undefined; export let selectable = false; export let combine_adjacent = false; - export let gradio: Gradio<{ - select: SelectData; - change: typeof value; - input: never; - }>; $: if (!color_map && Object.keys(color_map).length) { color_map = color_map; @@ -49,7 +51,11 @@ {scale} {min_width} > - + {#if label} - type HighlightedTextType = [string, string | number | null, symbol?]; + type HighlightedTextType = { + token: string; + class_or_confidence: string | number | null; + }; export let value: HighlightedTextType[]; export let category: string | number | null; @@ -28,15 +31,16 @@ let target = e.target as HTMLInputElement; value = [ ...value.slice(0, elementIndex), - [ - text, - target.value === "" - ? null - : isScoresMode - ? Number(target.value) - : target.value, - ], - ...value.slice(elementIndex + 1), + { + token: text, + class_or_confidence: + target.value === "" + ? null + : isScoresMode + ? 
Number(target.value) + : target.value + }, + ...value.slice(elementIndex + 1) ]; handleValueChange(); diff --git a/js/highlightedtext/package.json b/js/highlightedtext/package.json index 119b773cf086..e97f6cda5a4f 100644 --- a/js/highlightedtext/package.json +++ b/js/highlightedtext/package.json @@ -6,7 +6,7 @@ "main": "./static/index.ts", "author": "", "license": "ISC", - "private": true, + "private": false, "main_changeset": true, "dependencies": { "@gradio/atoms": "workspace:^", diff --git a/js/highlightedtext/static/Highlightedtext.svelte b/js/highlightedtext/static/Highlightedtext.svelte index b113b4cd2d52..657301b0af5e 100644 --- a/js/highlightedtext/static/Highlightedtext.svelte +++ b/js/highlightedtext/static/Highlightedtext.svelte @@ -5,7 +5,10 @@ import { createEventDispatcher } from "svelte"; import { correct_color_map } from "../utils"; - export let value: [string, string | number | null][] = []; + export let value: { + token: string; + class_or_confidence: string | number | null; + }[] = []; export let show_legend = false; export let color_map: Record = {}; export let selectable = false; @@ -29,13 +32,13 @@ color_map = {}; } if (value.length > 0) { - for (let [_, label] of value) { - if (label !== null) { - if (typeof label === "string") { + for (let entry of value) { + if (entry.class_or_confidence !== null) { + if (typeof entry.class_or_confidence === "string") { mode = "categories"; - if (!(label in color_map)) { + if (!(entry.class_or_confidence in color_map)) { let color = get_next_color(Object.keys(color_map).length); - color_map[label] = color; + color_map[entry.class_or_confidence] = color; } } else { mode = "scores"; @@ -91,48 +94,49 @@
{/if}
- {#each value as [text, category], i} - {#each splitTextByNewline(text) as line, j} + {#each value as v, i} + {#each splitTextByNewline(v.token) as line, j} {#if line.trim() !== ""} { dispatch("select", { index: i, - value: [text, category], + value: [v.token, v.class_or_confidence] }); }} > {line} - {#if !show_legend && category !== null} + {#if !show_legend && v.class_or_confidence !== null}   - {category} + {v.class_or_confidence} {/if} {/if} - {#if j < splitTextByNewline(text).length - 1} + {#if j < splitTextByNewline(v.token).length - 1}
{/if} {/each} @@ -147,8 +151,11 @@
{/if}
- {#each value as [text, _score]} - {@const score = typeof _score === "string" ? parseInt(_score) : _score} + {#each value as v} + {@const score = + typeof v.class_or_confidence === "string" + ? parseInt(v.class_or_confidence) + : v.class_or_confidence} - {text} + {v.token} {/each}
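The common thread in the HighlightedText changes above is the move from tuple values of the form `[text, label]` to objects with explicit `token` and `class_or_confidence` fields. A hedged sketch of the two shapes, plus a tuple-to-object adapter for callers still holding the old format; the `to_entries` helper is hypothetical and not part of this PR:

```ts
// Old shape used by the components before this PR.
type LegacyHighlight = [string, string | number | null];

// New shape used throughout the diffs above.
interface HighlightEntry {
	token: string;
	class_or_confidence: string | number | null;
}

// Hypothetical adapter for callers still holding tuple data.
function to_entries(pairs: LegacyHighlight[]): HighlightEntry[] {
	return pairs.map(([token, class_or_confidence]) => ({
		token,
		class_or_confidence
	}));
}

// Example: [["Hello", null], ["world", "NOUN"]] becomes
// [{ token: "Hello", class_or_confidence: null },
//  { token: "world", class_or_confidence: "NOUN" }]
```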
diff --git a/js/highlightedtext/static/StaticHighlightedtext.svelte b/js/highlightedtext/static/StaticHighlightedtext.svelte index 81ea139d1193..fd209276a5aa 100644 --- a/js/highlightedtext/static/StaticHighlightedtext.svelte +++ b/js/highlightedtext/static/StaticHighlightedtext.svelte @@ -1,31 +1,33 @@ @@ -56,7 +58,11 @@ {scale} {min_width} > - + {#if label} - +
): void { if (tool === "color-sketch") { - static_image = detail; + static_image = normalise_file(detail, root, null)?.data; } else { value = (source === "upload" || source === "webcam") && tool === "sketch" - ? { image: detail, mask: null } - : detail; + ? { image: normalise_file(detail, root, null), mask: null } + : normalise_file(detail, root, null); } - dispatch("upload", detail); + + dispatch("upload", normalise_file(detail, root, null)); } function handle_clear({ detail }: CustomEvent): void { @@ -105,6 +112,13 @@ $: dispatch("drag", dragging); + let value_: null | FileData = null; + + $: if (value !== value_) { + value_ = value; + normalise_file(value_, root, null); + } + function handle_image_load(event: Event): void { const element = event.currentTarget as HTMLImageElement; img_width = element.naturalWidth; @@ -203,14 +217,23 @@ on:load={handle_upload} include_file_metadata={false} disable_click={!!value} + {root} > {#if (value === null && !static_image) || streaming} {:else if tool === "select"} - - (handle_clear(e), (tool = "editor"))} /> + + (handle_clear(e), (tool = "editor"))} + /> {:else if tool === "editor"} (tool = "select")} on:clear={handle_clear} editable @@ -220,7 +243,7 @@ {/key} {#if img_width > 0} @@ -326,10 +350,14 @@ /> {/if} {:else if tool === "select"} - - (handle_clear(e), (tool = "editor"))} /> + + (handle_clear(e), (tool = "editor"))} + /> {:else if tool === "editor"} (tool = "select")} on:clear={handle_clear} editable @@ -338,9 +366,8 @@ - - + gradio.dispatch("edit")} on:clear={() => gradio.dispatch("clear")} on:stream={() => gradio.dispatch("stream")} @@ -93,7 +101,8 @@ {pending} {streaming} {mirror_webcam} + i18n={gradio.i18n} > - + diff --git a/js/image/interactive/Webcam.svelte b/js/image/interactive/Webcam.svelte index 8e85de1ad925..b501b26e00bf 100644 --- a/js/image/interactive/Webcam.svelte +++ b/js/image/interactive/Webcam.svelte @@ -1,7 +1,7 @@ - -{#if value === null} + +{#if value_ === null} {:else}
{#if show_download_button} - + {/if} {#if show_share_button} { @@ -59,7 +69,7 @@ {/if}
{/if} diff --git a/js/image/static/StaticImage.svelte b/js/image/static/StaticImage.svelte index bd1006209b5e..eca9aa7bdc49 100644 --- a/js/image/static/StaticImage.svelte +++ b/js/image/static/StaticImage.svelte @@ -5,17 +5,19 @@ import StaticImage from "./ImagePreview.svelte"; import { Block } from "@gradio/atoms"; - import { _ } from "svelte-i18n"; + import { StatusTracker } from "@gradio/statustracker"; + import type { FileData } from "js/upload/src"; import type { LoadingStatus } from "@gradio/statustracker"; export let elem_id = ""; export let elem_classes: string[] = []; export let visible = true; - export let value: null | string = null; + export let value: null | FileData = null; export let label: string; export let show_label: boolean; export let show_download_button: boolean; + export let root: string; export let height: number | undefined; export let width: number | undefined; @@ -53,16 +55,22 @@ {scale} {min_width} > - + gradio.dispatch("select", detail)} on:share={({ detail }) => gradio.dispatch("share", detail)} on:error={({ detail }) => gradio.dispatch("error", detail)} + {root} {value} {label} {show_label} {show_download_button} {selectable} {show_share_button} + i18n={gradio.i18n} /> diff --git a/js/json/CHANGELOG.md b/js/json/CHANGELOG.md index 918e54b5516d..a31dfc222c9e 100644 --- a/js/json/CHANGELOG.md +++ b/js/json/CHANGELOG.md @@ -73,4 +73,4 @@ Thanks [@pngwn](https://github.com/pngwn)! ### Patch Changes - Updated dependencies []: - - @gradio/atoms@0.0.2 + - @gradio/atoms@0.0.2 \ No newline at end of file diff --git a/js/json/package.json b/js/json/package.json index 50ac66e2e820..8237adad284f 100644 --- a/js/json/package.json +++ b/js/json/package.json @@ -6,7 +6,7 @@ "main": "./static/index.ts", "author": "", "license": "ISC", - "private": true, + "private": false, "dependencies": { "@gradio/atoms": "workspace:^", "@gradio/icons": "workspace:^", diff --git a/js/json/static/StaticJson.svelte b/js/json/static/StaticJson.svelte index 498de5b30ea7..721cde89e5b1 100644 --- a/js/json/static/StaticJson.svelte +++ b/js/json/static/StaticJson.svelte @@ -6,7 +6,6 @@ import { StatusTracker } from "@gradio/statustracker"; import type { LoadingStatus } from "@gradio/statustracker"; - import { _ } from "svelte-i18n"; export let elem_id = ""; export let elem_classes: string[] = []; @@ -51,7 +50,11 @@ /> {/if} - + diff --git a/js/label/CHANGELOG.md b/js/label/CHANGELOG.md index fd504682a498..49a036eab984 100644 --- a/js/label/CHANGELOG.md +++ b/js/label/CHANGELOG.md @@ -74,4 +74,4 @@ Thanks [@pngwn](https://github.com/pngwn)! 
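In the image, JSON, and label diffs above (and the Model3D ones that follow), components stop importing the `$_` store from `svelte-i18n` and instead receive a translation function as a prop, passed down as `i18n={gradio.i18n}` and typed as `I18nFormatter`. A minimal sketch of the pattern, assuming the formatter is essentially a key-to-string function; the exact `I18nFormatter` type lives in `@gradio/utils` and may differ:

```ts
// Sketch of the prop-based i18n pattern used in these diffs.
// The signature of I18nFormatter is an assumption; only the usage
// pattern (i18n passed as a prop, used for default labels) is from the diff.
type I18nFormatter = (key: string) => string;

interface LabelProps {
	// The parent passes the app-level formatter down: i18n={gradio.i18n}
	i18n: I18nFormatter;
	label?: string;
}

function resolveLabel({ i18n, label }: LabelProps): string {
	// Mirrors defaults such as `label = gradio.i18n("label.label")`:
	// fall back to a translated key when no explicit label is supplied.
	return label ?? i18n("label.label");
}

// Example: resolveLabel({ i18n: (k) => k }) returns "label.label".
```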
### Patch Changes - Updated dependencies []: - - @gradio/utils@0.0.2 + - @gradio/utils@0.0.2 \ No newline at end of file diff --git a/js/label/package.json b/js/label/package.json index 09a63b53a3e9..b70761a1bbd6 100644 --- a/js/label/package.json +++ b/js/label/package.json @@ -6,7 +6,7 @@ "main": "./static/index.ts", "author": "", "license": "ISC", - "private": true, + "private": false, "dependencies": { "@gradio/atoms": "workspace:^", "@gradio/icons": "workspace:^", diff --git a/js/label/static/StaticLabel.svelte b/js/label/static/StaticLabel.svelte index 6ba1ce60f5f6..f3c7ee0b56e1 100644 --- a/js/label/static/StaticLabel.svelte +++ b/js/label/static/StaticLabel.svelte @@ -5,8 +5,11 @@ import { Block, BlockLabel, Empty } from "@gradio/atoms"; import { StatusTracker } from "@gradio/statustracker"; import type { LoadingStatus } from "@gradio/statustracker"; - import { _ } from "svelte-i18n"; + export let gradio: Gradio<{ + change: never; + select: SelectData; + }>; export let elem_id = ""; export let elem_classes: string[] = []; export let visible = true; @@ -15,17 +18,13 @@ label?: string; confidences?: { label: string; confidence: number }[]; } = {}; - export let label = $_("label.label"); + export let label = gradio.i18n("label.label"); export let container = true; export let scale: number | null = null; export let min_width: number | undefined = undefined; export let loading_status: LoadingStatus; export let show_label = true; export let selectable = false; - export let gradio: Gradio<{ - change: never; - select: SelectData; - }>; $: ({ confidences, label: _label } = value); $: _label, confidences, gradio.dispatch("change"); @@ -41,7 +40,11 @@ {min_width} padding={false} > - + {#if show_label} {/if} diff --git a/js/markdown/package.json b/js/markdown/package.json index 864fc1410da2..8de74701f5a1 100644 --- a/js/markdown/package.json +++ b/js/markdown/package.json @@ -6,7 +6,7 @@ "main": "./static/index.ts", "author": "", "license": "ISC", - "private": true, + "private": false, "main_changeset": true, "exports": { ".": "./static/index.ts", diff --git a/js/markdown/static/StaticMarkdown.svelte b/js/markdown/static/StaticMarkdown.svelte index 3c4ea687f74c..7c31b567c6a9 100644 --- a/js/markdown/static/StaticMarkdown.svelte +++ b/js/markdown/static/StaticMarkdown.svelte @@ -28,7 +28,12 @@ - +
- + (dragging = detail)} on:change={({ detail }) => gradio.dispatch("change", detail)} on:clear={() => gradio.dispatch("clear")} + i18n={gradio.i18n} > - + diff --git a/js/model3D/interactive/Model3DUpload.svelte b/js/model3D/interactive/Model3DUpload.svelte index aca84c889689..97439725bb1b 100644 --- a/js/model3D/interactive/Model3DUpload.svelte +++ b/js/model3D/interactive/Model3DUpload.svelte @@ -10,6 +10,8 @@ export let clear_color: [number, number, number, number] = [0, 0, 0, 0]; export let label = ""; export let show_label: boolean; + export let root: string; + export let i18n: I18nFormatter; export let zoom_speed = 1; // alpha, beta, radius @@ -84,6 +86,7 @@ import * as BABYLON from "babylonjs"; import * as BABYLON_LOADERS from "babylonjs-loaders"; + import type { I18nFormatter } from "js/utils/src"; BABYLON_LOADERS.OBJFileLoader.IMPORT_VERTEX_COLORS = true; @@ -93,7 +96,12 @@ {#if value === null} - + {:else} @@ -101,6 +109,7 @@ diff --git a/js/model3D/package.json b/js/model3D/package.json index aa793a35d217..b2f86e45fd10 100644 --- a/js/model3D/package.json +++ b/js/model3D/package.json @@ -6,7 +6,7 @@ "main": "./index.svelte", "author": "", "license": "ISC", - "private": true, + "private": false, "dependencies": { "@gradio/atoms": "workspace:^", "@gradio/icons": "workspace:^", diff --git a/js/model3D/static/Model3D.svelte b/js/model3D/static/Model3D.svelte index 399531b076ca..5e6799f49f79 100644 --- a/js/model3D/static/Model3D.svelte +++ b/js/model3D/static/Model3D.svelte @@ -3,15 +3,16 @@ import { BlockLabel, IconButton } from "@gradio/atoms"; import { File, Download, Undo } from "@gradio/icons"; import { add_new_model, reset_camera_position } from "../shared/utils"; - import { _ } from "svelte-i18n"; import { onMount } from "svelte"; import * as BABYLON from "babylonjs"; import * as BABYLON_LOADERS from "babylonjs-loaders"; + import type { I18nFormatter } from "js/utils/src"; export let value: FileData | null; export let clear_color: [number, number, number, number] = [0, 0, 0, 0]; export let label = ""; export let show_label: boolean; + export let i18n: I18nFormatter; export let zoom_speed = 1; // alpha, beta, radius @@ -72,7 +73,11 @@ } - + {#if value}
@@ -82,7 +87,7 @@ target={window.__is_colab__ ? "_blank" : null} download={window.__is_colab__ ? null : value.orig_name || value.name} > - +
diff --git a/js/model3D/static/StaticModel3d.svelte b/js/model3D/static/StaticModel3d.svelte index ddb919c4ca1f..0deacde100c1 100644 --- a/js/model3D/static/StaticModel3d.svelte +++ b/js/model3D/static/StaticModel3d.svelte @@ -7,7 +7,7 @@ import { StatusTracker } from "@gradio/statustracker"; import type { LoadingStatus } from "@gradio/statustracker"; - import { _ } from "svelte-i18n"; + import type { Gradio } from "@gradio/utils"; export let elem_id = ""; export let elem_classes: string[] = []; @@ -22,6 +22,7 @@ export let container = true; export let scale: number | null = null; export let min_width: number | undefined = undefined; + export let gradio: Gradio; export let height: number | undefined = undefined; export let zoom_speed = 1; @@ -50,11 +51,16 @@ {min_width} {height} > - + {#if value} ; + export let label = gradio.i18n("number.number"); export let info: string | undefined = undefined; export let elem_id = ""; export let elem_classes: string[] = []; @@ -21,13 +27,6 @@ export let loading_status: LoadingStatus; export let value_is_output = false; export let step: number | null = null; - export let gradio: Gradio<{ - change: never; - input: never; - submit: never; - blur: never; - focus: never; - }>; - + ; + export let label = gradio.i18n("number.number"); export let info: string | undefined = undefined; export let elem_id = ""; export let elem_classes: string[] = []; @@ -21,13 +27,6 @@ export let loading_status: LoadingStatus; export let value_is_output = false; export let step: number | null = null; - export let gradio: Gradio<{ - change: never; - input: never; - submit: never; - blur: never; - focus: never; - }>; - + - - + + =14.0.0" + }, + "peerDependencies": { + "rollup": "^2.68.0||^3.0.0" + }, + "peerDependenciesMeta": { + "rollup": { + "optional": true + } + } + }, + "../../node_modules/.pnpm/@rollup+plugin-json@6.0.0_rollup@3.28.0/node_modules/@rollup/plugin-json": { + "version": "6.0.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@rollup/pluginutils": "^5.0.1" + }, + "devDependencies": { + "@rollup/plugin-buble": "^1.0.0", + "@rollup/plugin-node-resolve": "^15.0.0", + "rollup": "^3.2.3", + "source-map-support": "^0.5.21" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "rollup": "^1.20.0||^2.0.0||^3.0.0" + }, + "peerDependenciesMeta": { + "rollup": { + "optional": true + } + } + }, + "../../node_modules/.pnpm/@rollup+plugin-node-resolve@15.1.0_rollup@3.28.0/node_modules/@rollup/plugin-node-resolve": { + "version": "15.1.0", + "dev": true, + "license": "MIT", + "dependencies": { + "@rollup/pluginutils": "^5.0.1", + "@types/resolve": "1.20.2", + "deepmerge": "^4.2.2", + "is-builtin-module": "^3.2.1", + "is-module": "^1.0.0", + "resolve": "^1.22.1" + }, + "devDependencies": { + "@babel/core": "^7.19.1", + "@babel/plugin-transform-typescript": "^7.10.5", + "@rollup/plugin-babel": "^6.0.0", + "@rollup/plugin-commonjs": "^23.0.0", + "@rollup/plugin-json": "^5.0.0", + "es5-ext": "^0.10.62", + "rollup": "^3.2.3", + "source-map": "^0.7.4", + "string-capitalize": "^1.0.1" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "rollup": "^2.78.0||^3.0.0" + }, + "peerDependenciesMeta": { + "rollup": { + "optional": true + } + } + }, + "../../node_modules/.pnpm/@rollup+plugin-sucrase@5.0.1_rollup@3.28.0/node_modules/@rollup/plugin-sucrase": { + "version": "5.0.1", + "license": "MIT", + "dependencies": { + "@rollup/pluginutils": "^5.0.1", + "sucrase": "^3.27.0" + }, + "devDependencies": { + "@rollup/plugin-alias": "^4.0.0", + 
"rollup": "^3.2.3" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "rollup": "^2.53.1||^3.0.0" + }, + "peerDependenciesMeta": { + "rollup": { + "optional": true + } + } + }, + "../../node_modules/.pnpm/@rollup+plugin-typescript@11.1.2_rollup@3.28.0_typescript@5.1.3/node_modules/@rollup/plugin-typescript": { + "version": "11.1.2", + "dev": true, + "license": "MIT", + "dependencies": { + "@rollup/pluginutils": "^5.0.1", + "resolve": "^1.22.1" + }, + "devDependencies": { + "@rollup/plugin-buble": "^1.0.0", + "@rollup/plugin-commonjs": "^23.0.0", + "@types/node": "^14.18.30", + "@types/resolve": "^1.20.2", + "buble": "^0.20.0", + "rollup": "^3.2.3", + "typescript": "^4.8.3" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "rollup": "^2.14.0||^3.0.0", + "tslib": "*", + "typescript": ">=3.7.0" + }, + "peerDependenciesMeta": { + "rollup": { + "optional": true + }, + "tslib": { + "optional": true + } + } + }, + "../../node_modules/.pnpm/@sveltejs+vite-plugin-svelte@2.4.2_svelte@4.2.0_vite@4.4.9/node_modules/@sveltejs/vite-plugin-svelte": { + "version": "2.4.2", + "license": "MIT", + "dependencies": { + "@sveltejs/vite-plugin-svelte-inspector": "^1.0.3", + "debug": "^4.3.4", + "deepmerge": "^4.3.1", + "kleur": "^4.1.5", + "magic-string": "^0.30.0", + "svelte-hmr": "^0.15.2", + "vitefu": "^0.2.4" + }, + "devDependencies": { + "@types/debug": "^4.1.8", + "esbuild": "^0.18.6", + "svelte": "^3.59.2", + "vite": "^4.3.9" + }, + "engines": { + "node": "^14.18.0 || >= 16" + }, + "peerDependencies": { + "svelte": "^3.54.0 || ^4.0.0", + "vite": "^4.0.0" + } + }, + "../../node_modules/.pnpm/coffeescript@2.7.0/node_modules/coffeescript": { + "version": "2.7.0", + "license": "MIT", + "bin": { + "cake": "bin/cake", + "coffee": "bin/coffee" + }, + "devDependencies": { + "@babel/core": "~7.17.8", + "@babel/preset-env": "~7.16.11", + "babel-preset-minify": "~0.5.1", + "codemirror": "~5.65.2", + "docco": "~0.9.1", + "highlight.js": "~11.5.0", + "jison": "~0.4.18", + "markdown-it": "~12.3.2", + "puppeteer": "~13.5.2", + "underscore": "~1.13.2", + "webpack": "~5.71.0" + }, + "engines": { + "node": ">=6" + } + }, + "../../node_modules/.pnpm/css-tree@2.3.1/node_modules/css-tree": { + "version": "2.3.1", + "license": "MIT", + "dependencies": { + "mdn-data": "2.0.30", + "source-map-js": "^1.0.1" + }, + "devDependencies": { + "c8": "^7.12.0", + "clap": "^2.0.1", + "esbuild": "^0.14.53", + "eslint": "^8.4.1", + "json-to-ast": "^2.1.0", + "mocha": "^9.2.2", + "rollup": "^2.68.0" + }, + "engines": { + "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0" + } + }, + "../../node_modules/.pnpm/esbuild@0.19.0/node_modules/esbuild": { + "version": "0.19.0", + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/android-arm": "0.19.0", + "@esbuild/android-arm64": "0.19.0", + "@esbuild/android-x64": "0.19.0", + "@esbuild/darwin-arm64": "0.19.0", + "@esbuild/darwin-x64": "0.19.0", + "@esbuild/freebsd-arm64": "0.19.0", + "@esbuild/freebsd-x64": "0.19.0", + "@esbuild/linux-arm": "0.19.0", + "@esbuild/linux-arm64": "0.19.0", + "@esbuild/linux-ia32": "0.19.0", + "@esbuild/linux-loong64": "0.19.0", + "@esbuild/linux-mips64el": "0.19.0", + "@esbuild/linux-ppc64": "0.19.0", + "@esbuild/linux-riscv64": "0.19.0", + "@esbuild/linux-s390x": "0.19.0", + "@esbuild/linux-x64": "0.19.0", + "@esbuild/netbsd-x64": "0.19.0", + "@esbuild/openbsd-x64": "0.19.0", + "@esbuild/sunos-x64": "0.19.0", + 
"@esbuild/win32-arm64": "0.19.0", + "@esbuild/win32-ia32": "0.19.0", + "@esbuild/win32-x64": "0.19.0" + } + }, + "../../node_modules/.pnpm/lightningcss@1.21.7/node_modules/lightningcss": { + "version": "1.21.7", + "license": "MPL-2.0", + "dependencies": { + "detect-libc": "^1.0.3" + }, + "devDependencies": { + "@babel/parser": "^7.21.4", + "@babel/traverse": "^7.21.4", + "@codemirror/lang-css": "^6.0.1", + "@codemirror/lang-javascript": "^6.1.2", + "@codemirror/lint": "^6.1.0", + "@codemirror/theme-one-dark": "^6.1.0", + "@mdn/browser-compat-data": "^5.2.49", + "@napi-rs/cli": "^2.14.0", + "autoprefixer": "^10.4.14", + "codemirror": "^6.0.1", + "cssnano": "^5.0.8", + "esbuild": "^0.13.10", + "flowgen": "^1.21.0", + "jest-diff": "^27.4.2", + "json-schema-to-typescript": "^11.0.2", + "markdown-it-anchor": "^8.6.6", + "markdown-it-prism": "^2.3.0", + "markdown-it-table-of-contents": "^0.6.0", + "napi-wasm": "^1.0.1", + "node-fetch": "^3.1.0", + "parcel": "^2.8.2", + "patch-package": "^6.5.0", + "path-browserify": "^1.0.1", + "postcss": "^8.3.11", + "posthtml-include": "^1.7.4", + "posthtml-markdownit": "^1.3.1", + "posthtml-prism": "^1.0.4", + "process": "^0.11.10", + "puppeteer": "^12.0.1", + "recast": "^0.22.0", + "sharp": "^0.31.1", + "util": "^0.12.4", + "uvu": "^0.5.6" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + }, + "optionalDependencies": { + "lightningcss-darwin-arm64": "1.21.7", + "lightningcss-darwin-x64": "1.21.7", + "lightningcss-freebsd-x64": "1.21.7", + "lightningcss-linux-arm-gnueabihf": "1.21.7", + "lightningcss-linux-arm64-gnu": "1.21.7", + "lightningcss-linux-arm64-musl": "1.21.7", + "lightningcss-linux-x64-gnu": "1.21.7", + "lightningcss-linux-x64-musl": "1.21.7", + "lightningcss-win32-x64-msvc": "1.21.7" + } + }, + "../../node_modules/.pnpm/pug@3.0.2/node_modules/pug": { + "version": "3.0.2", + "license": "MIT", + "dependencies": { + "pug-code-gen": "^3.0.2", + "pug-filters": "^4.0.0", + "pug-lexer": "^5.0.1", + "pug-linker": "^4.0.0", + "pug-load": "^3.0.0", + "pug-parser": "^6.0.0", + "pug-runtime": "^3.0.1", + "pug-strip-comments": "^2.0.0" + }, + "devDependencies": { + "jstransformer-cdata": "^1.0.0", + "jstransformer-coffee-script": "^1.0.0", + "jstransformer-less": "^2.1.0", + "jstransformer-markdown-it": "^2.0.0", + "jstransformer-stylus": "^1.0.0", + "jstransformer-uglify-js": "^1.1.1", + "jstransformer-verbatim": "^1.0.0", + "mkdirp": "^0.5.1", + "rimraf": "^3.0.2", + "uglify-js": "github:mishoo/UglifyJS2#1c15d0db456ce32f1b9b507aad97e5ee5c8285f7" + } + }, + "../../node_modules/.pnpm/rollup-plugin-ignore@1.0.10/node_modules/rollup-plugin-ignore": { + "version": "1.0.10", + "license": "MIT", + "devDependencies": { + "rollup": "^2.23.0" + } + }, + "../../node_modules/.pnpm/rollup@3.28.0/node_modules/rollup": { + "version": "3.28.0", + "dev": true, + "license": "MIT", + "bin": { + "rollup": "dist/bin/rollup" + }, + "devDependencies": { + "@codemirror/commands": "^6.2.4", + "@codemirror/lang-javascript": "^6.1.9", + "@codemirror/language": "^6.8.0", + "@codemirror/search": "^6.5.0", + "@codemirror/state": "^6.2.1", + "@codemirror/view": "^6.14.1", + "@jridgewell/sourcemap-codec": "^1.4.15", + "@mermaid-js/mermaid-cli": "^10.2.4", + "@rollup/plugin-alias": "^5.0.0", + "@rollup/plugin-buble": "^1.0.2", + "@rollup/plugin-commonjs": "^25.0.3", + "@rollup/plugin-json": "^6.0.0", + "@rollup/plugin-node-resolve": "^15.1.0", + "@rollup/plugin-replace": "^5.0.2", + 
"@rollup/plugin-terser": "^0.4.3", + "@rollup/plugin-typescript": "^11.1.2", + "@rollup/pluginutils": "^5.0.2", + "@types/estree": "1.0.1", + "@types/mocha": "^10.0.1", + "@types/node": "~14.18.54", + "@types/yargs-parser": "^21.0.0", + "@typescript-eslint/eslint-plugin": "^6.2.0", + "@typescript-eslint/parser": "^6.2.0", + "@vue/eslint-config-prettier": "^8.0.0", + "@vue/eslint-config-typescript": "^11.0.3", + "acorn": "^8.10.0", + "acorn-import-assertions": "^1.9.0", + "acorn-jsx": "^5.3.2", + "acorn-walk": "^8.2.0", + "buble": "^0.20.0", + "builtin-modules": "^3.3.0", + "chokidar": "^3.5.3", + "colorette": "^2.0.20", + "concurrently": "^8.2.0", + "core-js": "^3.31.1", + "date-time": "^4.0.0", + "es5-shim": "^4.6.7", + "es6-shim": "^0.35.8", + "eslint": "^8.45.0", + "eslint-config-prettier": "^8.8.0", + "eslint-plugin-import": "^2.27.5", + "eslint-plugin-prettier": "^5.0.0", + "eslint-plugin-unicorn": "^48.0.0", + "eslint-plugin-vue": "^9.15.1", + "fixturify": "^3.0.0", + "flru": "^1.0.2", + "fs-extra": "^11.1.1", + "github-api": "^3.4.0", + "hash.js": "^1.1.7", + "husky": "^8.0.3", + "inquirer": "^9.2.8", + "is-reference": "^3.0.1", + "lint-staged": "^13.2.3", + "locate-character": "^3.0.0", + "magic-string": "^0.30.1", + "mocha": "^10.2.0", + "nyc": "^15.1.0", + "pinia": "^2.1.4", + "prettier": "^3.0.0", + "pretty-bytes": "^6.1.1", + "pretty-ms": "^8.0.0", + "requirejs": "^2.3.6", + "rollup": "^3.26.3", + "rollup-plugin-license": "^3.0.1", + "rollup-plugin-string": "^3.0.0", + "rollup-plugin-thatworks": "^1.0.4", + "semver": "^7.5.4", + "shx": "^0.3.4", + "signal-exit": "^4.0.2", + "source-map": "^0.7.4", + "source-map-support": "^0.5.21", + "systemjs": "^6.14.1", + "terser": "^5.19.2", + "tslib": "^2.6.1", + "typescript": "^5.1.6", + "vite": "^4.4.7", + "vitepress": "^1.0.0-beta.6", + "vue": "^3.3.4", + "weak-napi": "^2.0.2", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=14.18.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "../../node_modules/.pnpm/sass@1.66.1/node_modules/sass": { + "version": "1.66.1", + "license": "MIT", + "dependencies": { + "chokidar": ">=3.0.0 <4.0.0", + "immutable": "^4.0.0", + "source-map-js": ">=0.6.2 <2.0.0" + }, + "bin": { + "sass": "sass.js" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "../../node_modules/.pnpm/stylus@0.60.0/node_modules/stylus": { + "version": "0.60.0", + "license": "MIT", + "dependencies": { + "@adobe/css-tools": "~4.2.0", + "debug": "^4.3.2", + "glob": "^7.1.6", + "sax": "~1.2.4", + "source-map": "^0.7.3" + }, + "bin": { + "stylus": "bin/stylus" + }, + "devDependencies": { + "chai": "^4.3.6", + "mocha": "^9.2.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://opencollective.com/stylus" + } + }, + "../../node_modules/.pnpm/sucrase@3.34.0/node_modules/sucrase": { + "version": "3.34.0", + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.2", + "commander": "^4.0.0", + "glob": "7.1.6", + "lines-and-columns": "^1.1.6", + "mz": "^2.7.0", + "pirates": "^4.0.1", + "ts-interface-checker": "^0.1.9" + }, + "bin": { + "sucrase": "bin/sucrase", + "sucrase-node": "bin/sucrase-node" + }, + "devDependencies": { + "@babel/core": "^7.22.5", + "@jridgewell/trace-mapping": "^0.3.18", + "@types/glob": "^7", + "@types/mocha": "^9.1.1", + "@types/mz": "^2.7.4", + "@types/node": "^20.3.2", + "@typescript-eslint/eslint-plugin": "^5.60.1", + "@typescript-eslint/parser": "^5.60.1", + "chalk": "^4", + "codecov": "^3.8.3", + "eslint": "^8.43.0", + 
"eslint-config-airbnb-base": "^15.0.0", + "eslint-config-prettier": "^8.8.0", + "eslint-plugin-import": "~2.26", + "eslint-plugin-prettier": "^4.2.1", + "mocha": "^10.2.0", + "nyc": "^15.1.0", + "prettier": "^2.8.8", + "sucrase": "^3.33.0", + "test262-harness": "^10.0.0", + "ts-interface-builder": "^0.3.3", + "typescript": "~5.0" + }, + "engines": { + "node": ">=8" + } + }, + "../../node_modules/.pnpm/sugarss@4.0.1_postcss@8.4.27/node_modules/sugarss": { + "version": "4.0.1", + "license": "MIT", + "engines": { + "node": ">=12.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + "peerDependencies": { + "postcss": "^8.3.3" + } + }, + "../../node_modules/.pnpm/svelte-hmr@0.15.3_svelte@4.2.0/node_modules/svelte-hmr": { + "version": "0.15.3", + "dev": true, + "license": "ISC", + "devDependencies": { + "dotenv": "^10.0.0", + "prettier": "^1.19.1", + "svelte": "^3.59.2", + "tap-mocha-reporter": "^5.0.3", + "zoar": "^0.3.0", + "zorax": "^0.0.14" + }, + "engines": { + "node": "^12.20 || ^14.13.1 || >= 16" + }, + "peerDependencies": { + "svelte": "^3.19.0 || ^4.0.0" + } + }, + "../../node_modules/.pnpm/svelte@4.2.0/node_modules/svelte": { + "version": "4.2.0", + "license": "MIT", + "dependencies": { + "@ampproject/remapping": "^2.2.1", + "@jridgewell/sourcemap-codec": "^1.4.15", + "@jridgewell/trace-mapping": "^0.3.18", + "acorn": "^8.9.0", + "aria-query": "^5.3.0", + "axobject-query": "^3.2.1", + "code-red": "^1.0.3", + "css-tree": "^2.3.1", + "estree-walker": "^3.0.3", + "is-reference": "^3.0.1", + "locate-character": "^3.0.0", + "magic-string": "^0.30.0", + "periscopic": "^3.1.0" + }, + "devDependencies": { + "@playwright/test": "^1.35.1", + "@rollup/plugin-commonjs": "^24.1.0", + "@rollup/plugin-json": "^6.0.0", + "@rollup/plugin-node-resolve": "^15.1.0", + "@sveltejs/eslint-config": "^6.0.4", + "@types/aria-query": "^5.0.1", + "@types/estree": "^1.0.1", + "@types/node": "^14.18.51", + "agadoo": "^3.0.0", + "dts-buddy": "^0.1.7", + "esbuild": "^0.18.11", + "happy-dom": "^9.20.3", + "jsdom": "^21.1.2", + "kleur": "^4.1.5", + "rollup": "^3.26.2", + "source-map": "^0.7.4", + "tiny-glob": "^0.2.9", + "typescript": "^5.1.3", + "vitest": "^0.33.0" + }, + "engines": { + "node": ">=16" + } + }, + "../../node_modules/.pnpm/vite@4.4.9_@types+node@20.3.1_less@4.1.3_lightningcss@1.21.7_sass@1.66.1_stylus@0.60.0_sugarss@4.0.1/node_modules/vite": { + "version": "4.4.9", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "^0.18.10", + "postcss": "^8.4.27", + "rollup": "^3.27.1" + }, + "bin": { + "vite": "bin/vite.js" + }, + "devDependencies": { + "@ampproject/remapping": "^2.2.1", + "@babel/parser": "^7.22.7", + "@babel/types": "^7.22.5", + "@jridgewell/trace-mapping": "^0.3.18", + "@rollup/plugin-alias": "^4.0.4", + "@rollup/plugin-commonjs": "^25.0.3", + "@rollup/plugin-dynamic-import-vars": "^2.0.4", + "@rollup/plugin-json": "^6.0.0", + "@rollup/plugin-node-resolve": "15.1.0", + "@rollup/plugin-typescript": "^11.1.2", + "@rollup/pluginutils": "^5.0.2", + "@types/escape-html": "^1.0.2", + "@types/pnpapi": "^0.0.2", + "acorn": "^8.10.0", + "acorn-walk": "^8.2.0", + "cac": "^6.7.14", + "chokidar": "^3.5.3", + "connect": "^3.7.0", + "connect-history-api-fallback": "^2.0.0", + "convert-source-map": "^2.0.0", + "cors": "^2.8.5", + "cross-spawn": "^7.0.3", + "debug": "^4.3.4", + "dep-types": "link:./src/types", + "dotenv": "^16.3.1", + "dotenv-expand": "^9.0.0", + "es-module-lexer": "^1.3.0", + "escape-html": "^1.0.3", + "estree-walker": 
"^3.0.3", + "etag": "^1.8.1", + "fast-glob": "^3.3.1", + "http-proxy": "^1.18.1", + "json-stable-stringify": "^1.0.2", + "launch-editor-middleware": "^2.6.0", + "lightningcss": "^1.21.5", + "magic-string": "^0.30.2", + "micromatch": "^4.0.5", + "mlly": "^1.4.0", + "mrmime": "^1.0.1", + "okie": "^1.0.1", + "open": "^8.4.2", + "parse5": "^7.1.2", + "periscopic": "^3.1.0", + "picocolors": "^1.0.0", + "picomatch": "^2.3.1", + "postcss-import": "^15.1.0", + "postcss-load-config": "^4.0.1", + "postcss-modules": "^6.0.0", + "resolve.exports": "^2.0.2", + "rollup-plugin-license": "^3.0.1", + "sirv": "^2.0.3", + "source-map-support": "^0.5.21", + "strip-ansi": "^7.1.0", + "strip-literal": "^1.3.0", + "tsconfck": "^2.1.2", + "tslib": "^2.6.1", + "types": "link:./types", + "ufo": "^1.2.0", + "ws": "^8.13.0" + }, + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + }, + "peerDependencies": { + "@types/node": ">= 14", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.4.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + } + } + }, + "../../node_modules/.pnpm/yootils@0.3.1/node_modules/yootils": { + "version": "0.3.1", + "license": "MIT", + "devDependencies": { + "agadoo": "^2.0.0", + "esbuild": "^0.12.8", + "prettier": "^2.3.1", + "typescript": "^4.3.2", + "uvu": "^0.5.1" + } + }, + "node_modules/@originjs/vite-plugin-commonjs": { + "resolved": "../../node_modules/.pnpm/@originjs+vite-plugin-commonjs@1.0.3/node_modules/@originjs/vite-plugin-commonjs", + "link": true + }, + "node_modules/@rollup/plugin-commonjs": { + "resolved": "../../node_modules/.pnpm/@rollup+plugin-commonjs@25.0.4_rollup@3.28.0/node_modules/@rollup/plugin-commonjs", + "link": true + }, + "node_modules/@rollup/plugin-json": { + "resolved": "../../node_modules/.pnpm/@rollup+plugin-json@6.0.0_rollup@3.28.0/node_modules/@rollup/plugin-json", + "link": true + }, + "node_modules/@rollup/plugin-node-resolve": { + "resolved": "../../node_modules/.pnpm/@rollup+plugin-node-resolve@15.1.0_rollup@3.28.0/node_modules/@rollup/plugin-node-resolve", + "link": true + }, + "node_modules/@rollup/plugin-sucrase": { + "resolved": "../../node_modules/.pnpm/@rollup+plugin-sucrase@5.0.1_rollup@3.28.0/node_modules/@rollup/plugin-sucrase", + "link": true + }, + "node_modules/@rollup/plugin-typescript": { + "resolved": "../../node_modules/.pnpm/@rollup+plugin-typescript@11.1.2_rollup@3.28.0_typescript@5.1.3/node_modules/@rollup/plugin-typescript", + "link": true + }, + "node_modules/@sveltejs/vite-plugin-svelte": { + "resolved": "../../node_modules/.pnpm/@sveltejs+vite-plugin-svelte@2.4.2_svelte@4.2.0_vite@4.4.9/node_modules/@sveltejs/vite-plugin-svelte", + "link": true + }, + "node_modules/coffeescript": { + "resolved": "../../node_modules/.pnpm/coffeescript@2.7.0/node_modules/coffeescript", + "link": true + }, + "node_modules/css-tree": { + "resolved": "../../node_modules/.pnpm/css-tree@2.3.1/node_modules/css-tree", + "link": true + }, + "node_modules/esbuild": { + "resolved": "../../node_modules/.pnpm/esbuild@0.19.0/node_modules/esbuild", + "link": true + }, + "node_modules/lightningcss": { + "resolved": 
"../../node_modules/.pnpm/lightningcss@1.21.7/node_modules/lightningcss", + "link": true + }, + "node_modules/pug": { + "resolved": "../../node_modules/.pnpm/pug@3.0.2/node_modules/pug", + "link": true + }, + "node_modules/rollup": { + "resolved": "../../node_modules/.pnpm/rollup@3.28.0/node_modules/rollup", + "link": true + }, + "node_modules/rollup-plugin-ignore": { + "resolved": "../../node_modules/.pnpm/rollup-plugin-ignore@1.0.10/node_modules/rollup-plugin-ignore", + "link": true + }, + "node_modules/sass": { + "resolved": "../../node_modules/.pnpm/sass@1.66.1/node_modules/sass", + "link": true + }, + "node_modules/stylus": { + "resolved": "../../node_modules/.pnpm/stylus@0.60.0/node_modules/stylus", + "link": true + }, + "node_modules/sucrase": { + "resolved": "../../node_modules/.pnpm/sucrase@3.34.0/node_modules/sucrase", + "link": true + }, + "node_modules/sugarss": { + "resolved": "../../node_modules/.pnpm/sugarss@4.0.1_postcss@8.4.27/node_modules/sugarss", + "link": true + }, + "node_modules/svelte": { + "resolved": "../../node_modules/.pnpm/svelte@4.2.0/node_modules/svelte", + "link": true + }, + "node_modules/svelte-hmr": { + "resolved": "../../node_modules/.pnpm/svelte-hmr@0.15.3_svelte@4.2.0/node_modules/svelte-hmr", + "link": true + }, + "node_modules/vite": { + "resolved": "../../node_modules/.pnpm/vite@4.4.9_@types+node@20.3.1_less@4.1.3_lightningcss@1.21.7_sass@1.66.1_stylus@0.60.0_sugarss@4.0.1/node_modules/vite", + "link": true + }, + "node_modules/yootils": { + "resolved": "../../node_modules/.pnpm/yootils@0.3.1/node_modules/yootils", + "link": true + } + }, + "dependencies": { + "@originjs/vite-plugin-commonjs": { + "version": "file:../../node_modules/.pnpm/@originjs+vite-plugin-commonjs@1.0.3/node_modules/@originjs/vite-plugin-commonjs", + "requires": { + "@types/node": "^15.12.2", + "esbuild": "^0.14.14", + "typescript": "^4.3.2", + "vite": "^2.3.6" + } + }, + "@rollup/plugin-commonjs": { + "version": "file:../../node_modules/.pnpm/@rollup+plugin-commonjs@25.0.4_rollup@3.28.0/node_modules/@rollup/plugin-commonjs", + "requires": { + "@rollup/plugin-json": "^5.0.0", + "@rollup/plugin-node-resolve": "^15.0.0", + "@rollup/pluginutils": "^5.0.1", + "commondir": "^1.0.1", + "estree-walker": "^2.0.2", + "glob": "^8.0.3", + "is-reference": "1.2.1", + "locate-character": "^2.0.5", + "magic-string": "^0.27.0", + "require-relative": "^0.8.7", + "rollup": "^3.19.0", + "shx": "^0.3.4", + "source-map": "^0.7.4", + "source-map-support": "^0.5.21", + "typescript": "^4.8.3" + } + }, + "@rollup/plugin-json": { + "version": "file:../../node_modules/.pnpm/@rollup+plugin-json@6.0.0_rollup@3.28.0/node_modules/@rollup/plugin-json", + "requires": { + "@rollup/plugin-buble": "^1.0.0", + "@rollup/plugin-node-resolve": "^15.0.0", + "@rollup/pluginutils": "^5.0.1", + "rollup": "^3.2.3", + "source-map-support": "^0.5.21" + } + }, + "@rollup/plugin-node-resolve": { + "version": "file:../../node_modules/.pnpm/@rollup+plugin-node-resolve@15.1.0_rollup@3.28.0/node_modules/@rollup/plugin-node-resolve", + "requires": { + "@babel/core": "^7.19.1", + "@babel/plugin-transform-typescript": "^7.10.5", + "@rollup/plugin-babel": "^6.0.0", + "@rollup/plugin-commonjs": "^23.0.0", + "@rollup/plugin-json": "^5.0.0", + "@rollup/pluginutils": "^5.0.1", + "@types/resolve": "1.20.2", + "deepmerge": "^4.2.2", + "es5-ext": "^0.10.62", + "is-builtin-module": "^3.2.1", + "is-module": "^1.0.0", + "resolve": "^1.22.1", + "rollup": "^3.2.3", + "source-map": "^0.7.4", + "string-capitalize": "^1.0.1" + } + }, + 
"@rollup/plugin-sucrase": { + "version": "file:../../node_modules/.pnpm/@rollup+plugin-sucrase@5.0.1_rollup@3.28.0/node_modules/@rollup/plugin-sucrase", + "requires": { + "@rollup/plugin-alias": "^4.0.0", + "@rollup/pluginutils": "^5.0.1", + "rollup": "^3.2.3", + "sucrase": "^3.27.0" + } + }, + "@rollup/plugin-typescript": { + "version": "file:../../node_modules/.pnpm/@rollup+plugin-typescript@11.1.2_rollup@3.28.0_typescript@5.1.3/node_modules/@rollup/plugin-typescript", + "requires": { + "@rollup/plugin-buble": "^1.0.0", + "@rollup/plugin-commonjs": "^23.0.0", + "@rollup/pluginutils": "^5.0.1", + "@types/node": "^14.18.30", + "@types/resolve": "^1.20.2", + "buble": "^0.20.0", + "resolve": "^1.22.1", + "rollup": "^3.2.3", + "typescript": "^4.8.3" + } + }, + "@sveltejs/vite-plugin-svelte": { + "version": "file:../../node_modules/.pnpm/@sveltejs+vite-plugin-svelte@2.4.2_svelte@4.2.0_vite@4.4.9/node_modules/@sveltejs/vite-plugin-svelte", + "requires": { + "@sveltejs/vite-plugin-svelte-inspector": "^1.0.3", + "@types/debug": "^4.1.8", + "debug": "^4.3.4", + "deepmerge": "^4.3.1", + "esbuild": "^0.18.6", + "kleur": "^4.1.5", + "magic-string": "^0.30.0", + "svelte": "^3.59.2", + "svelte-hmr": "^0.15.2", + "vite": "^4.3.9", + "vitefu": "^0.2.4" + } + }, + "coffeescript": { + "version": "file:../../node_modules/.pnpm/coffeescript@2.7.0/node_modules/coffeescript", + "requires": { + "@babel/core": "~7.17.8", + "@babel/preset-env": "~7.16.11", + "babel-preset-minify": "~0.5.1", + "codemirror": "~5.65.2", + "docco": "~0.9.1", + "highlight.js": "~11.5.0", + "jison": "~0.4.18", + "markdown-it": "~12.3.2", + "puppeteer": "~13.5.2", + "underscore": "~1.13.2", + "webpack": "~5.71.0" + } + }, + "css-tree": { + "version": "file:../../node_modules/.pnpm/css-tree@2.3.1/node_modules/css-tree", + "requires": { + "c8": "^7.12.0", + "clap": "^2.0.1", + "esbuild": "^0.14.53", + "eslint": "^8.4.1", + "json-to-ast": "^2.1.0", + "mdn-data": "2.0.30", + "mocha": "^9.2.2", + "rollup": "^2.68.0", + "source-map-js": "^1.0.1" + } + }, + "esbuild": { + "version": "file:../../node_modules/.pnpm/esbuild@0.19.0/node_modules/esbuild", + "requires": { + "@esbuild/android-arm": "0.19.0", + "@esbuild/android-arm64": "0.19.0", + "@esbuild/android-x64": "0.19.0", + "@esbuild/darwin-arm64": "0.19.0", + "@esbuild/darwin-x64": "0.19.0", + "@esbuild/freebsd-arm64": "0.19.0", + "@esbuild/freebsd-x64": "0.19.0", + "@esbuild/linux-arm": "0.19.0", + "@esbuild/linux-arm64": "0.19.0", + "@esbuild/linux-ia32": "0.19.0", + "@esbuild/linux-loong64": "0.19.0", + "@esbuild/linux-mips64el": "0.19.0", + "@esbuild/linux-ppc64": "0.19.0", + "@esbuild/linux-riscv64": "0.19.0", + "@esbuild/linux-s390x": "0.19.0", + "@esbuild/linux-x64": "0.19.0", + "@esbuild/netbsd-x64": "0.19.0", + "@esbuild/openbsd-x64": "0.19.0", + "@esbuild/sunos-x64": "0.19.0", + "@esbuild/win32-arm64": "0.19.0", + "@esbuild/win32-ia32": "0.19.0", + "@esbuild/win32-x64": "0.19.0" + } + }, + "lightningcss": { + "version": "file:../../node_modules/.pnpm/lightningcss@1.21.7/node_modules/lightningcss", + "requires": { + "@babel/parser": "^7.21.4", + "@babel/traverse": "^7.21.4", + "@codemirror/lang-css": "^6.0.1", + "@codemirror/lang-javascript": "^6.1.2", + "@codemirror/lint": "^6.1.0", + "@codemirror/theme-one-dark": "^6.1.0", + "@mdn/browser-compat-data": "^5.2.49", + "@napi-rs/cli": "^2.14.0", + "autoprefixer": "^10.4.14", + "codemirror": "^6.0.1", + "cssnano": "^5.0.8", + "detect-libc": "^1.0.3", + "esbuild": "^0.13.10", + "flowgen": "^1.21.0", + "jest-diff": "^27.4.2", + 
"json-schema-to-typescript": "^11.0.2", + "lightningcss-darwin-arm64": "1.21.7", + "lightningcss-darwin-x64": "1.21.7", + "lightningcss-freebsd-x64": "1.21.7", + "lightningcss-linux-arm-gnueabihf": "1.21.7", + "lightningcss-linux-arm64-gnu": "1.21.7", + "lightningcss-linux-arm64-musl": "1.21.7", + "lightningcss-linux-x64-gnu": "1.21.7", + "lightningcss-linux-x64-musl": "1.21.7", + "lightningcss-win32-x64-msvc": "1.21.7", + "markdown-it-anchor": "^8.6.6", + "markdown-it-prism": "^2.3.0", + "markdown-it-table-of-contents": "^0.6.0", + "napi-wasm": "^1.0.1", + "node-fetch": "^3.1.0", + "parcel": "^2.8.2", + "patch-package": "^6.5.0", + "path-browserify": "^1.0.1", + "postcss": "^8.3.11", + "posthtml-include": "^1.7.4", + "posthtml-markdownit": "^1.3.1", + "posthtml-prism": "^1.0.4", + "process": "^0.11.10", + "puppeteer": "^12.0.1", + "recast": "^0.22.0", + "sharp": "^0.31.1", + "util": "^0.12.4", + "uvu": "^0.5.6" + } + }, + "pug": { + "version": "file:../../node_modules/.pnpm/pug@3.0.2/node_modules/pug", + "requires": { + "jstransformer-cdata": "^1.0.0", + "jstransformer-coffee-script": "^1.0.0", + "jstransformer-less": "^2.1.0", + "jstransformer-markdown-it": "^2.0.0", + "jstransformer-stylus": "^1.0.0", + "jstransformer-uglify-js": "^1.1.1", + "jstransformer-verbatim": "^1.0.0", + "mkdirp": "^0.5.1", + "pug-code-gen": "^3.0.2", + "pug-filters": "^4.0.0", + "pug-lexer": "^5.0.1", + "pug-linker": "^4.0.0", + "pug-load": "^3.0.0", + "pug-parser": "^6.0.0", + "pug-runtime": "^3.0.1", + "pug-strip-comments": "^2.0.0", + "rimraf": "^3.0.2", + "uglify-js": "github:mishoo/UglifyJS2#1c15d0db456ce32f1b9b507aad97e5ee5c8285f7" + } + }, + "rollup": { + "version": "file:../../node_modules/.pnpm/rollup@3.28.0/node_modules/rollup", + "requires": { + "@codemirror/commands": "^6.2.4", + "@codemirror/lang-javascript": "^6.1.9", + "@codemirror/language": "^6.8.0", + "@codemirror/search": "^6.5.0", + "@codemirror/state": "^6.2.1", + "@codemirror/view": "^6.14.1", + "@jridgewell/sourcemap-codec": "^1.4.15", + "@mermaid-js/mermaid-cli": "^10.2.4", + "@rollup/plugin-alias": "^5.0.0", + "@rollup/plugin-buble": "^1.0.2", + "@rollup/plugin-commonjs": "^25.0.3", + "@rollup/plugin-json": "^6.0.0", + "@rollup/plugin-node-resolve": "^15.1.0", + "@rollup/plugin-replace": "^5.0.2", + "@rollup/plugin-terser": "^0.4.3", + "@rollup/plugin-typescript": "^11.1.2", + "@rollup/pluginutils": "^5.0.2", + "@types/estree": "1.0.1", + "@types/mocha": "^10.0.1", + "@types/node": "~14.18.54", + "@types/yargs-parser": "^21.0.0", + "@typescript-eslint/eslint-plugin": "^6.2.0", + "@typescript-eslint/parser": "^6.2.0", + "@vue/eslint-config-prettier": "^8.0.0", + "@vue/eslint-config-typescript": "^11.0.3", + "acorn": "^8.10.0", + "acorn-import-assertions": "^1.9.0", + "acorn-jsx": "^5.3.2", + "acorn-walk": "^8.2.0", + "buble": "^0.20.0", + "builtin-modules": "^3.3.0", + "chokidar": "^3.5.3", + "colorette": "^2.0.20", + "concurrently": "^8.2.0", + "core-js": "^3.31.1", + "date-time": "^4.0.0", + "es5-shim": "^4.6.7", + "es6-shim": "^0.35.8", + "eslint": "^8.45.0", + "eslint-config-prettier": "^8.8.0", + "eslint-plugin-import": "^2.27.5", + "eslint-plugin-prettier": "^5.0.0", + "eslint-plugin-unicorn": "^48.0.0", + "eslint-plugin-vue": "^9.15.1", + "fixturify": "^3.0.0", + "flru": "^1.0.2", + "fs-extra": "^11.1.1", + "fsevents": "~2.3.2", + "github-api": "^3.4.0", + "hash.js": "^1.1.7", + "husky": "^8.0.3", + "inquirer": "^9.2.8", + "is-reference": "^3.0.1", + "lint-staged": "^13.2.3", + "locate-character": "^3.0.0", + "magic-string": 
"^0.30.1", + "mocha": "^10.2.0", + "nyc": "^15.1.0", + "pinia": "^2.1.4", + "prettier": "^3.0.0", + "pretty-bytes": "^6.1.1", + "pretty-ms": "^8.0.0", + "requirejs": "^2.3.6", + "rollup": "^3.26.3", + "rollup-plugin-license": "^3.0.1", + "rollup-plugin-string": "^3.0.0", + "rollup-plugin-thatworks": "^1.0.4", + "semver": "^7.5.4", + "shx": "^0.3.4", + "signal-exit": "^4.0.2", + "source-map": "^0.7.4", + "source-map-support": "^0.5.21", + "systemjs": "^6.14.1", + "terser": "^5.19.2", + "tslib": "^2.6.1", + "typescript": "^5.1.6", + "vite": "^4.4.7", + "vitepress": "^1.0.0-beta.6", + "vue": "^3.3.4", + "weak-napi": "^2.0.2", + "yargs-parser": "^21.1.1" + } + }, + "rollup-plugin-ignore": { + "version": "file:../../node_modules/.pnpm/rollup-plugin-ignore@1.0.10/node_modules/rollup-plugin-ignore", + "requires": { + "rollup": "^2.23.0" + } + }, + "sass": { + "version": "file:../../node_modules/.pnpm/sass@1.66.1/node_modules/sass", + "requires": { + "chokidar": ">=3.0.0 <4.0.0", + "immutable": "^4.0.0", + "source-map-js": ">=0.6.2 <2.0.0" + } + }, + "stylus": { + "version": "file:../../node_modules/.pnpm/stylus@0.60.0/node_modules/stylus", + "requires": { + "@adobe/css-tools": "~4.2.0", + "chai": "^4.3.6", + "debug": "^4.3.2", + "glob": "^7.1.6", + "mocha": "^9.2.0", + "sax": "~1.2.4", + "source-map": "^0.7.3" + } + }, + "sucrase": { + "version": "file:../../node_modules/.pnpm/sucrase@3.34.0/node_modules/sucrase", + "requires": { + "@babel/core": "^7.22.5", + "@jridgewell/gen-mapping": "^0.3.2", + "@jridgewell/trace-mapping": "^0.3.18", + "@types/glob": "^7", + "@types/mocha": "^9.1.1", + "@types/mz": "^2.7.4", + "@types/node": "^20.3.2", + "@typescript-eslint/eslint-plugin": "^5.60.1", + "@typescript-eslint/parser": "^5.60.1", + "chalk": "^4", + "codecov": "^3.8.3", + "commander": "^4.0.0", + "eslint": "^8.43.0", + "eslint-config-airbnb-base": "^15.0.0", + "eslint-config-prettier": "^8.8.0", + "eslint-plugin-import": "~2.26", + "eslint-plugin-prettier": "^4.2.1", + "glob": "7.1.6", + "lines-and-columns": "^1.1.6", + "mocha": "^10.2.0", + "mz": "^2.7.0", + "nyc": "^15.1.0", + "pirates": "^4.0.1", + "prettier": "^2.8.8", + "sucrase": "^3.33.0", + "test262-harness": "^10.0.0", + "ts-interface-builder": "^0.3.3", + "ts-interface-checker": "^0.1.9", + "typescript": "~5.0" + } + }, + "sugarss": { + "version": "file:../../node_modules/.pnpm/sugarss@4.0.1_postcss@8.4.27/node_modules/sugarss", + "requires": {} + }, + "svelte": { + "version": "file:../../node_modules/.pnpm/svelte@4.2.0/node_modules/svelte", + "requires": { + "@ampproject/remapping": "^2.2.1", + "@jridgewell/sourcemap-codec": "^1.4.15", + "@jridgewell/trace-mapping": "^0.3.18", + "@playwright/test": "^1.35.1", + "@rollup/plugin-commonjs": "^24.1.0", + "@rollup/plugin-json": "^6.0.0", + "@rollup/plugin-node-resolve": "^15.1.0", + "@sveltejs/eslint-config": "^6.0.4", + "@types/aria-query": "^5.0.1", + "@types/estree": "^1.0.1", + "@types/node": "^14.18.51", + "acorn": "^8.9.0", + "agadoo": "^3.0.0", + "aria-query": "^5.3.0", + "axobject-query": "^3.2.1", + "code-red": "^1.0.3", + "css-tree": "^2.3.1", + "dts-buddy": "^0.1.7", + "esbuild": "^0.18.11", + "estree-walker": "^3.0.3", + "happy-dom": "^9.20.3", + "is-reference": "^3.0.1", + "jsdom": "^21.1.2", + "kleur": "^4.1.5", + "locate-character": "^3.0.0", + "magic-string": "^0.30.0", + "periscopic": "^3.1.0", + "rollup": "^3.26.2", + "source-map": "^0.7.4", + "tiny-glob": "^0.2.9", + "typescript": "^5.1.3", + "vitest": "^0.33.0" + } + }, + "svelte-hmr": { + "version": 
"file:../../node_modules/.pnpm/svelte-hmr@0.15.3_svelte@4.2.0/node_modules/svelte-hmr", + "requires": { + "dotenv": "^10.0.0", + "prettier": "^1.19.1", + "svelte": "^3.59.2", + "tap-mocha-reporter": "^5.0.3", + "zoar": "^0.3.0", + "zorax": "^0.0.14" + } + }, + "vite": { + "version": "file:../../node_modules/.pnpm/vite@4.4.9_@types+node@20.3.1_less@4.1.3_lightningcss@1.21.7_sass@1.66.1_stylus@0.60.0_sugarss@4.0.1/node_modules/vite", + "requires": { + "@ampproject/remapping": "^2.2.1", + "@babel/parser": "^7.22.7", + "@babel/types": "^7.22.5", + "@jridgewell/trace-mapping": "^0.3.18", + "@rollup/plugin-alias": "^4.0.4", + "@rollup/plugin-commonjs": "^25.0.3", + "@rollup/plugin-dynamic-import-vars": "^2.0.4", + "@rollup/plugin-json": "^6.0.0", + "@rollup/plugin-node-resolve": "15.1.0", + "@rollup/plugin-typescript": "^11.1.2", + "@rollup/pluginutils": "^5.0.2", + "@types/escape-html": "^1.0.2", + "@types/pnpapi": "^0.0.2", + "acorn": "^8.10.0", + "acorn-walk": "^8.2.0", + "cac": "^6.7.14", + "chokidar": "^3.5.3", + "connect": "^3.7.0", + "connect-history-api-fallback": "^2.0.0", + "convert-source-map": "^2.0.0", + "cors": "^2.8.5", + "cross-spawn": "^7.0.3", + "debug": "^4.3.4", + "dep-types": "link:./src/types", + "dotenv": "^16.3.1", + "dotenv-expand": "^9.0.0", + "es-module-lexer": "^1.3.0", + "esbuild": "^0.18.10", + "escape-html": "^1.0.3", + "estree-walker": "^3.0.3", + "etag": "^1.8.1", + "fast-glob": "^3.3.1", + "fsevents": "~2.3.2", + "http-proxy": "^1.18.1", + "json-stable-stringify": "^1.0.2", + "launch-editor-middleware": "^2.6.0", + "lightningcss": "^1.21.5", + "magic-string": "^0.30.2", + "micromatch": "^4.0.5", + "mlly": "^1.4.0", + "mrmime": "^1.0.1", + "okie": "^1.0.1", + "open": "^8.4.2", + "parse5": "^7.1.2", + "periscopic": "^3.1.0", + "picocolors": "^1.0.0", + "picomatch": "^2.3.1", + "postcss": "^8.4.27", + "postcss-import": "^15.1.0", + "postcss-load-config": "^4.0.1", + "postcss-modules": "^6.0.0", + "resolve.exports": "^2.0.2", + "rollup": "^3.27.1", + "rollup-plugin-license": "^3.0.1", + "sirv": "^2.0.3", + "source-map-support": "^0.5.21", + "strip-ansi": "^7.1.0", + "strip-literal": "^1.3.0", + "tsconfck": "^2.1.2", + "tslib": "^2.6.1", + "types": "link:./types", + "ufo": "^1.2.0", + "ws": "^8.13.0" + } + }, + "yootils": { + "version": "file:../../node_modules/.pnpm/yootils@0.3.1/node_modules/yootils", + "requires": { + "agadoo": "^2.0.0", + "esbuild": "^0.12.8", + "prettier": "^2.3.1", + "typescript": "^4.3.2", + "uvu": "^0.5.1" + } + } + } +} diff --git a/js/preview/package.json b/js/preview/package.json new file mode 100644 index 000000000000..0cec46812701 --- /dev/null +++ b/js/preview/package.json @@ -0,0 +1,41 @@ +{ + "name": "@gradio/preview", + "version": "0.1.0-beta.5", + "description": "Gradio UI packages", + "type": "module", + "main": "src/index.ts", + "author": "", + "license": "ISC", + "private": false, + "scripts": { + "build": "rollup -c" + }, + "devDependencies": { + "@rollup/plugin-commonjs": "^25.0.4", + "@rollup/plugin-json": "^6.0.0", + "@rollup/plugin-node-resolve": "^15.1.0", + "@rollup/plugin-typescript": "^11.1.2", + "rollup": "^3.28.0", + "svelte-hmr": "^0.15.3", + "vite": "^4.4.9" + }, + "dependencies": { + "@originjs/vite-plugin-commonjs": "^1.0.3", + "@rollup/plugin-sucrase": "^5.0.1", + "@sveltejs/vite-plugin-svelte": "^2.4.2", + "@types/which": "^3.0.0", + "coffeescript": "^2.7.0", + "css-tree": "2.3.1", + "esbuild-wasm": "^0.19.0", + "lightningcss": "^1.21.7", + "pug": "^3.0.2", + "rollup-plugin-ignore": "^1.0.10", + "sass": 
"^1.66.1", + "stylus": "^0.60.0", + "sucrase": "^3.34.0", + "sugarss": "^4.0.1", + "svelte": "^4.2.0", + "which": "4.0.0", + "yootils": "^0.3.1" + } +} diff --git a/js/preview/rollup.config.js b/js/preview/rollup.config.js new file mode 100644 index 000000000000..93926cb799cf --- /dev/null +++ b/js/preview/rollup.config.js @@ -0,0 +1,197 @@ +// @ts-nocheck +import { createRequire } from "node:module"; +import { join, dirname } from "path"; +import { cpSync, writeFileSync, rmSync, existsSync } from "fs"; +import { fileURLToPath } from "url"; + +import ts from "@rollup/plugin-typescript"; +import node from "@rollup/plugin-node-resolve"; +import cjs from "@rollup/plugin-commonjs"; +import json from "@rollup/plugin-json"; + +const __dirname = dirname(fileURLToPath(import.meta.url)); + +const require = createRequire(import.meta.url); + +const esbuild_binary_path = require.resolve("esbuild-wasm"); +const vite_client = require.resolve("vite/dist/client/client.mjs"); +const hmr = require.resolve("svelte-hmr"); + +const output_svelte_dir = "../../gradio/templates/frontend/assets/svelte"; + +const onwarn = (warning, warn) => { + if (warning.plugin === "typescript") return; + if (warning.code === "CIRCULAR_DEPENDENCY") return; + if (warning.code === "EVAL") return; + + warn(warning); +}; + +const RE_SVELTE_IMPORT = + /import\s+([\w*{},\s]+)\s+from\s+['"](svelte|svelte\/internal)['"]/g; + +const dirname_def = ` +import { dirname } from 'path'; +import { fileURLToPath } from 'url'; + +const __dirname = dirname(fileURLToPath(import.meta.url)); +`; + +function inject_dirname() { + return { + name: "inject __dirname", + + transform(code, id) { + if (id.includes("svelte-hmr/index.js")) { + return `${dirname_def}\n${code}`; + } + } + }; +} + +function resolve_imports() { + return { + name: "resolve-imports", + resolveId(id) { + const pkgs = [ + "sugarss", + "stylus", + "sass", + "pug", + "coffeescript", + "lightningcss" + ]; + if (pkgs.includes(id)) { + return join(__dirname, "src", "placeholder.ts"); + } + if (id === "svelte/compiler") return "../compiler.js"; + }, + transform(code) { + const new_code = code.replace(RE_SVELTE_IMPORT, (str, $1, $2) => { + const identifier = $1.trim().startsWith("* as") + ? 
$1.replace("* as", "").trim() + : $1.trim(); + return `const ${identifier.replace( + " as ", + ": " + )} = window.__gradio__svelte__internal;`; + }); + return { + code: new_code, + map: null + }; + } + }; +} + +export function copy_files() { + return { + name: "copy_files", + writeBundle() { + cpSync(join(vite_client, ".."), "../../gradio/node/dist/client", { + recursive: true + }); + + cpSync(join(hmr, "../runtime"), "../../gradio/node/dev/files/runtime", { + recursive: true + }); + cpSync( + join(esbuild_binary_path, "..", "..", ".."), + "../../gradio/node/dev/node_modules", + { + recursive: true + } + ); + + cpSync("./src/examine.py", "../../gradio/node/examine.py"); + + writeFileSync( + "../../gradio/node/package.json", + `{"type": "module", "version": "0.0.0"}`, + { + encoding: "utf-8" + } + ); + } + }; +} + +const plugins = [node({ preferBuiltins: true }), json(), cjs(), ts()]; + +export default [ + { + input: "src/index.ts", + output: { + dir: "../../gradio/node/dev/files", + format: "esm", + minifyInternalExports: false + }, + onwarn, + plugins: [ + { + resolveId(id, importer) { + if (id === "esbuild") { + return "esbuild-wasm"; + } + } + }, + ...plugins, + { + name: "clean_dir", + buildStart() { + if (existsSync("../../gradio/node")) { + rmSync("../../gradio/node", { recursive: true }); + } + } + }, + resolve_imports(), + inject_dirname(), + copy_files() + ], + external: ["fsevents", "esbuild-wasm", "../compiler.js"] + }, + { + input: "src/svelte-submodules.ts", + output: { + file: join(output_svelte_dir, "svelte-submodules.js"), + format: "esm" + }, + onwarn, + plugins + }, + + { + input: "src/svelte-internal.ts", + output: { + file: join(output_svelte_dir, "svelte.js"), + format: "esm" + }, + onwarn, + plugins + }, + + { + input: "src/compiler.ts", + output: { + file: "../../gradio/node/dev/compiler.js", + format: "esm" + }, + + onwarn, + plugins: [ + ...plugins, + json({ + include: ["**/node_modules/**", "node_modules/**"] + }), + { + resolveId(id) { + if (id === "css-tree") { + return require.resolve( + "./node_modules/css-tree/dist/csstree.esm.js" + ); + } + } + } + ] + } +]; diff --git a/js/preview/src/build.ts b/js/preview/src/build.ts new file mode 100644 index 000000000000..2b9d9067826f --- /dev/null +++ b/js/preview/src/build.ts @@ -0,0 +1,59 @@ +import * as fs from "fs"; +import { join } from "path"; +import { build } from "vite"; +import { plugins, make_gradio_plugin } from "./plugins"; +import path from "path"; +import { examine_module } from "./index"; + +import { patch } from "toml-patch"; +interface BuildOptions { + component_dir: string; + root_dir: string; +} + +export async function make_build({ + component_dir, + root_dir +}: BuildOptions): Promise { + process.env.gradio_mode = "dev"; + const svelte_dir = join(root_dir, "assets", "svelte"); + + const module_meta = examine_module(component_dir, root_dir, "build"); + try { + for (const comp of module_meta) { + const template_dir = comp.template_dir; + const source_dir = comp.frontend_dir; + + const entries = ["interactive", "example", "static"]; + + for (const entry of entries) { + try { + const x = await build({ + plugins: [ + ...plugins, + make_gradio_plugin({ mode: "build", svelte_dir }) + ], + build: { + emptyOutDir: true, + outDir: join(template_dir, entry), + lib: { + entry: join(source_dir, entry, "index.ts"), + fileName: "index.js", + formats: ["es"] + }, + rollupOptions: { + output: { + entryFileNames: "[name].js" + } + } + } + }); + } catch (e) { + console.error(e); + } + } + } + } catch (e) { + 
console.error(e); + } +} diff --git a/js/preview/src/compiler.ts b/js/preview/src/compiler.ts new file mode 100644 index 000000000000..de5f5c6383de --- /dev/null +++ b/js/preview/src/compiler.ts @@ -0,0 +1,2 @@ +export * from "svelte/compiler"; +export * as default from "svelte/compiler"; diff --git a/js/preview/src/dev.ts b/js/preview/src/dev.ts new file mode 100644 index 000000000000..6d0009943e14 --- /dev/null +++ b/js/preview/src/dev.ts @@ -0,0 +1,139 @@ +import { join } from "path"; +import * as fs from "fs"; +import { createServer, createLogger } from "vite"; +import { plugins, make_gradio_plugin } from "./plugins"; +import { examine_module } from "./index"; + +const vite_messages_to_ignore = [ + "Default and named imports from CSS files are deprecated." +]; + +const logger = createLogger(); +const originalWarning = logger.warn; +logger.warn = (msg, options) => { + if (vite_messages_to_ignore.some((m) => msg.includes(m))) return; + + originalWarning(msg, options); +}; + +interface ServerOptions { + component_dir: string; + root_dir: string; + frontend_port: number; + backend_port: number; + host: string; +} + +export async function create_server({ + component_dir, + root_dir, + frontend_port, + backend_port, + host +}: ServerOptions): Promise { + process.env.gradio_mode = "dev"; + const imports = generate_imports(component_dir, root_dir); + + const NODE_DIR = join(root_dir, "..", "..", "node", "dev"); + const svelte_dir = join(root_dir, "assets", "svelte"); + + try { + const server = await createServer({ + esbuild: false, + customLogger: logger, + mode: "development", + configFile: false, + root: root_dir, + + optimizeDeps: { + disabled: true + }, + server: { + port: frontend_port, + host: host, + fs: { + allow: [root_dir, NODE_DIR, component_dir] + } + }, + plugins: [ + ...plugins, + make_gradio_plugin({ + mode: "dev", + backend_port, + svelte_dir, + imports + }) + ] + }); + + await server.listen(); + + console.info( + `[orange3]Frontend Server[/] (Go here): ${server.resolvedUrls?.local}` + ); + } catch (e) { + console.error(e); + } +} + +function find_frontend_folders(start_path: string): string[] { + if (!fs.existsSync(start_path)) { + console.warn("No directory found at:", start_path); + return []; + } + + if (fs.existsSync(join(start_path, "pyproject.toml"))) return [start_path]; + + const results: string[] = []; + const dir = fs.readdirSync(start_path); + dir.forEach((dir) => { + const filepath = join(start_path, dir); + if (fs.existsSync(filepath)) { + if (fs.existsSync(join(filepath, "pyproject.toml"))) + results.push(filepath); + } + }); + + return results; +} + +function to_posix(_path: string): string { + const isExtendedLengthPath = /^\\\\\?\\/.test(_path); + const hasNonAscii = /[^\u0000-\u0080]+/.test(_path); // eslint-disable-line no-control-regex + + if (isExtendedLengthPath || hasNonAscii) { + return _path; + } + + return _path.replace(/\\/g, "/"); +} + +function generate_imports(component_dir: string, root: string): string { + const components = find_frontend_folders(component_dir); + + const component_entries = components.flatMap((component) => { + return examine_module(component, root, "dev"); + }); + + const imports = component_entries.reduce((acc, component) => { + const x = { + interactive: to_posix(join(component.frontend_dir, "interactive")), + static: to_posix(join(component.frontend_dir, "static")), + example: to_posix(join(component.frontend_dir, "example")) + }; + + const interactive = fs.existsSync(x.interactive) + ? 
`interactive: () => import("${x.interactive}"),\n` + : ""; + const example = fs.existsSync(x.example) + ? `example: () => import("${x.example}"),\n` + : ""; + return `${acc}"${component.component_class_id}": { + ${interactive} + ${example} + static: () => import("${x.static}") + },\n`; + }, ""); + + return `{${imports}}`; +} diff --git a/js/preview/src/examine.py b/js/preview/src/examine.py new file mode 100644 index 000000000000..604f3d2999bd --- /dev/null +++ b/js/preview/src/examine.py @@ -0,0 +1,71 @@ +import argparse +import importlib +import inspect +import os +from pathlib import Path + +from tomlkit import dumps, parse + +from gradio.blocks import BlockContext +from gradio.components import Component + +if __name__ == "__main__": + + parser = argparse.ArgumentParser(description="Description of your program") + parser.add_argument("-m", "--mode", help="Build mode or dev mode") + args = parser.parse_args() + + with open("../pyproject.toml") as f: + pyproject_source = f.read() + + pyproject_toml = parse(pyproject_source) + if "gradio custom component" not in pyproject_toml["project"]["keywords"]: + exit(0) + + module_name = pyproject_toml["project"]["name"] + module = importlib.import_module(module_name) + + artifacts: list[str] = pyproject_toml["tool"]["hatch"]["build"]["artifacts"] + + + def get_relative_path(path): + return ( + os.path.abspath(Path(__file__).parent / path) + .replace(os.path.abspath(os.getcwd()), "") + .lstrip("/") + ) + + + for name in dir(module): + value = getattr(module, name) + if name.startswith("__"): + continue + + if inspect.isclass(value) and ( + issubclass(value, BlockContext) or issubclass(value, Component) + ): + file_location = Path(inspect.getfile(value)).parent + + found = [ + x + for x in artifacts + if get_relative_path(Path("..") / x) + == get_relative_path(file_location / value.TEMPLATE_DIR) + ] + if len(found) == 0: + artifacts.append( + os.path.abspath(file_location / value.TEMPLATE_DIR) + .replace(os.path.abspath(Path("..")), "") + .lstrip("/") + ) + + print( + f"{name}~|~|~|~{os.path.abspath(file_location / value.TEMPLATE_DIR)}~|~|~|~{os.path.abspath(file_location / value.FRONTEND_DIR)}~|~|~|~{value.get_component_class_id()}" + ) + continue + + if args.mode == "build": + pyproject_toml["tool"]["hatch"]["build"]["artifacts"] = artifacts + + with open("../pyproject.toml", "w") as f: + f.write(dumps(pyproject_toml)) diff --git a/js/preview/src/index.ts b/js/preview/src/index.ts new file mode 100644 index 000000000000..f99c0d48024c --- /dev/null +++ b/js/preview/src/index.ts @@ -0,0 +1,180 @@ +import { ChildProcess, spawn, spawnSync } from "node:child_process"; +import * as net from "net"; + +import { create_server } from "./dev"; +import { make_build } from "./build"; +import { join } from "path"; +import which from "which"; + +export interface ComponentMeta { + name: string; + template_dir: string; + frontend_dir: string; + component_class_id: string; +} + +const args = process.argv.slice(2); +// get individual args as `--arg value` or `value` + +function parse_args(args: string[]): Record { + const arg_map: Record = {}; + for (let i = 0; i < args.length; i++) { + const arg = args[i]; + if (arg.startsWith("--")) { + const name = arg.slice(2); + const value = args[i + 1]; + arg_map[name] = value; + i++; + } + } + return arg_map; +} + +const parsed_args = parse_args(args); + +async function run(): Promise { + if (parsed_args.mode === "build") { + await make_build({ + component_dir: parsed_args["component-directory"], + root_dir: parsed_args.root 
+ }); + } else { + const [backend_port, frontend_port] = await find_free_ports(7860, 8860); + const options = { + component_dir: parsed_args["component-directory"], + root_dir: parsed_args.root, + frontend_port, + backend_port, + host: parsed_args.host, + ...parsed_args + }; + process.env.GRADIO_BACKEND_PORT = backend_port.toString(); + + const _process = spawn( + which.sync("gradio"), + [parsed_args.app, "--watch-dirs", options.component_dir], + { + shell: true, + stdio: "pipe", + cwd: process.cwd(), + env: { + ...process.env, + GRADIO_SERVER_PORT: backend_port.toString(), + PYTHONUNBUFFERED: "true" + } + } + ); + + _process.stdout.setEncoding("utf8"); + _process.stderr.setEncoding("utf8"); + + function std_out(mode: "stdout" | "stderr") { + return function (data: Buffer): void { + const _data = data.toString(); + + if (_data.includes("Running on")) { + create_server({ + component_dir: options.component_dir, + root_dir: options.root_dir, + frontend_port, + backend_port, + host: options.host + }); + } + + process[mode].write(_data); + }; + } + + _process.stdout.on("data", std_out("stdout")); + _process.stderr.on("data", std_out("stderr")); + _process.on("exit", () => kill_process(_process)); + _process.on("close", () => kill_process(_process)); + _process.on("disconnect", () => kill_process(_process)); + } +} + +function kill_process(process: ChildProcess): void { + process.kill("SIGKILL"); +} + +export { create_server }; + +run(); + +export async function find_free_ports( + start_port: number, + end_port: number +): Promise<[number, number]> { + let found_ports: number[] = []; + + for (let port = start_port; port < end_port; port++) { + if (await is_free_port(port)) { + found_ports.push(port); + if (found_ports.length === 2) { + return [found_ports[0], found_ports[1]]; + } + } + } + + throw new Error( + `Could not find free ports: there were not enough ports available.` + ); +} + +export function is_free_port(port: number): Promise { + return new Promise((accept, reject) => { + const sock = net.createConnection(port, "127.0.0.1"); + sock.once("connect", () => { + sock.end(); + accept(false); + }); + sock.once("error", (e) => { + sock.destroy(); + //@ts-ignore + if (e.code === "ECONNREFUSED") { + accept(true); + } else { + reject(e); + } + }); + }); +} + +function is_truthy(value: T | null | undefined | false): value is T { + return value !== null && value !== undefined && value !== false; +} + +export function examine_module( + component_dir: string, + root: string, + mode: "build" | "dev" +): ComponentMeta[] { + const _process = spawnSync( + which.sync("python"), + [join(root, "..", "..", "node", "examine.py"), "-m", mode], + { + cwd: join(component_dir, "backend"), + stdio: "pipe" + } + ); + + return _process.stdout + .toString() + .trim() + .split("\n") + .map((line) => { + const [name, template_dir, frontend_dir, component_class_id] = + line.split("~|~|~|~"); + if (name && template_dir && frontend_dir && component_class_id) { + return { + name: name.trim(), + template_dir: template_dir.trim(), + frontend_dir: frontend_dir.trim(), + component_class_id: component_class_id.trim() + }; + } + return false; + }) + .filter(is_truthy); +} diff --git a/js/preview/src/placeholder.ts b/js/preview/src/placeholder.ts new file mode 100644 index 000000000000..ff8b4c56321a --- /dev/null +++ b/js/preview/src/placeholder.ts @@ -0,0 +1 @@ +export default {}; diff --git a/js/preview/src/plugins.ts b/js/preview/src/plugins.ts new file mode 100644 index 000000000000..4a52ba97514e --- /dev/null +++ 
b/js/preview/src/plugins.ts @@ -0,0 +1,110 @@ +import type { Plugin, PluginOption } from "vite"; +import { svelte } from "@sveltejs/vite-plugin-svelte"; +import { transform } from "sucrase"; +import { viteCommonjs } from "@originjs/vite-plugin-commonjs"; +import sucrase from "@rollup/plugin-sucrase"; +import { createLogger } from "vite"; +import { join } from "path"; + +const svelte_codes_to_ignore: Record = { + "reactive-component": "Icon" +}; + +const RE_SVELTE_IMPORT = + /import\s+([\w*{},\s]+)\s+from\s+['"](svelte|svelte\/internal)['"]/g; + +export const plugins: PluginOption[] = [ + viteCommonjs() as Plugin, + svelte({ + onwarn(warning, handler) { + if ( + svelte_codes_to_ignore.hasOwnProperty(warning.code) && + svelte_codes_to_ignore[warning.code] && + warning.message.includes(svelte_codes_to_ignore[warning.code]) + ) { + return; + } + handler!(warning); + }, + prebundleSvelteLibraries: false, + hot: true, + compilerOptions: { + discloseVersion: false + }, + preprocess: [ + { + script: ({ attributes, filename, content }) => { + if (attributes.lang === "ts") { + const compiledCode = transform(content, { + transforms: ["typescript"], + keepUnusedImports: true + }); + return { + code: compiledCode.code, + map: compiledCode.sourceMap + }; + } + } + } + ] + }) as unknown as Plugin, + sucrase({ + transforms: ["typescript"], + include: ["**/*.ts", "**/*.tsx"] + }) +]; + +interface GradioPluginOptions { + mode: "dev" | "build"; + svelte_dir: string; + backend_port?: number; + imports?: string; +} + +export function make_gradio_plugin({ + mode, + svelte_dir, + backend_port, + imports +}: GradioPluginOptions): Plugin { + return { + name: "gradio", + enforce: "pre", + transform(code) { + const new_code = code.replace(RE_SVELTE_IMPORT, (str, $1, $2) => { + const identifier = $1.trim().startsWith("* as") + ? $1.replace("* as", "").trim() + : $1.trim(); + return `const ${identifier.replace( + " as ", + ": " + )} = window.__gradio__svelte__internal;`; + }); + return { + code: new_code, + map: null + }; + }, + resolveId(id) { + if ( + id !== "svelte" && + id !== "svelte/internal" && + id.startsWith("svelte/") + ) { + return join(svelte_dir, "svelte-submodules.js"); + } + }, + transformIndexHtml(html) { + return mode === "dev" + ? 
[ + { + tag: "script", + children: `window.__GRADIO_DEV__ = "dev"; + window.__GRADIO__SERVER_PORT__ = ${backend_port}; + window.__GRADIO__CC__ = ${imports};` + } + ] + : undefined; + } + }; +} diff --git a/js/preview/src/svelte-disclose.ts b/js/preview/src/svelte-disclose.ts new file mode 100644 index 000000000000..54354dfd5a9e --- /dev/null +++ b/js/preview/src/svelte-disclose.ts @@ -0,0 +1 @@ +export * from "svelte/internal/disclose-version"; diff --git a/js/preview/src/svelte-internal.ts b/js/preview/src/svelte-internal.ts new file mode 100644 index 000000000000..c51e78007f14 --- /dev/null +++ b/js/preview/src/svelte-internal.ts @@ -0,0 +1 @@ +export * from "svelte/internal"; diff --git a/js/preview/src/svelte-submodules.ts b/js/preview/src/svelte-submodules.ts new file mode 100644 index 000000000000..2c331977fd74 --- /dev/null +++ b/js/preview/src/svelte-submodules.ts @@ -0,0 +1,5 @@ +export * from "svelte/transition"; +export { spring, tweened } from "svelte/motion"; +export * from "svelte/store"; +export * from "svelte/easing"; +export * from "svelte/animate"; diff --git a/js/preview/src/svelte.ts b/js/preview/src/svelte.ts new file mode 100644 index 000000000000..69f74aae9d07 --- /dev/null +++ b/js/preview/src/svelte.ts @@ -0,0 +1,16 @@ +export { + // this proxy is very important, to ensure that we always refer to the same base Component class which is critical for our components to work + SvelteComponent as SvelteComponentDev, + SvelteComponent, + onMount, + onDestroy, + beforeUpdate, + afterUpdate, + setContext, + getContext, + getAllContexts, + hasContext, + tick, + createEventDispatcher, + SvelteComponentTyped +} from "svelte/internal"; diff --git a/js/preview/test/imageslider/.gitignore b/js/preview/test/imageslider/.gitignore new file mode 100644 index 000000000000..4162e6e9956a --- /dev/null +++ b/js/preview/test/imageslider/.gitignore @@ -0,0 +1,9 @@ +# Python build +.eggs/ +dist/ +*.pyc +__pycache__/ +*.py[cod] +*$py.class +__tmp/* +*.pyi \ No newline at end of file diff --git a/js/preview/test/imageslider/backend/imageslider/__init__.py b/js/preview/test/imageslider/backend/imageslider/__init__.py new file mode 100644 index 000000000000..190fe0e64385 --- /dev/null +++ b/js/preview/test/imageslider/backend/imageslider/__init__.py @@ -0,0 +1,4 @@ + +from .imageslider import ImageSlider + +__all__ = ['ImageSlider'] diff --git a/js/preview/test/imageslider/backend/imageslider/imageslider.py b/js/preview/test/imageslider/backend/imageslider/imageslider.py new file mode 100644 index 000000000000..1af96a57f8cc --- /dev/null +++ b/js/preview/test/imageslider/backend/imageslider/imageslider.py @@ -0,0 +1,329 @@ +"""gr.Image() component.""" + +from __future__ import annotations + +import warnings +from pathlib import Path +from typing import Any, Literal + +import numpy as np +import PIL +import PIL.ImageOps +from gradio_client import utils as client_utils +from gradio_client.documentation import document, set_documentation_group +from PIL import Image as _Image # using _ to minimize namespace pollution + +from gradio import processing_utils, utils +from gradio.components.base import Component, StreamingInput, _Keywords +from gradio.data_classes import FileData +from gradio.events import Events + +set_documentation_group("component") +_Image.init() # fixes https://github.com/gradio-app/gradio/issues/2843 + + +@document() +class ImageSlider(StreamingInput, Component): + """ + Creates an image component that can be used to upload/draw images (as an input) or display images (as an 
output). + Preprocessing: passes the uploaded image as a {numpy.array}, {PIL.Image} or {str} filepath depending on `type` -- unless `tool` is `sketch` AND source is one of `upload` or `webcam`. In these cases, a {dict} with keys `image` and `mask` is passed, and the format of the corresponding values depends on `type`. + Postprocessing: expects a {numpy.array}, {PIL.Image} or {str} or {pathlib.Path} filepath to an image and displays the image. + Examples-format: a {str} filepath to a local file that contains the image. + Demos: image_mod, image_mod_default_image + Guides: image-classification-in-pytorch, image-classification-in-tensorflow, image-classification-with-vision-transformers, building-a-pictionary_app, create-your-own-friends-with-a-gan + """ + + EVENTS = [ + Events.edit, + Events.clear, + Events.change, + Events.stream, + Events.select, + Events.upload, + ] + data_model = FileData + + def __init__( + self, + value: str | _Image.Image | np.ndarray | None = None, + *, + shape: tuple[int, int] | None = None, + height: int | None = None, + width: int | None = None, + image_mode: Literal[ + "1", "L", "P", "RGB", "RGBA", "CMYK", "YCbCr", "LAB", "HSV", "I", "F" + ] = "RGB", + invert_colors: bool = False, + source: Literal["upload", "webcam", "canvas"] = "upload", + tool: Literal["editor", "select", "sketch", "color-sketch"] | None = None, + type: Literal["numpy", "pil", "filepath"] = "numpy", + label: str | None = None, + every: float | None = None, + show_label: bool | None = None, + show_download_button: bool = True, + container: bool = True, + scale: int | None = None, + min_width: int = 160, + interactive: bool | None = None, + visible: bool = True, + streaming: bool = False, + elem_id: str | None = None, + elem_classes: list[str] | str | None = None, + mirror_webcam: bool = True, + brush_radius: float | None = None, + brush_color: str = "#000000", + mask_opacity: float = 0.7, + show_share_button: bool | None = None, + **kwargs, + ): + """ + Parameters: + value: A PIL Image, numpy array, path or URL for the default value that Image component is going to take. If callable, the function will be called whenever the app loads to set the initial value of the component. + shape: (width, height) shape to crop and resize image when passed to function. If None, matches input image size. Pass None for either width or height to only crop and resize the other. + height: Height of the displayed image in pixels. + width: Width of the displayed image in pixels. + image_mode: "RGB" if color, or "L" if black and white. See https://pillow.readthedocs.io/en/stable/handbook/concepts.html for other supported image modes and their meaning. + invert_colors: whether to invert the image as a preprocessing step. + source: Source of image. "upload" creates a box where the user can drop an image file, "webcam" allows the user to take a snapshot from their webcam, "canvas" defaults to a white image that can be edited and drawn upon with tools. + tool: Tools used for editing. "editor" allows a full screen editor (and is the default if source is "upload" or "webcam"), "select" provides a cropping and zoom tool, "sketch" allows you to create a binary sketch (and is the default if source="canvas"), and "color-sketch" allows you to create a sketch in different colors. "color-sketch" can be used with source="upload" or "webcam" to allow sketching on an image. "sketch" can also be used with "upload" or "webcam" to create a mask over an image, and in that case both the image and mask are passed into the function as a dictionary with keys "image" and "mask" respectively. + type: The format the image is converted to before being passed into the prediction function. "numpy" converts the image to a numpy array with shape (height, width, 3) and values from 0 to 255, "pil" converts the image to a PIL image object, "filepath" passes a str path to a temporary file containing the image. + label: component name in interface. + every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. + show_label: if True, will display label. + show_download_button: If True, will display button to download image. + container: If True, will place the component in a container - providing some extra padding around the border. + scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer. + min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first. + interactive: if True, will allow users to upload and edit an image; if False, can only be used to display images. If not provided, this is inferred based on whether the component is used as an input or output. + visible: If False, component will be hidden. + streaming: If True when used in a `live` interface, will automatically stream webcam feed. Only valid if source is 'webcam'. + elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. + elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles. + mirror_webcam: If True, webcam will be mirrored. Default is True. + brush_radius: Size of the brush for Sketch. Default is None, which chooses a sensible default. + brush_color: Color of the brush for Sketch as hex string. Default is "#000000". + mask_opacity: Opacity of mask drawn on image, as a value between 0 and 1. + show_share_button: If True, will show a share icon in the corner of the component that allows the user to share outputs to Hugging Face Spaces Discussions. If False, icon does not appear. If set to None (default behavior), then the icon appears if this Gradio app is launched on Spaces, but not otherwise. + """ + self.brush_radius = brush_radius + self.brush_color = brush_color + self.mask_opacity = mask_opacity + self.mirror_webcam = mirror_webcam + valid_types = ["numpy", "pil", "filepath"] + if type not in valid_types: + raise ValueError( + f"Invalid value for parameter `type`: {type}. Please choose from one of: {valid_types}" + ) + self.type = type + self.shape = shape + self.height = height + self.width = width + self.image_mode = image_mode + valid_sources = ["upload", "webcam", "canvas"] + if source not in valid_sources: + raise ValueError( + f"Invalid value for parameter `source`: {source}.
Please choose from one of: {valid_sources}" + ) + self.source = source + if tool is None: + self.tool = "sketch" if source == "canvas" else "editor" + else: + self.tool = tool + self.invert_colors = invert_colors + self.streaming = streaming + self.show_download_button = show_download_button + if streaming and source != "webcam": + raise ValueError("Image streaming only available if source is 'webcam'.") + self.show_share_button = ( + (utils.get_space() is not None) + if show_share_button is None + else show_share_button + ) + super().__init__( + label=label, + every=every, + show_label=show_label, + container=container, + scale=scale, + min_width=min_width, + interactive=interactive, + visible=visible, + elem_id=elem_id, + elem_classes=elem_classes, + value=value, + **kwargs, + ) + + def get_config(self): + return { + "image_mode": self.image_mode, + "shape": self.shape, + "height": self.height, + "width": self.width, + "source": self.source, + "tool": self.tool, + "value": self.value, + "streaming": self.streaming, + "mirror_webcam": self.mirror_webcam, + "brush_radius": self.brush_radius, + "brush_color": self.brush_color, + "mask_opacity": self.mask_opacity, + "selectable": self.selectable, + "show_share_button": self.show_share_button, + "show_download_button": self.show_download_button, + **Component.get_config(self), + } + + @staticmethod + def update( + value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, + height: int | None = None, + width: int | None = None, + label: str | None = None, + show_label: bool | None = None, + show_download_button: bool | None = None, + container: bool | None = None, + scale: int | None = None, + min_width: int | None = None, + interactive: bool | None = None, + visible: bool | None = None, + brush_radius: float | None = None, + brush_color: str | None = None, + mask_opacity: float | None = None, + show_share_button: bool | None = None, + ): + return { + "height": height, + "width": width, + "label": label, + "show_label": show_label, + "show_download_button": show_download_button, + "container": container, + "scale": scale, + "min_width": min_width, + "interactive": interactive, + "visible": visible, + "value": value, + "brush_radius": brush_radius, + "brush_color": brush_color, + "mask_opacity": mask_opacity, + "show_share_button": show_share_button, + "__type__": "update", + } + + def _format_image( + self, im: _Image.Image | None + ) -> np.ndarray | _Image.Image | str | None: + """Helper method to format an image based on self.type""" + if im is None: + return im + fmt = im.format + if self.type == "pil": + return im + elif self.type == "numpy": + return np.array(im) + elif self.type == "filepath": + path = self.pil_to_temp_file( + im, dir=self.DEFAULT_TEMP_DIR, format=fmt or "png" + ) + self.temp_files.add(path) + return path + else: + raise ValueError( + "Unknown type: " + + str(self.type) + + ". Please choose from: 'numpy', 'pil', 'filepath'." 
+ ) + + def preprocess( + self, x: str | dict[str, str] + ) -> np.ndarray | _Image.Image | str | dict | None: + """ + Parameters: + x: base64 url data, or (if tool == "sketch") a dict of image and mask base64 url data + Returns: + image in requested format, or (if tool == "sketch") a dict of image and mask in requested format + """ + if x is None: + return x + + mask = "" + if self.tool == "sketch" and self.source in ["upload", "webcam"]: + assert isinstance(x, dict) + x, mask = x["image"], x["mask"] + + if isinstance(x, str): + im = processing_utils.decode_base64_to_image(x) + else: + im = PIL.Image.open(self.make_temp_copy_if_needed(x["name"])) + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + im = im.convert(self.image_mode) + if self.shape is not None: + im = processing_utils.resize_and_crop(im, self.shape) + if self.invert_colors: + im = PIL.ImageOps.invert(im) + if ( + self.source == "webcam" + and self.mirror_webcam is True + and self.tool != "color-sketch" + ): + im = PIL.ImageOps.mirror(im) + + if self.tool == "sketch" and self.source in ["upload", "webcam"]: + mask_im = processing_utils.decode_base64_to_image(mask) + + if mask_im.mode == "RGBA": # whiten any opaque pixels in the mask + alpha_data = mask_im.getchannel("A").convert("L") + mask_im = _Image.merge("RGB", [alpha_data, alpha_data, alpha_data]) + return { + "image": self._format_image(im), + "mask": self._format_image(mask_im), + } + + return self._format_image(im) + + def postprocess( + self, y: np.ndarray | _Image.Image | str | Path | None + ) -> str | None: + """ + Parameters: + y: image as a numpy array, PIL Image, string/Path filepath, or string URL + Returns: + base64 url data + """ + if y is None: + return None + if isinstance(y, np.ndarray): + path = self.base64_to_temp_file_if_needed( + processing_utils.encode_array_to_base64(y), "file.png" + ) + elif isinstance(y, _Image.Image): + path = self.base64_to_temp_file_if_needed( + processing_utils.encode_pil_to_base64(y), "file.png" + ) + elif isinstance(y, (str, Path)): + name = y if isinstance(y, str) else y.name + if client_utils.is_http_url_like(name): + path = self.download_temp_copy_if_needed(name) + else: + path = self.make_temp_copy_if_needed(name) + else: + raise ValueError("Cannot process this value as an Image") + return FileData(name=path, data=None, is_file=True) + + def check_streamable(self): + if self.source != "webcam" and self.streaming: + raise ValueError("Image streaming only available if source is 'webcam'.") + + def as_example(self, input_data: str | None) -> str: + if input_data is None: + return "" + elif ( + self.root_url + ): # If an externally hosted image, don't convert to absolute path + return input_data + return str(utils.abspath(input_data)) + + def example_inputs(self) -> Any: + return "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png" diff --git a/js/preview/test/imageslider/demo/__init__.py b/js/preview/test/imageslider/demo/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/js/preview/test/imageslider/demo/app.py b/js/preview/test/imageslider/demo/app.py new file mode 100644 index 000000000000..62e6e1c89394 --- /dev/null +++ b/js/preview/test/imageslider/demo/app.py @@ -0,0 +1,9 @@ +import gradio as gr +from imageslider import ImageSlider + +with gr.Blocks() as demo: + ImageSlider(label="asd", interactive=True) + ImageSlider(label="Static", interactive=False) + + +demo.launch() diff --git a/js/preview/test/imageslider/frontend/example/Image.svelte 
b/js/preview/test/imageslider/frontend/example/Image.svelte new file mode 100644 index 000000000000..4544ce0b6882 --- /dev/null +++ b/js/preview/test/imageslider/frontend/example/Image.svelte @@ -0,0 +1,41 @@ + + + + + + + diff --git a/js/preview/test/imageslider/frontend/example/index.ts b/js/preview/test/imageslider/frontend/example/index.ts new file mode 100644 index 000000000000..f77af3dcfa03 --- /dev/null +++ b/js/preview/test/imageslider/frontend/example/index.ts @@ -0,0 +1 @@ +export { default } from "./Image.svelte"; diff --git a/js/preview/test/imageslider/frontend/interactive/Cropper.svelte b/js/preview/test/imageslider/frontend/interactive/Cropper.svelte new file mode 100644 index 000000000000..c26bf2a0775c --- /dev/null +++ b/js/preview/test/imageslider/frontend/interactive/Cropper.svelte @@ -0,0 +1,33 @@ + + + + + diff --git a/js/preview/test/imageslider/frontend/interactive/Image.svelte b/js/preview/test/imageslider/frontend/interactive/Image.svelte new file mode 100644 index 000000000000..748e3834a737 --- /dev/null +++ b/js/preview/test/imageslider/frontend/interactive/Image.svelte @@ -0,0 +1,447 @@ + + + + +
+ {#if source === "upload"} + + {#if (value === null && !static_image) || streaming} + + {:else if tool === "select"} + + (handle_clear(e), (tool = "editor"))} + /> + {:else if tool === "editor"} + (tool = "select")} + on:clear={handle_clear} + editable + /> + + + + + + {:else if (tool === "sketch" || tool === "color-sketch") && (value !== null || static_image)} + {#key static_image} + + {/key} + {#if img_width > 0} + + sketch.undo()} + on:clear_mask={handle_mask_clear} + on:remove_image={handle_sketch_clear} + /> + {#if tool === "color-sketch" || tool === "sketch"} + + {/if} + {/if} + {:else} + + + + hello + {/if} + + {:else if source === "canvas"} + sketch.undo()} + on:remove_image={handle_sketch_clear} + /> + {#if tool === "color-sketch"} + + {/if} + + {:else if (value === null && !static_image) || streaming} + {#if source === "webcam" && !static_image} + + tool === "color-sketch" ? handle_upload(e) : handle_save(e, true)} + on:stream={handle_save} + on:error + {streaming} + {pending} + {mirror_webcam} + /> + {/if} + {:else if tool === "select"} + + (handle_clear(e), (tool = "editor"))} + /> + {:else if tool === "editor"} + (tool = "select")} + on:clear={handle_clear} + editable + /> + + + + + + {:else if (tool === "sketch" || tool === "color-sketch") && (value !== null || static_image)} + {#key static_image} + + {/key} + {#if img_width > 0} + + sketch.undo()} + on:remove_image={handle_sketch_clear} + /> + {#if tool === "color-sketch" || tool === "sketch"} + + {/if} + {/if} + {:else} + + + + + + {/if} +
+ + diff --git a/js/preview/test/imageslider/frontend/interactive/InteractiveImage.svelte b/js/preview/test/imageslider/frontend/interactive/InteractiveImage.svelte new file mode 100644 index 000000000000..6c1aeb84673f --- /dev/null +++ b/js/preview/test/imageslider/frontend/interactive/InteractiveImage.svelte @@ -0,0 +1,104 @@ + + + + + + + + gradio.dispatch("edit")} + on:clear={() => gradio.dispatch("clear")} + on:stream={() => gradio.dispatch("stream")} + on:drag={({ detail }) => (dragging = detail)} + on:upload={() => gradio.dispatch("upload")} + on:select={({ detail }) => gradio.dispatch("select", detail)} + on:share={({ detail }) => gradio.dispatch("share", detail)} + on:error={({ detail }) => { + loading_status = loading_status || {}; + loading_status.status = "error"; + gradio.dispatch("error", detail); + }} + {label} + {show_label} + {pending} + {streaming} + {mirror_webcam} + i18n={gradio.i18n} + > + + + diff --git a/js/preview/test/imageslider/frontend/interactive/ModifySketch.svelte b/js/preview/test/imageslider/frontend/interactive/ModifySketch.svelte new file mode 100644 index 000000000000..8e603f247e95 --- /dev/null +++ b/js/preview/test/imageslider/frontend/interactive/ModifySketch.svelte @@ -0,0 +1,45 @@ + + +
+ dispatch("undo")} /> + + {#if show_eraser} + { + dispatch("clear_mask"); + event.stopPropagation(); + }} + /> + {/if} + + { + dispatch("remove_image"); + event.stopPropagation(); + }} + /> +
+ + diff --git a/js/preview/test/imageslider/frontend/interactive/Sketch.svelte b/js/preview/test/imageslider/frontend/interactive/Sketch.svelte new file mode 100644 index 000000000000..ea2654080f46 --- /dev/null +++ b/js/preview/test/imageslider/frontend/interactive/Sketch.svelte @@ -0,0 +1,624 @@ + + +
+ {#if line_count === 0} +
+ Start drawing +
+ {/if} + {#each canvas_types as { name, zIndex, opacity }} + + {/each} +
+ + diff --git a/js/preview/test/imageslider/frontend/interactive/SketchSettings.svelte b/js/preview/test/imageslider/frontend/interactive/SketchSettings.svelte new file mode 100644 index 000000000000..54303f98177d --- /dev/null +++ b/js/preview/test/imageslider/frontend/interactive/SketchSettings.svelte @@ -0,0 +1,77 @@ + + +
+ + (show_size = !show_size)} + /> + {#if show_size} + + {/if} + + + {#if mode !== "mask"} + + (show_col = !show_col)} + /> + {#if show_col} + + {/if} + + {/if} +
+ + diff --git a/js/preview/test/imageslider/frontend/interactive/Webcam.svelte b/js/preview/test/imageslider/frontend/interactive/Webcam.svelte new file mode 100644 index 000000000000..e1ec1195a900 --- /dev/null +++ b/js/preview/test/imageslider/frontend/interactive/Webcam.svelte @@ -0,0 +1,204 @@ + + +
+ +
+ + diff --git a/js/preview/test/imageslider/frontend/interactive/index.ts b/js/preview/test/imageslider/frontend/interactive/index.ts new file mode 100644 index 000000000000..dbae9def8ba4 --- /dev/null +++ b/js/preview/test/imageslider/frontend/interactive/index.ts @@ -0,0 +1,2 @@ +export { default as Webcam } from "./Webcam.svelte"; +export { default } from "./InteractiveImage.svelte"; diff --git a/js/preview/test/imageslider/frontend/package.json b/js/preview/test/imageslider/frontend/package.json new file mode 100644 index 000000000000..e9731340fd06 --- /dev/null +++ b/js/preview/test/imageslider/frontend/package.json @@ -0,0 +1,27 @@ +{ + "name": "imageslider", + "version": "0.2.0", + "description": "Gradio UI packages", + "type": "module", + "main": "./index.svelte", + "author": "", + "license": "ISC", + "private": true, + "dependencies": { + "@gradio/atoms": "workspace:^", + "@gradio/icons": "workspace:^", + "@gradio/statustracker": "workspace:^", + "@gradio/upload": "workspace:^", + "@gradio/utils": "workspace:^", + "cropperjs": "^1.5.12", + "lazy-brush": "^1.0.1", + "resize-observer-polyfill": "^1.5.1" + }, + "main_changeset": true, + "exports": { + "./package.json": "./package.json", + "./interactive": "./interactive/index.ts", + "./static": "./static/index.ts", + "./example": "./example/index.ts" + } +} \ No newline at end of file diff --git a/js/preview/test/imageslider/frontend/shared/utils.ts b/js/preview/test/imageslider/frontend/shared/utils.ts new file mode 100644 index 000000000000..e737479e39f2 --- /dev/null +++ b/js/preview/test/imageslider/frontend/shared/utils.ts @@ -0,0 +1,24 @@ +export const get_coordinates_of_clicked_image = ( + evt: MouseEvent +): [number, number] | null => { + let image = evt.currentTarget as HTMLImageElement; + + const imageRect = image.getBoundingClientRect(); + const xScale = image.naturalWidth / imageRect.width; + const yScale = image.naturalHeight / imageRect.height; + if (xScale > yScale) { + const displayed_height = image.naturalHeight / xScale; + const y_offset = (imageRect.height - displayed_height) / 2; + var x = Math.round((evt.clientX - imageRect.left) * xScale); + var y = Math.round((evt.clientY - imageRect.top - y_offset) * xScale); + } else { + const displayed_width = image.naturalWidth / yScale; + const x_offset = (imageRect.width - displayed_width) / 2; + var x = Math.round((evt.clientX - imageRect.left - x_offset) * yScale); + var y = Math.round((evt.clientY - imageRect.top) * yScale); + } + if (x < 0 || x >= image.naturalWidth || y < 0 || y >= image.naturalHeight) { + return null; + } + return [x, y]; +}; diff --git a/js/preview/test/imageslider/frontend/src/Image.svelte b/js/preview/test/imageslider/frontend/src/Image.svelte new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/js/preview/test/imageslider/frontend/static/ImagePreview.svelte b/js/preview/test/imageslider/frontend/static/ImagePreview.svelte new file mode 100644 index 000000000000..f0c06d1c339f --- /dev/null +++ b/js/preview/test/imageslider/frontend/static/ImagePreview.svelte @@ -0,0 +1,95 @@ + + + +{#if value_ === null} + +{:else} +
+ {#if show_download_button} + + + + {/if} + {#if show_share_button} + { + if (!value) return ""; + let url = await uploadToHuggingFace(value, "base64"); + return ``; + }} + {value} + /> + {/if} +
+ + + + +{/if} + + diff --git a/js/preview/test/imageslider/frontend/static/StaticImage.svelte b/js/preview/test/imageslider/frontend/static/StaticImage.svelte new file mode 100644 index 000000000000..30cae5f8138f --- /dev/null +++ b/js/preview/test/imageslider/frontend/static/StaticImage.svelte @@ -0,0 +1,72 @@ + + + + + + + gradio.dispatch("select", detail)} + on:share={({ detail }) => gradio.dispatch("share", detail)} + on:error={({ detail }) => gradio.dispatch("error", detail)} + {root} + {value} + {label} + {show_label} + {show_download_button} + {selectable} + {show_share_button} + i18n={gradio.i18n} + /> + diff --git a/js/preview/test/imageslider/frontend/static/index.ts b/js/preview/test/imageslider/frontend/static/index.ts new file mode 100644 index 000000000000..8d950f3e426f --- /dev/null +++ b/js/preview/test/imageslider/frontend/static/index.ts @@ -0,0 +1 @@ +export { default } from "./StaticImage.svelte"; diff --git a/js/preview/test/imageslider/pyproject.toml b/js/preview/test/imageslider/pyproject.toml new file mode 100644 index 000000000000..5c39ab3f4e0a --- /dev/null +++ b/js/preview/test/imageslider/pyproject.toml @@ -0,0 +1,40 @@ +[build-system] +requires = ["hatchling", "hatch-requirements-txt", "hatch-fancy-pypi-readme>=22.5.0"] +build-backend = "hatchling.build" + +[project] +name = "imageslider" +version = "0.0.1" +description = "Python library for easily interacting with trained machine learning models" +license = "Apache-2.0" +requires-python = ">=3.8" +authors = [ + { name = "YOUR NAME", email = "YOUREMAIL@domain.com" }, +] +keywords = ["machine learning", "reproducibility", "visualization"] +# Add dependencies here +dependencies = [] +classifiers = [ + 'Development Status :: 3 - Alpha', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3 :: Only', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: 3.11', + 'Topic :: Scientific/Engineering', + 'Topic :: Scientific/Engineering :: Artificial Intelligence', + 'Topic :: Scientific/Engineering :: Visualization', +] + +[tool.hatch.build] +artifacts = [ + "/backend/imageslider/templates", + "*.pyi" +] +packages = ["/backend/imageslider"] + +[tool.hatch.metadata] +allow-direct-references = true \ No newline at end of file diff --git a/js/preview/test/newnewtext/.gitignore b/js/preview/test/newnewtext/.gitignore new file mode 100644 index 000000000000..80a3eb23390b --- /dev/null +++ b/js/preview/test/newnewtext/.gitignore @@ -0,0 +1,10 @@ +# Python build +.eggs/ +dist/ +*.pyc +__pycache__/ +*.py[cod] +*$py.class +__tmp/* +*.pyi +templates/ \ No newline at end of file diff --git a/js/preview/test/newnewtext/backend/newnewtext/__init__.py b/js/preview/test/newnewtext/backend/newnewtext/__init__.py new file mode 100644 index 000000000000..4c7b7a5942bd --- /dev/null +++ b/js/preview/test/newnewtext/backend/newnewtext/__init__.py @@ -0,0 +1,4 @@ + +from .newnewtext import NewNewText + +__all__ = ['NewNewText'] diff --git a/js/preview/test/newnewtext/backend/newnewtext/newnewtext.py b/js/preview/test/newnewtext/backend/newnewtext/newnewtext.py new file mode 100644 index 000000000000..8057ec0b21c9 --- /dev/null +++ b/js/preview/test/newnewtext/backend/newnewtext/newnewtext.py @@ -0,0 +1,191 @@ +"""gr.Textbox() component.""" + +from __future__ import annotations + +from typing import Any, 
Callable, Literal + +from gradio_client.documentation import document, set_documentation_group + +from gradio.components.base import ( + Component, + FormComponent, + _Keywords, +) +from gradio.events import Events + +set_documentation_group("component") + + +@document() +class NewNewText(FormComponent): + """ + Creates a textarea for user to enter string input or display string output. + Preprocessing: passes textarea value as a {str} into the function. + Postprocessing: expects a {str} returned from function and sets textarea value to it. + Examples-format: a {str} representing the textbox input. + + Demos: hello_world, diff_texts, sentence_builder + Guides: creating-a-chatbot, real-time-speech-recognition + """ + + EVENTS = [Events.change, Events.input, Events.select, Events.submit, Events.focus] + + def __init__( + self, + value: str | Callable | None = "", + *, + lines: int = 1, + max_lines: int = 20, + placeholder: str | None = None, + label: str | None = None, + info: str | None = None, + every: float | None = None, + show_label: bool | None = None, + container: bool = True, + scale: int | None = None, + min_width: int = 160, + interactive: bool | None = None, + visible: bool = True, + elem_id: str | None = None, + autofocus: bool = False, + elem_classes: list[str] | str | None = None, + type: Literal["text", "password", "email"] = "text", + text_align: Literal["left", "right"] | None = None, + rtl: bool = False, + show_copy_button: bool = False, + **kwargs, + ): + """ + Parameters: + value: default text to provide in textarea. If callable, the function will be called whenever the app loads to set the initial value of the component. + lines: minimum number of line rows to provide in textarea. + max_lines: maximum number of line rows to provide in textarea. + placeholder: placeholder hint to provide behind textarea. + label: component name in interface. + info: additional component description. + every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. + show_label: if True, will display label. + container: If True, will place the component in a container - providing some extra padding around the border. + scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer. + min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first. + interactive: if True, will be rendered as an editable textbox; if False, editing will be disabled. If not provided, this is inferred based on whether the component is used as an input or output. + visible: If False, component will be hidden. + autofocus: If True, will focus on the textbox when the page loads. + elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. + elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles. + type: The type of textbox. One of: 'text', 'password', 'email', Default is 'text'. + text_align: How to align the text in the textbox, can be: "left", "right", or None (default). 
If None, the alignment is left if `rtl` is False, or right if `rtl` is True. Can only be changed if `type` is "text". + rtl: If True and `type` is "text", sets the direction of the text to right-to-left (cursor appears on the left of the text). Default is False, which renders cursor on the right. + show_copy_button: If True, includes a copy button to copy the text in the textbox. Only applies if show_label is True. + """ + if type not in ["text", "password", "email"]: + raise ValueError('`type` must be one of "text", "password", or "email".') + + self.lines = lines + if type == "text": + self.max_lines = max(lines, max_lines) + else: + self.max_lines = 1 + self.placeholder = placeholder + self.show_copy_button = show_copy_button + self.autofocus = autofocus + super().__init__( + label=label, + info=info, + every=every, + show_label=show_label, + container=container, + scale=scale, + min_width=min_width, + interactive=interactive, + visible=visible, + elem_id=elem_id, + elem_classes=elem_classes, + value=value, + **kwargs, + ) + self.type = type + self.rtl = rtl + self.text_align = text_align + + def get_config(self): + return { + "lines": self.lines, + "max_lines": self.max_lines, + "placeholder": self.placeholder, + "value": self.value, + "type": self.type, + "autofocus": self.autofocus, + "show_copy_button": self.show_copy_button, + "container": self.container, + "text_align": self.text_align, + "rtl": self.rtl, + **Component.get_config(self), + } + + @staticmethod + def update( + value: str | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, + lines: int | None = None, + max_lines: int | None = None, + placeholder: str | None = None, + label: str | None = None, + info: str | None = None, + show_label: bool | None = None, + container: bool | None = None, + scale: int | None = None, + min_width: int | None = None, + visible: bool | None = None, + interactive: bool | None = None, + type: Literal["text", "password", "email"] | None = None, + text_align: Literal["left", "right"] | None = None, + rtl: bool | None = None, + show_copy_button: bool | None = None, + autofocus: bool | None = None, + ): + return { + "lines": lines, + "max_lines": max_lines, + "placeholder": placeholder, + "label": label, + "info": info, + "show_label": show_label, + "container": container, + "scale": scale, + "min_width": min_width, + "visible": visible, + "value": value, + "type": type, + "interactive": interactive, + "show_copy_button": show_copy_button, + "autofocus": autofocus, + "text_align": text_align, + "rtl": rtl, + "__type__": "update", + } + + def preprocess(self, x: str | None) -> str | None: + """ + Preprocesses input (converts it to a string) before passing it to the function. + Parameters: + x: text + Returns: + text + """ + return None if x is None else str(x) + + def postprocess(self, y: str | None) -> str | None: + """ + Postprocesses the function output y by converting it to a str before passing it to the frontend. + Parameters: + y: function output to postprocess. + Returns: + text + """ + return None if y is None else str(y) + + def api_info(self) -> dict[str, list[str]]: + return {"type": "string"} + + def example_inputs(self) -> Any: + return "Hello!!"
diff --git a/js/preview/test/newnewtext/demo/__init__.py b/js/preview/test/newnewtext/demo/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/js/preview/test/newnewtext/demo/app.py b/js/preview/test/newnewtext/demo/app.py new file mode 100644 index 000000000000..9d96f3fb5f23 --- /dev/null +++ b/js/preview/test/newnewtext/demo/app.py @@ -0,0 +1,7 @@ +import gradio as gr +from newnewtext import NewNewText + +with gr.Blocks() as demo: + NewNewText() + +demo.launch() diff --git a/js/preview/test/newnewtext/frontend/example/Textbox.svelte b/js/preview/test/newnewtext/frontend/example/Textbox.svelte new file mode 100644 index 000000000000..1fdf7500fa28 --- /dev/null +++ b/js/preview/test/newnewtext/frontend/example/Textbox.svelte @@ -0,0 +1,46 @@ + + +
+ {value} +
+ + diff --git a/js/preview/test/newnewtext/frontend/example/index.ts b/js/preview/test/newnewtext/frontend/example/index.ts new file mode 100644 index 000000000000..42397adf0d93 --- /dev/null +++ b/js/preview/test/newnewtext/frontend/example/index.ts @@ -0,0 +1 @@ +export { default } from "./Textbox.svelte"; diff --git a/js/preview/test/newnewtext/frontend/interactive/InteractiveTextbox.svelte b/js/preview/test/newnewtext/frontend/interactive/InteractiveTextbox.svelte new file mode 100644 index 000000000000..47226d4eebe2 --- /dev/null +++ b/js/preview/test/newnewtext/frontend/interactive/InteractiveTextbox.svelte @@ -0,0 +1,85 @@ + + + + + + {#if loading_status} + + {/if} + + gradio.dispatch("change", value)} + on:input={() => gradio.dispatch("input")} + on:submit={() => gradio.dispatch("submit")} + on:blur={() => gradio.dispatch("blur")} + on:select={(e) => gradio.dispatch("select", e.detail)} + on:focus={() => gradio.dispatch("focus")} + /> + diff --git a/js/preview/test/newnewtext/frontend/interactive/index.ts b/js/preview/test/newnewtext/frontend/interactive/index.ts new file mode 100644 index 000000000000..a036461c736f --- /dev/null +++ b/js/preview/test/newnewtext/frontend/interactive/index.ts @@ -0,0 +1 @@ +export { default } from "./InteractiveTextbox.svelte"; diff --git a/js/timeseries/package.json b/js/preview/test/newnewtext/frontend/package.json similarity index 55% rename from js/timeseries/package.json rename to js/preview/test/newnewtext/frontend/package.json index 318f949182bf..1d526c1e720c 100644 --- a/js/timeseries/package.json +++ b/js/preview/test/newnewtext/frontend/package.json @@ -1,6 +1,6 @@ { - "name": "@gradio/timeseries", - "version": "0.0.8", + "name": "newnewtext", + "version": "0.1.2", "description": "Gradio UI packages", "type": "module", "main": "index.svelte", @@ -18,17 +18,6 @@ "@gradio/atoms": "workspace:^", "@gradio/icons": "workspace:^", "@gradio/statustracker": "workspace:^", - "@gradio/theme": "workspace:^", - "@gradio/tooltip": "workspace:^", - "@gradio/upload": "workspace:^", - "@gradio/utils": "workspace:^", - "d3-dsv": "^3.0.1", - "d3-scale": "^4.0.2", - "d3-shape": "^3.2.0" - }, - "devDependencies": { - "@types/d3-dsv": "^3.0.0", - "@types/d3-scale": "^4.0.3", - "@types/d3-shape": "^3.1.1" + "@gradio/utils": "workspace:^" } } diff --git a/js/preview/test/newnewtext/frontend/shared/Dynamic.svelte b/js/preview/test/newnewtext/frontend/shared/Dynamic.svelte new file mode 100644 index 000000000000..cffa5b303e38 --- /dev/null +++ b/js/preview/test/newnewtext/frontend/shared/Dynamic.svelte @@ -0,0 +1,297 @@ + + + +