From 56245276e701f7e4f81228af6e523d4c305af4ed Mon Sep 17 00:00:00 2001 From: Abubakar Abid Date: Fri, 17 Feb 2023 18:53:21 -0600 Subject: [PATCH] Added diffusers demo with batching & some misc improvements to doc (#3224) * added diffusers demo * guide updates * changelog * update demo * notebook --- CHANGELOG.md | 1 + demo/diffusers_with_batching/requirements.txt | 3 +++ demo/diffusers_with_batching/run.ipynb | 1 + demo/diffusers_with_batching/run.py | 22 +++++++++++++++++++ guides/01_getting-started/02_key-features.md | 6 +++-- .../02_controlling-layout.md | 4 +++- 6 files changed, 34 insertions(+), 3 deletions(-) create mode 100644 demo/diffusers_with_batching/requirements.txt create mode 100644 demo/diffusers_with_batching/run.ipynb create mode 100644 demo/diffusers_with_batching/run.py diff --git a/CHANGELOG.md b/CHANGELOG.md index e61f40722a..91b9bd9278 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -81,6 +81,7 @@ By [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3165](https://github. * Keep pnginfo metadata for gallery by [@wfng92](https://github.com/wfng92) in [PR 3150](https://github.com/gradio-app/gradio/pull/3150) * Add a section on how to run a Gradio app locally [@osanseviero](https://github.com/osanseviero) in [PR 3170](https://github.com/gradio-app/gradio/pull/3170) * Fixed typos in gradio events function documentation by [@vidalmaxime](https://github.com/vidalmaxime) in [PR 3168](https://github.com/gradio-app/gradio/pull/3168) +* Added an example using Gradio's batch mode with the diffusers library by [@abidlabs](https://github.com/abidlabs) in [PR 3224](https://github.com/gradio-app/gradio/pull/3224) ## Testing and Infrastructure Changes: No changes to highlight. 
diff --git a/demo/diffusers_with_batching/requirements.txt b/demo/diffusers_with_batching/requirements.txt new file mode 100644 index 0000000000..b6ca9fbe3c --- /dev/null +++ b/demo/diffusers_with_batching/requirements.txt @@ -0,0 +1,3 @@ +torch +transformers +diffusers \ No newline at end of file diff --git a/demo/diffusers_with_batching/run.ipynb b/demo/diffusers_with_batching/run.ipynb new file mode 100644 index 0000000000..9313209a63 --- /dev/null +++ b/demo/diffusers_with_batching/run.ipynb @@ -0,0 +1 @@ +{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: diffusers_with_batching"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch transformers diffusers"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import torch\n", "from diffusers import DiffusionPipeline\n", "import gradio as gr\n", "\n", "generator = DiffusionPipeline.from_pretrained(\"CompVis/ldm-text2im-large-256\")\n", "# move to GPU if available\n", "if torch.cuda.is_available():\n", " generator = generator.to(\"cuda\")\n", "\n", "def generate(prompts):\n", " images = generator(list(prompts)).images\n", " return [images]\n", "\n", "demo = gr.Interface(generate, \n", " \"textbox\", \n", " \"image\", \n", " batch=True, \n", " max_batch_size=4 # Set the batch size based on your CPU/GPU memory\n", ").queue()\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5} \ No newline at end of file diff --git a/demo/diffusers_with_batching/run.py b/demo/diffusers_with_batching/run.py new file mode 100644 index 0000000000..6923d1c78b --- /dev/null +++ b/demo/diffusers_with_batching/run.py @@ -0,0 +1,22 @@ +import torch +from diffusers import DiffusionPipeline +import 
gradio as gr + +generator = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256") +# move to GPU if available +if torch.cuda.is_available(): + generator = generator.to("cuda") + +def generate(prompts): + images = generator(list(prompts)).images + return [images] + +demo = gr.Interface(generate, + "textbox", + "image", + batch=True, + max_batch_size=4 # Set the batch size based on your CPU/GPU memory +).queue() + +if __name__ == "__main__": + demo.launch() diff --git a/guides/01_getting-started/02_key-features.md b/guides/01_getting-started/02_key-features.md index 56242a3b35..d561565de4 100644 --- a/guides/01_getting-started/02_key-features.md +++ b/guides/01_getting-started/02_key-features.md @@ -258,6 +258,8 @@ demo.launch() In the example above, 16 requests could be processed in parallel (for a total inference time of 5 seconds), instead of each request being processed separately (for a total -inference time of 80 seconds). +inference time of 80 seconds). Many Hugging Face `transformers` and `diffusers` models +work very naturally with Gradio's batch mode: here's [an example demo using diffusers to +generate images in batches](https://github.com/gradio-app/gradio/blob/main/demo/diffusers_with_batching/run.py) -Supplying a generator into Gradio **requires** you to enable queuing in the underlying Interface or Blocks (see the queuing section above). +Note: using batch functions with Gradio **requires** you to enable queuing in the underlying Interface or Blocks (see the queuing section above). 
diff --git a/guides/03_building-with-blocks/02_controlling-layout.md b/guides/03_building-with-blocks/02_controlling-layout.md index 1d2bc93b5f..deb70d0691 100644 --- a/guides/03_building-with-blocks/02_controlling-layout.md +++ b/guides/03_building-with-blocks/02_controlling-layout.md @@ -57,8 +57,10 @@ Both Components and Layout elements have a `visible` argument that can set initi $code_blocks_form $demo_blocks_form +## Variable Number of Outputs + By adjusting the visibility of components in a dynamic way, it is possible to create -machine learning demos that support *variable numbers of outputs*. Here's a simple example +demos with Gradio that support a *variable number of outputs*. Here's a very simple example where the number of output textboxes is controlled by an input slider: $code_variable_outputs