Fix various issues with demos on website (#6268)

* fix demos

* demos on landing page

* make code interactive on playground

* add changeset

* try new secret

* formatting

* fix fake_gan

* demo notebooks

---------

Co-authored-by: gradio-pr-bot <gradio-pr-bot@users.noreply.github.com>
Co-authored-by: Abubakar Abid <abubakar@huggingface.co>
Ali Abdalla 2023-11-02 17:35:07 -07:00 committed by GitHub
parent e32bac8944
commit de36820ef5
61 changed files with 81 additions and 1649 deletions


@@ -0,0 +1,6 @@
+---
+"gradio": minor
+"website": minor
+---
+
+feat:Fix various issues with demos on website


@@ -62,7 +62,7 @@ jobs:
 github.event.workflow_run.conclusion == 'success'
 id: upload-website-demos
 run: |
-python scripts/upload_website_demos.py --AUTH_TOKEN ${{ secrets.SPACES_DEPLOY_TOKEN }} \
+python scripts/upload_website_demos.py --AUTH_TOKEN ${{ secrets.WEBSITE_SPACES_DEPLOY_TOKEN }} \
 --WHEEL_URL https://gradio-builds.s3.amazonaws.com/${{ steps.set-outputs.outputs.gh_sha }}/ \
 --GRADIO_VERSION ${{ steps.set-outputs.outputs.gradio_version }}
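
Note: the only change here is which repository secret is read; the invocation is otherwise unchanged. For context, a minimal argparse sketch of how scripts/upload_website_demos.py might consume these flags (the flag names come from the invocation above; the parsing and body are assumptions, not the script's actual code):

import argparse

# Hypothetical sketch -- flag names mirror the workflow invocation above
parser = argparse.ArgumentParser()
parser.add_argument("--AUTH_TOKEN", required=True)      # Spaces deploy token (now WEBSITE_SPACES_DEPLOY_TOKEN)
parser.add_argument("--WHEEL_URL", required=True)       # S3 URL of the gradio wheel built for this commit
parser.add_argument("--GRADIO_VERSION", required=True)  # version string for the demo Spaces
args = parser.parse_args()
# ... upload each website demo Space using args.AUTH_TOKEN (placeholder body)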

File diff suppressed because one or more lines are too long


@@ -77,7 +77,7 @@ def segment(input):
 import gradio as gr
-i = gr.Image(shape=(112, 112), label="Echocardiogram")
+i = gr.Image(label="Echocardiogram")
 o = gr.Image(label="Segmentation Mask")
 examples = [["img1.jpg"], ["img2.jpg"]]
@@ -85,4 +85,4 @@ title = None #"Left Ventricle Segmentation"
 description = "This semantic segmentation model identifies the left ventricle in echocardiogram images."
 # videos. Accurate evaluation of the motion and size of the left ventricle is crucial for the assessment of cardiac function and ejection fraction. In this interface, the user inputs apical-4-chamber images from echocardiography videos and the model will output a prediction of the localization of the left ventricle in blue. This model was trained on the publicly released EchoNet-Dynamic dataset of 10k echocardiogram videos with 20k expert annotations of the left ventricle and published as part of Video-based AI for beat-to-beat assessment of cardiac function by Ouyang et al. in Nature, 2020."
 thumbnail = "https://raw.githubusercontent.com/gradio-app/hub-echonet/master/thumbnail.png"
-gr.Interface(segment, i, o, examples=examples, allow_flagging=False, analytics_enabled=False, thumbnail=thumbnail, cache_examples=False).launch()
+gr.Interface(segment, i, o, examples=examples, analytics_enabled=False, thumbnail=thumbnail, cache_examples=False).launch()
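
Note: gr.Image dropped its shape argument in Gradio 4, and allow_flagging=False is no longer a supported spelling (the string "never" is), which is what this hunk tracks. A minimal sketch of the equivalent behavior, assuming the resize now happens inside the prediction function:

import gradio as gr
from PIL import Image

def segment(img: Image.Image) -> Image.Image:
    img = img.resize((112, 112))  # replaces the removed shape=(112, 112) on gr.Image
    # ... run the actual segmentation model here (omitted)
    return img

demo = gr.Interface(segment, gr.Image(type="pil", label="Echocardiogram"), gr.Image(label="Segmentation Mask"))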


@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: animeganv2\n", "### Recreate the viral AnimeGAN image transformation demo.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch torchvision Pillow gdown numpy scipy cmake onnxruntime-gpu opencv-python-headless"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/animeganv2/gongyoo.jpeg\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/animeganv2/groot.jpeg"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import torch\n", "\n", "model2 = torch.hub.load(\n", " \"AK391/animegan2-pytorch:main\",\n", " \"generator\",\n", " pretrained=True,\n", " progress=False\n", ")\n", "model1 = torch.hub.load(\"AK391/animegan2-pytorch:main\", \"generator\", pretrained=\"face_paint_512_v1\")\n", "face2paint = torch.hub.load(\n", " 'AK391/animegan2-pytorch:main', 'face2paint', \n", " size=512,side_by_side=False\n", ")\n", "\n", "def inference(img, ver):\n", " if ver == 'version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)':\n", " out = face2paint(model2, img)\n", " else:\n", " out = face2paint(model1, img)\n", " return out\n", "\n", "title = \"AnimeGANv2\"\n", "description = \"Gradio Demo for AnimeGanv2 Face Portrait. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Please use a cropped portrait picture for best results similar to the examples below.\"\n", "article = \"<p style='text-align: center'><a href='https://github.com/bryandlee/animegan2-pytorch' target='_blank'>Github Repo Pytorch</a></p> <center><img src='https://visitor-badge.glitch.me/badge?page_id=akhaliq_animegan' alt='visitor badge'></center></p>\"\n", "examples=[['groot.jpeg','version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)'],['gongyoo.jpeg','version 1 (\ud83d\udd3a stylization, \ud83d\udd3b robustness)']]\n", "\n", "demo = gr.Interface(\n", " fn=inference, \n", " inputs=[gr.inputs.Image(type=\"pil\"),gr.inputs.Radio(['version 1 (\ud83d\udd3a stylization, \ud83d\udd3b robustness)','version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)'], type=\"value\", default='version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)', label='version')], \n", " outputs=gr.outputs.Image(type=\"pil\"),\n", " title=title,\n", " description=description,\n", " article=article,\n", " examples=examples)\n", "\n", "demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: animeganv2\n", "### Recreate the viral AnimeGAN image transformation demo.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch torchvision Pillow gdown numpy scipy cmake onnxruntime-gpu opencv-python-headless"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/animeganv2/gongyoo.jpeg\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/animeganv2/groot.jpeg"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import torch\n", "\n", "model2 = torch.hub.load(\n", " \"AK391/animegan2-pytorch:main\",\n", " \"generator\",\n", " pretrained=True,\n", " progress=False\n", ")\n", "model1 = torch.hub.load(\"AK391/animegan2-pytorch:main\", \"generator\", pretrained=\"face_paint_512_v1\")\n", "face2paint = torch.hub.load(\n", " 'AK391/animegan2-pytorch:main', 'face2paint', \n", " size=512,side_by_side=False\n", ")\n", "\n", "def inference(img, ver):\n", " if ver == 'version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)':\n", " out = face2paint(model2, img)\n", " else:\n", " out = face2paint(model1, img)\n", " return out\n", "\n", "title = \"AnimeGANv2\"\n", "description = \"Gradio Demo for AnimeGanv2 Face Portrait. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Please use a cropped portrait picture for best results similar to the examples below.\"\n", "article = \"<p style='text-align: center'><a href='https://github.com/bryandlee/animegan2-pytorch' target='_blank'>Github Repo Pytorch</a></p> <center><img src='https://visitor-badge.glitch.me/badge?page_id=akhaliq_animegan' alt='visitor badge'></center></p>\"\n", "examples=[['groot.jpeg','version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)'],['gongyoo.jpeg','version 1 (\ud83d\udd3a stylization, \ud83d\udd3b robustness)']]\n", "\n", "demo = gr.Interface(\n", " fn=inference, \n", " inputs=[gr.Image(type=\"pil\"),gr.Radio(['version 1 (\ud83d\udd3a stylization, \ud83d\udd3b robustness)','version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)'], type=\"value\", value='version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)', label='version')], \n", " outputs=gr.Image(type=\"pil\"),\n", " title=title,\n", " description=description,\n", " article=article,\n", " examples=examples)\n", "\n", "demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}


@@ -27,8 +27,8 @@ examples=[['groot.jpeg','version 2 (🔺 robustness,🔻 stylization)'],['gongyo
 demo = gr.Interface(
     fn=inference,
-    inputs=[gr.inputs.Image(type="pil"),gr.inputs.Radio(['version 1 (🔺 stylization, 🔻 robustness)','version 2 (🔺 robustness,🔻 stylization)'], type="value", default='version 2 (🔺 robustness,🔻 stylization)', label='version')],
-    outputs=gr.outputs.Image(type="pil"),
+    inputs=[gr.Image(type="pil"),gr.Radio(['version 1 (🔺 stylization, 🔻 robustness)','version 2 (🔺 robustness,🔻 stylization)'], type="value", value='version 2 (🔺 robustness,🔻 stylization)', label='version')],
+    outputs=gr.Image(type="pil"),
     title=title,
     description=description,
     article=article,
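
Note: this is the Gradio 4 migration pattern that recurs throughout this commit: the gr.inputs / gr.outputs namespaces are gone, components are constructed from the top-level namespace, and default= becomes value=. A minimal before/after sketch:

import gradio as gr

# Gradio 3.x (removed):
#   gr.inputs.Radio(choices, type="value", default="version 2", label="version")
# Gradio 4.x equivalent:
radio = gr.Radio(
    ["version 1", "version 2"],  # choices
    value="version 2",           # was default= under the old gr.inputs namespace
    label="version",
)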

Binary image file deleted (1.8 KiB); contents not shown.

Binary image file deleted (18 KiB); contents not shown.

Binary image file deleted (24 KiB); contents not shown.

File diff suppressed because one or more lines are too long


@@ -1,157 +0,0 @@
import gradio as gr
from gradio.components import Markdown as md
from PIL import Image
demo = gr.Blocks()
io1a = gr.Interface(lambda x: x, gr.Image(), gr.Image())
io1b = gr.Interface(lambda x: x, gr.Image(source="webcam"), gr.Image())
io2a = gr.Interface(lambda x: x, gr.Image(source="canvas"), gr.Image())
io2b = gr.Interface(lambda x: x, gr.Sketchpad(), gr.Image())
io2c = gr.Interface(
lambda x: x, gr.Image(source="canvas", shape=(512, 512)), gr.Image()
)
io3a = gr.Interface(
lambda x: [x["mask"], x["image"]],
gr.Image(source="upload", tool="sketch"),
[gr.Image(), gr.Image()],
)
io3b = gr.Interface(
lambda x: [x["mask"], x["image"]],
gr.ImageMask(),
[gr.Image(), gr.Image()],
)
io3b2 = gr.Interface(
lambda x: [x["mask"], x["image"]],
gr.ImageMask(),
[gr.Image(), gr.Image()],
)
io3b3 = gr.Interface(
lambda x: [x["mask"], x["image"]],
gr.ImageMask(),
[gr.Image(), gr.Image()],
)
io3c = gr.Interface(
lambda x: [x["mask"], x["image"]],
gr.Image(source="webcam", tool="sketch"),
[gr.Image(), gr.Image()],
)
io4a = gr.Interface(
lambda x: x, gr.Image(source="canvas", tool="color-sketch"), gr.Image()
)
io4b = gr.Interface(lambda x: x, gr.Paint(), gr.Image())
io5a = gr.Interface(
lambda x: x, gr.Image(source="upload", tool="color-sketch"), gr.Image()
)
io5b = gr.Interface(lambda x: x, gr.ImagePaint(), gr.Image())
io5c = gr.Interface(
lambda x: x, gr.Image(source="webcam", tool="color-sketch"), gr.Image()
)
def save_image(image):
image.save("colorede.png")
return image
img = Image.new("RGB", (512, 512), (150, 150, 150))
img.save("image.png", "PNG")
io5d = gr.Interface(
save_image,
gr.Image("image.png", source="upload", tool="color-sketch", type="pil"),
gr.Image(),
)
with demo:
md("# Different Ways to Use the Image Input Component")
md(
"**1a. Standalone Image Upload: `gr.Interface(lambda x: x, gr.Image(), gr.Image())`**"
)
io1a.render()
md(
"**1b. Standalone Image from Webcam: `gr.Interface(lambda x: x, gr.Image(source='webcam'), gr.Image())`**"
)
io1b.render()
md(
"**2a. Black and White Sketchpad: `gr.Interface(lambda x: x, gr.Image(source='canvas'), gr.Image())`**"
)
io2a.render()
md(
"**2b. Black and White Sketchpad: `gr.Interface(lambda x: x, gr.Sketchpad(), gr.Image())`**"
)
io2b.render()
md("**2c. Black and White Sketchpad with `shape=(512,512)`**")
io2c.render()
md("**3a. Binary Mask with image upload:**")
md(
"""```python
gr.Interface(
lambda x: [x['mask'], x['image']],
gr.Image(source='upload', tool='sketch'),
[gr.Image(), gr.Image()],
)
```
"""
)
io3a.render()
md("**3b. Binary Mask with image upload:**")
md(
"""```python
gr.Interface(
lambda x: [x['mask'], x['image']],
gr.ImageMask(),
[gr.Image(), gr.Image()],
)
```
"""
)
io3b.render()
md("**3c. Binary Mask with webcam upload:**")
md(
"""```python
gr.Interface(
lambda x: [x['mask'], x['image']],
gr.Image(source='webcam', tool='sketch'),
[gr.Image(), gr.Image()],
)
```
"""
)
io3c.render()
md(
"**4a. Color Sketchpad: `gr.Interface(lambda x: x, gr.Image(source='canvas', tool='color-sketch'), gr.Image())`**"
)
io4a.render()
md("**4b. Color Sketchpad: `gr.Interface(lambda x: x, gr.Paint(), gr.Image())`**")
io4b.render()
md(
"**5a. Color Sketchpad with image upload: `gr.Interface(lambda x: x, gr.Image(source='upload', tool='color-sketch'), gr.Image())`**"
)
io5a.render()
md(
"**5b. Color Sketchpad with image upload: `gr.Interface(lambda x: x, gr.ImagePaint(), gr.Image())`**"
)
io5b.render()
md(
"**5c. Color Sketchpad with webcam upload: `gr.Interface(lambda x: x, gr.Image(source='webcam', tool='color-sketch'), gr.Image())`**"
)
io5c.render()
md("**Tabs**")
with gr.Tab("One"):
io3b2.render()
with gr.Tab("Two"):
io3b3.render()
md("**5d. Color Sketchpad with image upload and a default images**")
io5d.render()
if __name__ == "__main__":
demo.launch()
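
Note: this demo was deleted rather than ported: every variant depends on the source= and tool= arguments (and the gr.Paint / gr.ImagePaint / gr.ImageMask / gr.Sketchpad shorthands) that Gradio 4 removed. For the upload/webcam cases, the closest Gradio 4 spelling is the sources= list; a minimal sketch:

import gradio as gr

# Gradio 4 replaces source="webcam" / source="upload" with a sources list
io = gr.Interface(
    lambda x: x,                             # identity function, echoes the image back
    gr.Image(sources=["upload", "webcam"]),  # user may upload or capture
    gr.Image(),
)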


@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: blocks_webcam"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import numpy as np\n", "\n", "import gradio as gr\n", "\n", "\n", "def snap(image):\n", " return np.flipud(image)\n", "\n", "\n", "demo = gr.Interface(snap, \"webcam\", \"image\")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: blocks_webcam"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import numpy as np\n", "\n", "import gradio as gr\n", "\n", "\n", "def snap(image):\n", " return np.flipud(image)\n", "\n", "\n", "demo = gr.Interface(snap, gr.Image(sources=[\"webcam\"]), \"image\")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}


@@ -7,7 +7,7 @@ def snap(image):
     return np.flipud(image)
 
 
-demo = gr.Interface(snap, "webcam", "image")
+demo = gr.Interface(snap, gr.Image(sources=["webcam"]), "image")
 
 if __name__ == "__main__":
     demo.launch()
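
Note: the "webcam" string shorthand no longer maps to a webcam-only input in Gradio 4, hence the explicit gr.Image(sources=["webcam"]). A related sketch, assuming the same API: combining sources=["webcam"] with streaming=True and live=True processes frames continuously instead of on submit:

import numpy as np
import gradio as gr

def flip(im):
    return np.flipud(im)  # flip each frame vertically

# streaming=True sends webcam frames as they arrive; live=True re-runs on every frame
demo = gr.Interface(flip, gr.Image(sources=["webcam"], streaming=True), "image", live=True)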


@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: cancel_events"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import time\n", "import gradio as gr\n", "\n", "\n", "def fake_diffusion(steps):\n", " for i in range(steps):\n", " print(f\"Current step: {i}\")\n", " time.sleep(1)\n", " yield str(i)\n", "\n", "\n", "def long_prediction(*args, **kwargs):\n", " time.sleep(10)\n", " return 42\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " n = gr.Slider(1, 10, value=9, step=1, label=\"Number Steps\")\n", " run = gr.Button(value=\"Start Iterating\")\n", " output = gr.Textbox(label=\"Iterative Output\")\n", " stop = gr.Button(value=\"Stop Iterating\")\n", " with gr.Column():\n", " textbox = gr.Textbox(label=\"Prompt\")\n", " prediction = gr.Number(label=\"Expensive Calculation\")\n", " run_pred = gr.Button(value=\"Run Expensive Calculation\")\n", " with gr.Column():\n", " cancel_on_change = gr.Textbox(label=\"Cancel Iteration and Expensive Calculation on Change\")\n", " cancel_on_submit = gr.Textbox(label=\"Cancel Iteration and Expensive Calculation on Submit\")\n", " echo = gr.Textbox(label=\"Echo\")\n", " with gr.Row():\n", " with gr.Column():\n", " image = gr.Image(sources=[\"webcam\"], tool=\"editor\", label=\"Cancel on edit\", interactive=True)\n", " with gr.Column():\n", " video = gr.Video(sources=[\"webcam\"], label=\"Cancel on play\", interactive=True)\n", "\n", " click_event = run.click(fake_diffusion, n, output)\n", " stop.click(fn=None, inputs=None, outputs=None, cancels=[click_event])\n", " pred_event = run_pred.click(fn=long_prediction, inputs=[textbox], outputs=prediction)\n", "\n", " cancel_on_change.change(None, None, None, cancels=[click_event, pred_event])\n", " cancel_on_submit.submit(lambda s: s, cancel_on_submit, echo, cancels=[click_event, pred_event])\n", " image.edit(None, None, None, cancels=[click_event, pred_event])\n", " video.play(None, None, None, cancels=[click_event, pred_event])\n", "\n", " demo.queue(max_size=20)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: cancel_events"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import time\n", "import gradio as gr\n", "\n", "\n", "def fake_diffusion(steps):\n", " for i in range(steps):\n", " print(f\"Current step: {i}\")\n", " time.sleep(1)\n", " yield str(i)\n", "\n", "\n", "def long_prediction(*args, **kwargs):\n", " time.sleep(10)\n", " return 42\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " n = gr.Slider(1, 10, value=9, step=1, label=\"Number Steps\")\n", " run = gr.Button(value=\"Start Iterating\")\n", " output = gr.Textbox(label=\"Iterative Output\")\n", " stop = gr.Button(value=\"Stop Iterating\")\n", " with gr.Column():\n", " textbox = gr.Textbox(label=\"Prompt\")\n", " prediction = gr.Number(label=\"Expensive Calculation\")\n", " run_pred = gr.Button(value=\"Run Expensive Calculation\")\n", " with gr.Column():\n", " cancel_on_change = gr.Textbox(label=\"Cancel Iteration and Expensive Calculation on Change\")\n", " cancel_on_submit = gr.Textbox(label=\"Cancel Iteration and Expensive Calculation on Submit\")\n", " echo = gr.Textbox(label=\"Echo\")\n", " with gr.Row():\n", " with gr.Column():\n", " image = gr.Image(sources=[\"webcam\"], label=\"Cancel on clear\", interactive=True)\n", " with gr.Column():\n", " video = gr.Video(sources=[\"webcam\"], label=\"Cancel on start recording\", interactive=True)\n", "\n", " click_event = run.click(fake_diffusion, n, output)\n", " stop.click(fn=None, inputs=None, outputs=None, cancels=[click_event])\n", " pred_event = run_pred.click(fn=long_prediction, inputs=[textbox], outputs=prediction)\n", "\n", " cancel_on_change.change(None, None, None, cancels=[click_event, pred_event])\n", " cancel_on_submit.submit(lambda s: s, cancel_on_submit, echo, cancels=[click_event, pred_event])\n", " image.clear(None, None, None, cancels=[click_event, pred_event])\n", " video.start_recording(None, None, None, cancels=[click_event, pred_event])\n", "\n", " demo.queue(max_size=20)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}


@@ -31,9 +31,9 @@ with gr.Blocks() as demo:
             echo = gr.Textbox(label="Echo")
     with gr.Row():
         with gr.Column():
-            image = gr.Image(sources=["webcam"], tool="editor", label="Cancel on edit", interactive=True)
+            image = gr.Image(sources=["webcam"], label="Cancel on clear", interactive=True)
         with gr.Column():
-            video = gr.Video(sources=["webcam"], label="Cancel on play", interactive=True)
+            video = gr.Video(sources=["webcam"], label="Cancel on start recording", interactive=True)
 
     click_event = run.click(fake_diffusion, n, output)
     stop.click(fn=None, inputs=None, outputs=None, cancels=[click_event])
@@ -41,8 +41,8 @@ with gr.Blocks() as demo:
     cancel_on_change.change(None, None, None, cancels=[click_event, pred_event])
     cancel_on_submit.submit(lambda s: s, cancel_on_submit, echo, cancels=[click_event, pred_event])
-    image.edit(None, None, None, cancels=[click_event, pred_event])
-    video.play(None, None, None, cancels=[click_event, pred_event])
+    image.clear(None, None, None, cancels=[click_event, pred_event])
+    video.start_recording(None, None, None, cancels=[click_event, pred_event])
 
     demo.queue(max_size=20)
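
Note: Image.edit and Video.play were swapped for events that exist on the Gradio 4 components, Image.clear and Video.start_recording; the cancels= mechanism itself is unchanged. A minimal self-contained sketch of event cancellation:

import time
import gradio as gr

def slow_count():
    for i in range(100):
        time.sleep(1)
        yield str(i)

with gr.Blocks() as demo:
    out = gr.Textbox(label="Progress")
    start = gr.Button("Start")
    stop = gr.Button("Stop")
    ev = start.click(slow_count, None, out)
    # passing the event handle via cancels= aborts the in-flight job
    stop.click(fn=None, inputs=None, outputs=None, cancels=[ev])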


@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: fake_gan\n", "### This is a fake GAN that shows how to create a text-to-image interface for image generation. Check out the Stable Diffusion demo for more: https://hf.co/spaces/stabilityai/stable-diffusion/\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/cheetah1.jpg https://github.com/gradio-app/gradio/raw/main/demo/fake_gan/files/cheetah1.jpg"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["# This demo needs to be run from the repo folder.\n", "# python demo/fake_gan/run.py\n", "import random\n", "\n", "import gradio as gr\n", "\n", "\n", "def fake_gan():\n", " images = [\n", " (random.choice(\n", " [\n", " \"https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n", " \"https://images.unsplash.com/photo-1554151228-14d9def656e4?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=386&q=80\",\n", " \"https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80\",\n", " \"https://images.unsplash.com/photo-1546456073-92b9f0a8d413?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n", " \"https://images.unsplash.com/photo-1601412436009-d964bd02edbc?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=464&q=80\",\n", " ]\n", " ), f\"label {i}\" if i != 0 else \"label\" * 50)\n", " for i in range(3)\n", " ]\n", " return images\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Column(variant=\"panel\"):\n", " with gr.Row():\n", " text = gr.Textbox(\n", " label=\"Enter your prompt\",\n", " max_lines=1,\n", " placeholder=\"Enter your prompt\",\n", " container=False,\n", " )\n", " btn = gr.Button(\"Generate image\", scale=0)\n", "\n", " gallery = gr.Gallery(\n", " label=\"Generated images\", show_label=False, elem_id=\"gallery\"\n", " , columns=[2], rows=[2], object_fit=\"contain\", height=\"auto\")\n", "\n", " btn.click(fake_gan, None, gallery)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: fake_gan\n", "### This is a fake GAN that shows how to create a text-to-image interface for image generation. Check out the Stable Diffusion demo for more: https://hf.co/spaces/stabilityai/stable-diffusion/\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/cheetah1.jpg https://github.com/gradio-app/gradio/raw/main/demo/fake_gan/files/cheetah1.jpg"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["# This demo needs to be run from the repo folder.\n", "# python demo/fake_gan/run.py\n", "import random\n", "\n", "import gradio as gr\n", "\n", "\n", "def fake_gan():\n", " images = [\n", " (random.choice(\n", " [\n", " \"http://www.marketingtool.online/en/face-generator/img/faces/avatar-1151ce9f4b2043de0d2e3b7826127998.jpg\",\n", " \"http://www.marketingtool.online/en/face-generator/img/faces/avatar-116b5e92936b766b7fdfc242649337f7.jpg\",\n", " \"http://www.marketingtool.online/en/face-generator/img/faces/avatar-1163530ca19b5cebe1b002b8ec67b6fc.jpg\",\n", " \"http://www.marketingtool.online/en/face-generator/img/faces/avatar-1116395d6e6a6581eef8b8038f4c8e55.jpg\",\n", " \"http://www.marketingtool.online/en/face-generator/img/faces/avatar-11319be65db395d0e8e6855d18ddcef0.jpg\",\n", " ]\n", " ), f\"label {i}\")\n", " for i in range(3)\n", " ]\n", " return images\n", "\n", "\n", "with gr.Blocks() as demo:\n", " gallery = gr.Gallery(\n", " label=\"Generated images\", show_label=False, elem_id=\"gallery\"\n", " , columns=[3], rows=[1], object_fit=\"contain\", height=\"auto\")\n", " btn = gr.Button(\"Generate images\", scale=0)\n", "\n", " btn.click(fake_gan, None, gallery)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}


@@ -9,32 +9,23 @@ def fake_gan():
     images = [
         (random.choice(
             [
-                "https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80",
-                "https://images.unsplash.com/photo-1554151228-14d9def656e4?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=386&q=80",
-                "https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80",
-                "https://images.unsplash.com/photo-1546456073-92b9f0a8d413?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80",
-                "https://images.unsplash.com/photo-1601412436009-d964bd02edbc?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=464&q=80",
+                "http://www.marketingtool.online/en/face-generator/img/faces/avatar-1151ce9f4b2043de0d2e3b7826127998.jpg",
+                "http://www.marketingtool.online/en/face-generator/img/faces/avatar-116b5e92936b766b7fdfc242649337f7.jpg",
+                "http://www.marketingtool.online/en/face-generator/img/faces/avatar-1163530ca19b5cebe1b002b8ec67b6fc.jpg",
+                "http://www.marketingtool.online/en/face-generator/img/faces/avatar-1116395d6e6a6581eef8b8038f4c8e55.jpg",
+                "http://www.marketingtool.online/en/face-generator/img/faces/avatar-11319be65db395d0e8e6855d18ddcef0.jpg",
             ]
-        ), f"label {i}" if i != 0 else "label" * 50)
+        ), f"label {i}")
         for i in range(3)
     ]
     return images
 
 
 with gr.Blocks() as demo:
-    with gr.Column(variant="panel"):
-        with gr.Row():
-            text = gr.Textbox(
-                label="Enter your prompt",
-                max_lines=1,
-                placeholder="Enter your prompt",
-                container=False,
-            )
-            btn = gr.Button("Generate image", scale=0)
-
-    gallery = gr.Gallery(
-        label="Generated images", show_label=False, elem_id="gallery"
-    , columns=[2], rows=[2], object_fit="contain", height="auto")
+    gallery = gr.Gallery(
+        label="Generated images", show_label=False, elem_id="gallery"
+    , columns=[3], rows=[1], object_fit="contain", height="auto")
+    btn = gr.Button("Generate images", scale=0)
 
     btn.click(fake_gan, None, gallery)
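
Note: the restructured demo drops the prompt textbox and returns (image, caption) tuples straight into a gr.Gallery. A minimal sketch of that output format, with a placeholder URL:

import gradio as gr

def fake_gan():
    url = "https://example.com/face.jpg"  # placeholder image URL
    # a Gallery accepts a list of (image, caption) tuples
    return [(url, f"label {i}") for i in range(3)]

with gr.Blocks() as demo:
    gallery = gr.Gallery(label="Generated images", columns=[3], rows=[1], object_fit="contain", height="auto")
    btn = gr.Button("Generate images")
    btn.click(fake_gan, None, gallery)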


@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: gallery_component"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr \n", "\n", "with gr.Blocks() as demo:\n", " cheetahs = [\n", " \"https://upload.wikimedia.org/wikipedia/commons/0/09/TheCheethcat.jpg\",\n", " \"https://nationalzoo.si.edu/sites/default/files/animals/cheetah-003.jpg\",\n", " \"https://img.etimg.com/thumb/msid-50159822,width-650,imgsize-129520,,resizemode-4,quality-100/.jpg\",\n", " \"https://nationalzoo.si.edu/sites/default/files/animals/cheetah-002.jpg\",\n", " \"https://images.theconversation.com/files/375893/original/file-20201218-13-a8h8uq.jpg?ixlib=rb-1.1.0&rect=16%2C407%2C5515%2C2924&q=45&auto=format&w=496&fit=clip\",\n", " \"https://www.lifegate.com/app/uploads/ghepardo-primo-piano.jpg\",\n", " \"https://qph.cf2.quoracdn.net/main-qimg-0bbf31c18a22178cb7a8dd53640a3d05-lq\"\n", " ]\n", " gr.Gallery(value=cheetahs, columns=4)\n", "\n", "demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: gallery_component"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr \n", "\n", "with gr.Blocks() as demo:\n", " cheetahs = [\n", " \"https://upload.wikimedia.org/wikipedia/commons/0/09/TheCheethcat.jpg\",\n", " \"https://nationalzoo.si.edu/sites/default/files/animals/cheetah-003.jpg\",\n", " \"https://img.etimg.com/thumb/msid-50159822,width-650,imgsize-129520,,resizemode-4,quality-100/.jpg\",\n", " \"https://nationalzoo.si.edu/sites/default/files/animals/cheetah-002.jpg\",\n", " \"https://images.theconversation.com/files/375893/original/file-20201218-13-a8h8uq.jpg?ixlib=rb-1.1.0&rect=16%2C407%2C5515%2C2924&q=45&auto=format&w=496&fit=clip\",\n", " ]\n", " gr.Gallery(value=cheetahs, columns=4)\n", "\n", "demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}


@@ -7,8 +7,6 @@ with gr.Blocks() as demo:
         "https://img.etimg.com/thumb/msid-50159822,width-650,imgsize-129520,,resizemode-4,quality-100/.jpg",
         "https://nationalzoo.si.edu/sites/default/files/animals/cheetah-002.jpg",
         "https://images.theconversation.com/files/375893/original/file-20201218-13-a8h8uq.jpg?ixlib=rb-1.1.0&rect=16%2C407%2C5515%2C2924&q=45&auto=format&w=496&fit=clip",
-        "https://www.lifegate.com/app/uploads/ghepardo-primo-piano.jpg",
-        "https://qph.cf2.quoracdn.net/main-qimg-0bbf31c18a22178cb7a8dd53640a3d05-lq"
     ]
     gr.Gallery(value=cheetahs, columns=4)


@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: gallery_selections"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "\n", "with gr.Blocks() as demo:\n", " imgs = gr.State()\n", " gallery = gr.Gallery()\n", "\n", " def generate_images():\n", " images = []\n", " for _ in range(9):\n", " image = np.ones((100, 100, 3), dtype=np.uint8) * np.random.randint(\n", " 0, 255, 3\n", " ) # image is a solid single color\n", " images.append(image)\n", " return images, images\n", "\n", " demo.load(generate_images, None, [gallery, imgs])\n", "\n", " with gr.Row():\n", " selected = gr.Number(show_label=False, placeholder=\"Selected\")\n", " darken_btn = gr.Button(\"Darken selected\")\n", "\n", " def get_select_index(evt: gr.SelectData):\n", " return evt.index\n", "\n", " gallery.select(get_select_index, None, selected)\n", "\n", " def darken_img(imgs, index):\n", " index = int(index)\n", " imgs[index] = np.round(imgs[index] * 0.8).astype(np.uint8)\n", " return imgs, imgs\n", "\n", " darken_btn.click(darken_img, [imgs, selected], [imgs, gallery])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: gallery_selections"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "\n", "with gr.Blocks() as demo:\n", " imgs = gr.State()\n", " gallery = gr.Gallery()\n", "\n", " def generate_images():\n", " images = []\n", " for _ in range(9):\n", " image = np.ones((100, 100, 3), dtype=np.uint8) * np.random.randint(\n", " 0, 255, 3\n", " ) # image is a solid single color\n", " images.append(image)\n", " return images, images\n", "\n", " demo.load(generate_images, None, [gallery, imgs])\n", "\n", " with gr.Row():\n", " selected = gr.Number(show_label=False)\n", " darken_btn = gr.Button(\"Darken selected\")\n", "\n", " def get_select_index(evt: gr.SelectData):\n", " return evt.index\n", "\n", " gallery.select(get_select_index, None, selected)\n", "\n", " def darken_img(imgs, index):\n", " index = int(index)\n", " imgs[index] = np.round(imgs[index] * 0.8).astype(np.uint8)\n", " return imgs, imgs\n", "\n", " darken_btn.click(darken_img, [imgs, selected], [imgs, gallery])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}


@@ -17,7 +17,7 @@ with gr.Blocks() as demo:
     demo.load(generate_images, None, [gallery, imgs])
 
     with gr.Row():
-        selected = gr.Number(show_label=False, placeholder="Selected")
+        selected = gr.Number(show_label=False)
         darken_btn = gr.Button("Darken selected")
 
     def get_select_index(evt: gr.SelectData):
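
Note: gr.Number lost its placeholder argument, which is the whole fix here. The surrounding select wiring is worth calling out: the callback takes a typed gr.SelectData argument whose .index identifies the clicked gallery item. A minimal sketch with placeholder images:

import numpy as np
import gradio as gr

with gr.Blocks() as demo:
    # two solid-color placeholder images
    imgs = [np.full((100, 100, 3), c, dtype=np.uint8) for c in (60, 200)]
    gallery = gr.Gallery(value=imgs)
    selected = gr.Number(show_label=False)

    def get_select_index(evt: gr.SelectData):
        return evt.index  # index of the item the user clicked

    gallery.select(get_select_index, None, selected)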


@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: generate_english_german"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio transformers torch"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "from transformers import pipeline\n", "\n", "english_translator = gr.Blocks.load(name=\"spaces/gradio/english_translator\")\n", "english_generator = pipeline(\"text-generation\", model=\"distilgpt2\")\n", "\n", "\n", "def generate_text(text):\n", " english_text = english_generator(text)[0][\"generated_text\"]\n", " german_text = english_translator(english_text)\n", " return english_text, german_text\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " seed = gr.Text(label=\"Input Phrase\")\n", " with gr.Column():\n", " english = gr.Text(label=\"Generated English Text\")\n", " german = gr.Text(label=\"Generated German Text\")\n", " btn = gr.Button(\"Generate\")\n", " btn.click(generate_text, inputs=[seed], outputs=[english, german])\n", " gr.Examples([\"My name is Clara and I am\"], inputs=[seed])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: generate_english_german"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio transformers torch"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "from transformers import pipeline\n", "\n", "english_translator = gr.load(name=\"spaces/gradio/english_translator\")\n", "english_generator = pipeline(\"text-generation\", model=\"distilgpt2\")\n", "\n", "\n", "def generate_text(text):\n", " english_text = english_generator(text)[0][\"generated_text\"]\n", " german_text = english_translator(english_text)\n", " return english_text, german_text\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " seed = gr.Text(label=\"Input Phrase\")\n", " with gr.Column():\n", " english = gr.Text(label=\"Generated English Text\")\n", " german = gr.Text(label=\"Generated German Text\")\n", " btn = gr.Button(\"Generate\")\n", " btn.click(generate_text, inputs=[seed], outputs=[english, german])\n", " gr.Examples([\"My name is Clara and I am\"], inputs=[seed])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}


@@ -2,7 +2,7 @@ import gradio as gr
 
 from transformers import pipeline
 
-english_translator = gr.Blocks.load(name="spaces/gradio/english_translator")
+english_translator = gr.load(name="spaces/gradio/english_translator")
 english_generator = pipeline("text-generation", model="distilgpt2")
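
Note: the Blocks.load classmethod was removed; gr.load is the Gradio 4 entry point for loading a Space (or model repo) as an app that can also be called like a function. A minimal sketch using the Space named above (it needs network access to actually run):

import gradio as gr

english_translator = gr.load(name="spaces/gradio/english_translator")
# the loaded app is callable, so it can be used inside another app's functions
print(english_translator("Hello, world!"))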


@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: image_classification\n", "### Simple image classification in Pytorch with Gradio's Image input and Label output.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch torchvision"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/image_classification/cheetah.jpg"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import torch\n", "import requests\n", "from torchvision import transforms\n", "\n", "model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()\n", "response = requests.get(\"https://git.io/JJkYN\")\n", "labels = response.text.split(\"\\n\")\n", "\n", "def predict(inp):\n", " inp = transforms.ToTensor()(inp).unsqueeze(0)\n", " with torch.no_grad():\n", " prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)\n", " confidences = {labels[i]: float(prediction[i]) for i in range(1000)} \n", " return confidences\n", "\n", "demo = gr.Interface(fn=predict, \n", " inputs=gr.inputs.Image(type=\"pil\"),\n", " outputs=gr.outputs.Label(num_top_classes=3),\n", " examples=[[\"cheetah.jpg\"]],\n", " )\n", " \n", "demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: image_classification\n", "### Simple image classification in Pytorch with Gradio's Image input and Label output.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch torchvision"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/image_classification/cheetah.jpg"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import torch\n", "import requests\n", "from torchvision import transforms\n", "\n", "model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()\n", "response = requests.get(\"https://git.io/JJkYN\")\n", "labels = response.text.split(\"\\n\")\n", "\n", "def predict(inp):\n", " inp = transforms.ToTensor()(inp).unsqueeze(0)\n", " with torch.no_grad():\n", " prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)\n", " confidences = {labels[i]: float(prediction[i]) for i in range(1000)} \n", " return confidences\n", "\n", "demo = gr.Interface(fn=predict, \n", " inputs=gr.Image(type=\"pil\"),\n", " outputs=gr.Label(num_top_classes=3),\n", " examples=[[\"cheetah.jpg\"]],\n", " )\n", " \n", "demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}


@@ -15,8 +15,8 @@ def predict(inp):
     return confidences
 
 demo = gr.Interface(fn=predict,
-             inputs=gr.inputs.Image(type="pil"),
-             outputs=gr.outputs.Label(num_top_classes=3),
+             inputs=gr.Image(type="pil"),
+             outputs=gr.Label(num_top_classes=3),
              examples=[["cheetah.jpg"]],
              )
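
Note: same namespace migration as above. One thing that does not change: gr.Label(num_top_classes=3) still consumes a plain {class name: confidence} dict, so predict functions port over untouched. A minimal sketch with hard-coded confidences:

import gradio as gr

def predict(_img):
    # gr.Label expects a mapping of class name -> confidence
    return {"cheetah": 0.85, "leopard": 0.10, "jaguar": 0.05}

demo = gr.Interface(predict, gr.Image(type="pil"), gr.Label(num_top_classes=3))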

File diff suppressed because it is too large

Binary image file deleted (20 KiB); contents not shown.

Binary image file deleted (18 KiB); contents not shown.


@@ -1,2 +0,0 @@
-numpy
-tensorflow


@@ -1 +0,0 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: image_classifier_interpretation"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy tensorflow"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/imagenet_labels.json https://github.com/gradio-app/gradio/raw/main/demo/image_classifier_interpretation/files/imagenet_labels.json\n", "os.mkdir('images')\n", "!wget -q -O images/cheetah1.jpg https://github.com/gradio-app/gradio/raw/main/demo/image_classifier_interpretation/images/cheetah1.jpg\n", "!wget -q -O images/lion.jpg https://github.com/gradio-app/gradio/raw/main/demo/image_classifier_interpretation/images/lion.jpg"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import requests\n", "import tensorflow as tf\n", "\n", "import gradio as gr\n", "\n", "inception_net = tf.keras.applications.MobileNetV2() # load the model\n", "\n", "# Download human-readable labels for ImageNet.\n", "response = requests.get(\"https://git.io/JJkYN\")\n", "labels = response.text.split(\"\\n\")\n", "\n", "\n", "def classify_image(inp):\n", " inp = inp.reshape((-1, 224, 224, 3))\n", " inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n", " prediction = inception_net.predict(inp).flatten()\n", " return {labels[i]: float(prediction[i]) for i in range(1000)}\n", "\n", "\n", "image = gr.Image(shape=(224, 224))\n", "label = gr.Label(num_top_classes=3)\n", "\n", "demo = gr.Interface(\n", " fn=classify_image, inputs=image, outputs=label\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}


@@ -1,28 +0,0 @@
import requests
import tensorflow as tf
import gradio as gr
inception_net = tf.keras.applications.MobileNetV2() # load the model
# Download human-readable labels for ImageNet.
response = requests.get("https://git.io/JJkYN")
labels = response.text.split("\n")
def classify_image(inp):
inp = inp.reshape((-1, 224, 224, 3))
inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)
prediction = inception_net.predict(inp).flatten()
return {labels[i]: float(prediction[i]) for i in range(1000)}
image = gr.Image(shape=(224, 224))
label = gr.Label(num_top_classes=3)
demo = gr.Interface(
fn=classify_image, inputs=image, outputs=label
)
if __name__ == "__main__":
demo.launch()
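
Note: this demo was deleted outright: judging by its name, it existed to showcase Interface interpretation, which Gradio 4 removed, and its gr.Image(shape=(224, 224)) call no longer constructs. A Gradio-4-compatible version of just the classifier would resize inside the function; a sketch assuming the same MobileNetV2 setup:

import numpy as np
import tensorflow as tf
import gradio as gr

inception_net = tf.keras.applications.MobileNetV2()  # load the model

def classify_image(inp: np.ndarray):
    inp = tf.image.resize(inp, (224, 224)).numpy()  # replaces the removed shape=(224, 224)
    inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp.reshape((-1, 224, 224, 3)))
    prediction = inception_net.predict(inp).flatten()
    # human-readable ImageNet labels omitted here for brevity
    return {str(i): float(p) for i, p in enumerate(prediction)}

demo = gr.Interface(fn=classify_image, inputs=gr.Image(), outputs=gr.Label(num_top_classes=3))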

Binary image file deleted (412 KiB); contents not shown.

Binary image file deleted (541 KiB); contents not shown.


@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: leaderboard\n", "### A simple dashboard ranking spaces by number of likes.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import requests\n", "import pandas as pd\n", "from huggingface_hub.hf_api import SpaceInfo\n", "path = f\"https://huggingface.co/api/spaces\"\n", "\n", "\n", "def get_blocks_party_spaces():\n", " r = requests.get(path)\n", " d = r.json()\n", " spaces = [SpaceInfo(**x) for x in d]\n", " blocks_spaces = {}\n", " for i in range(0,len(spaces)):\n", " if spaces[i].id.split('/')[0] == 'Gradio-Blocks' and hasattr(spaces[i], 'likes') and spaces[i].id != 'Gradio-Blocks/Leaderboard' and spaces[i].id != 'Gradio-Blocks/README':\n", " blocks_spaces[spaces[i].id]=spaces[i].likes\n", " df = pd.DataFrame(\n", " [{\"Spaces_Name\": Spaces, \"likes\": likes} for Spaces,likes in blocks_spaces.items()])\n", " df = df.sort_values(by=['likes'],ascending=False)\n", " return df\n", "\n", "block = gr.Blocks()\n", "\n", "with block: \n", " gr.Markdown(\"\"\"Leaderboard for the most popular Blocks Event Spaces. To learn more and join, see <a href=\"https://huggingface.co/Gradio-Blocks\" target=\"_blank\" style=\"text-decoration: underline\">Blocks Party Event</a>\"\"\")\n", " with gr.Tabs():\n", " with gr.TabItem(\"Blocks Party Leaderboard\"):\n", " with gr.Row():\n", " data = gr.outputs.Dataframe(type=\"pandas\")\n", " with gr.Row():\n", " data_run = gr.Button(\"Refresh\")\n", " data_run.click(get_blocks_party_spaces, inputs=None, outputs=data)\n", " # running the function on page load in addition to when the button is clicked\n", " block.load(get_blocks_party_spaces, inputs=None, outputs=data) \n", "\n", "block.launch()\n", "\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: leaderboard\n", "### A simple dashboard ranking spaces by number of likes.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import requests\n", "import pandas as pd\n", "from huggingface_hub.hf_api import SpaceInfo\n", "path = f\"https://huggingface.co/api/spaces\"\n", "\n", "\n", "def get_blocks_party_spaces():\n", " r = requests.get(path)\n", " d = r.json()\n", " spaces = [SpaceInfo(**x) for x in d]\n", " blocks_spaces = {}\n", " for i in range(0,len(spaces)):\n", " if spaces[i].id.split('/')[0] == 'Gradio-Blocks' and hasattr(spaces[i], 'likes') and spaces[i].id != 'Gradio-Blocks/Leaderboard' and spaces[i].id != 'Gradio-Blocks/README':\n", " blocks_spaces[spaces[i].id]=spaces[i].likes\n", " df = pd.DataFrame(\n", " [{\"Spaces_Name\": Spaces, \"likes\": likes} for Spaces,likes in blocks_spaces.items()])\n", " df = df.sort_values(by=['likes'],ascending=False)\n", " return df\n", "\n", "block = gr.Blocks()\n", "\n", "with block: \n", " gr.Markdown(\"\"\"Leaderboard for the most popular Blocks Event Spaces. To learn more and join, see <a href=\"https://huggingface.co/Gradio-Blocks\" target=\"_blank\" style=\"text-decoration: underline\">Blocks Party Event</a>\"\"\")\n", " with gr.Tabs():\n", " with gr.TabItem(\"Blocks Party Leaderboard\"):\n", " with gr.Row():\n", " data = gr.Dataframe(type=\"pandas\")\n", " with gr.Row():\n", " data_run = gr.Button(\"Refresh\")\n", " data_run.click(get_blocks_party_spaces, inputs=None, outputs=data)\n", " # running the function on page load in addition to when the button is clicked\n", " block.load(get_blocks_party_spaces, inputs=None, outputs=data) \n", "\n", "block.launch()\n", "\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}


@@ -25,7 +25,7 @@ with block:
     with gr.Tabs():
         with gr.TabItem("Blocks Party Leaderboard"):
             with gr.Row():
-                data = gr.outputs.Dataframe(type="pandas")
+                data = gr.Dataframe(type="pandas")
             with gr.Row():
                 data_run = gr.Button("Refresh")
             data_run.click(get_blocks_party_spaces, inputs=None, outputs=data)
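
Note: gr.outputs.Dataframe(type="pandas") becomes gr.Dataframe(type="pandas"); returning a pandas DataFrame from the callback works as before, including on page load. A minimal sketch:

import pandas as pd
import gradio as gr

def refresh():
    return pd.DataFrame({"Spaces_Name": ["gradio/some-space"], "likes": [42]})  # placeholder data

with gr.Blocks() as block:
    data = gr.Dataframe(type="pandas")
    data_run = gr.Button("Refresh")
    data_run.click(refresh, inputs=None, outputs=data)
    block.load(refresh, inputs=None, outputs=data)  # also populate on page load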


@@ -1,100 +0,0 @@
airplane
alarm_clock
anvil
apple
axe
baseball
baseball_bat
basketball
beard
bed
bench
bicycle
bird
book
bread
bridge
broom
butterfly
camera
candle
car
cat
ceiling_fan
cell_phone
chair
circle
clock
cloud
coffee_cup
cookie
cup
diving_board
donut
door
drums
dumbbell
envelope
eye
eyeglasses
face
fan
flower
frying_pan
grapes
hammer
hat
headphones
helmet
hot_dog
ice_cream
key
knife
ladder
laptop
light_bulb
lightning
line
lollipop
microphone
moon
mountain
moustache
mushroom
pants
paper_clip
pencil
pillow
pizza
power_outlet
radio
rainbow
rifle
saw
scissors
screwdriver
shorts
shovel
smiley_face
snake
sock
spider
spoon
square
star
stop_sign
suitcase
sun
sword
syringe
t-shirt
table
tennis_racquet
tent
tooth
traffic_light
tree
triangle
umbrella
wheel
wristwatch


@@ -1,3 +0,0 @@
-torch
-gdown
-numpy


@@ -1 +0,0 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: pictionary"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch gdown numpy"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/pictionary/class_names.txt"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["from pathlib import Path\n", "\n", "import numpy as np\n", "import torch\n", "import gradio as gr\n", "from torch import nn\n", "import gdown \n", "\n", "url = 'https://drive.google.com/uc?id=1dsk2JNZLRDjC-0J4wIQX_FcVurPaXaAZ'\n", "output = 'pytorch_model.bin'\n", "gdown.download(url, output, quiet=False)\n", "\n", "LABELS = Path('class_names.txt').read_text().splitlines()\n", "\n", "model = nn.Sequential(\n", " nn.Conv2d(1, 32, 3, padding='same'),\n", " nn.ReLU(),\n", " nn.MaxPool2d(2),\n", " nn.Conv2d(32, 64, 3, padding='same'),\n", " nn.ReLU(),\n", " nn.MaxPool2d(2),\n", " nn.Conv2d(64, 128, 3, padding='same'),\n", " nn.ReLU(),\n", " nn.MaxPool2d(2),\n", " nn.Flatten(),\n", " nn.Linear(1152, 256),\n", " nn.ReLU(),\n", " nn.Linear(256, len(LABELS)),\n", ")\n", "state_dict = torch.load('pytorch_model.bin', map_location='cpu')\n", "model.load_state_dict(state_dict, strict=False)\n", "model.eval()\n", "\n", "def predict(im):\n", " if im is None:\n", " return None\n", " im = np.asarray(im.resize((28, 28)))\n", " \n", " x = torch.tensor(im, dtype=torch.float32).unsqueeze(0).unsqueeze(0) / 255.\n", "\n", " with torch.no_grad():\n", " out = model(x)\n", "\n", " probabilities = torch.nn.functional.softmax(out[0], dim=0)\n", "\n", " values, indices = torch.topk(probabilities, 5)\n", "\n", " return {LABELS[i]: v.item() for i, v in zip(indices, values)}\n", "\n", "\n", "interface = gr.Interface(predict, \n", " inputs=gr.Sketchpad(label=\"Draw Here\", brush_radius=5, type=\"pil\", shape=(120, 120)), \n", " outputs=gr.Label(label=\"Guess\"), \n", " live=True)\n", "\n", "interface.queue().launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}


@@ -1,56 +0,0 @@
from pathlib import Path
import numpy as np
import torch
import gradio as gr
from torch import nn
import gdown
url = 'https://drive.google.com/uc?id=1dsk2JNZLRDjC-0J4wIQX_FcVurPaXaAZ'
output = 'pytorch_model.bin'
gdown.download(url, output, quiet=False)
LABELS = Path('class_names.txt').read_text().splitlines()
model = nn.Sequential(
nn.Conv2d(1, 32, 3, padding='same'),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Conv2d(32, 64, 3, padding='same'),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Conv2d(64, 128, 3, padding='same'),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Flatten(),
nn.Linear(1152, 256),
nn.ReLU(),
nn.Linear(256, len(LABELS)),
)
state_dict = torch.load('pytorch_model.bin', map_location='cpu')
model.load_state_dict(state_dict, strict=False)
model.eval()
def predict(im):
if im is None:
return None
im = np.asarray(im.resize((28, 28)))
x = torch.tensor(im, dtype=torch.float32).unsqueeze(0).unsqueeze(0) / 255.
with torch.no_grad():
out = model(x)
probabilities = torch.nn.functional.softmax(out[0], dim=0)
values, indices = torch.topk(probabilities, 5)
return {LABELS[i]: v.item() for i, v in zip(indices, values)}
interface = gr.Interface(predict,
inputs=gr.Sketchpad(label="Draw Here", brush_radius=5, type="pil", shape=(120, 120)),
outputs=gr.Label(label="Guess"),
live=True)
interface.queue().launch()


@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: question-answering"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch transformers"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline\n", "\n", "model_name = \"deepset/roberta-base-squad2\"\n", "\n", "nlp = pipeline(\"question-answering\", model=model_name, tokenizer=model_name)\n", "\n", "context = \"The Amazon rainforest, also known in English as Amazonia or the Amazon Jungle, is a moist broadleaf forest that covers most of the Amazon basin of South America. This basin encompasses 7,000,000 square kilometres (2,700,000 sq mi), of which 5,500,000 square kilometres (2,100,000 sq mi) are covered by the rainforest. This region includes territory belonging to nine nations. The majority of the forest is contained within Brazil, with 60% of the rainforest, followed by Peru with 13%, Colombia with 10%, and with minor amounts in Venezuela, Ecuador, Bolivia, Guyana, Suriname and French Guiana. The Amazon represents over half of the planet's remaining rainforests, and comprises the largest and most biodiverse tract of tropical rainforest in the world, with an estimated 390 billion individual trees divided into 16,000 species.\"\n", "question = \"Which continent is the Amazon rainforest in?\"\n", "\n", "\n", "def predict(context, question):\n", " res = nlp({\"question\": question, \"context\": context})\n", " return res[\"answer\"], res[\"score\"]\n", "\n", "\n", "gr.Interface(\n", " predict,\n", " inputs=[\n", " gr.inputs.Textbox(lines=7, default=context, label=\"Context Paragraph\"),\n", " gr.inputs.Textbox(lines=2, default=question, label=\"Question\"),\n", " ],\n", " outputs=[gr.outputs.Textbox(label=\"Answer\"), gr.outputs.Textbox(label=\"Score\")],\n", ").launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: question-answering"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch transformers"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline\n", "\n", "model_name = \"deepset/roberta-base-squad2\"\n", "\n", "nlp = pipeline(\"question-answering\", model=model_name, tokenizer=model_name)\n", "\n", "context = \"The Amazon rainforest, also known in English as Amazonia or the Amazon Jungle, is a moist broadleaf forest that covers most of the Amazon basin of South America. This basin encompasses 7,000,000 square kilometres (2,700,000 sq mi), of which 5,500,000 square kilometres (2,100,000 sq mi) are covered by the rainforest. This region includes territory belonging to nine nations. The majority of the forest is contained within Brazil, with 60% of the rainforest, followed by Peru with 13%, Colombia with 10%, and with minor amounts in Venezuela, Ecuador, Bolivia, Guyana, Suriname and French Guiana. The Amazon represents over half of the planet's remaining rainforests, and comprises the largest and most biodiverse tract of tropical rainforest in the world, with an estimated 390 billion individual trees divided into 16,000 species.\"\n", "question = \"Which continent is the Amazon rainforest in?\"\n", "\n", "\n", "def predict(context, question):\n", " res = nlp({\"question\": question, \"context\": context})\n", " return res[\"answer\"], res[\"score\"]\n", "\n", "\n", "gr.Interface(\n", " predict,\n", " inputs=[\n", " gr.Textbox(lines=7, value=context, label=\"Context Paragraph\"),\n", " gr.Textbox(lines=2, value=question, label=\"Question\"),\n", " ],\n", " outputs=[gr.Textbox(label=\"Answer\"), gr.Textbox(label=\"Score\")],\n", ").launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}

View File

@@ -18,8 +18,8 @@ def predict(context, question):
 gr.Interface(
     predict,
     inputs=[
-        gr.inputs.Textbox(lines=7, default=context, label="Context Paragraph"),
-        gr.inputs.Textbox(lines=2, default=question, label="Question"),
+        gr.Textbox(lines=7, value=context, label="Context Paragraph"),
+        gr.Textbox(lines=2, value=question, label="Question"),
     ],
-    outputs=[gr.outputs.Textbox(label="Answer"), gr.outputs.Textbox(label="Score")],
+    outputs=[gr.Textbox(label="Answer"), gr.Textbox(label="Score")],
 ).launch()
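
The pattern in this hunk recurs throughout the commit: the `gr.inputs`/`gr.outputs` namespaces are replaced by top-level components, and `default=` becomes `value=`. A minimal sketch of the migrated call, with a stub standing in for the notebook's QA pipeline:

```python
import gradio as gr

def predict(context, question):
    # stub for the question-answering pipeline shown in the notebook above
    return "South America", 0.99

gr.Interface(
    predict,
    inputs=[
        gr.Textbox(lines=7, value="The Amazon rainforest ...", label="Context Paragraph"),
        gr.Textbox(lines=2, value="Which continent is the Amazon rainforest in?", label="Question"),
    ],
    outputs=[gr.Textbox(label="Answer"), gr.Textbox(label="Score")],
).launch()
```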

File diff suppressed because one or more lines are too long

View File

@@ -70,10 +70,10 @@ def similarity_fn(path1, path2):
     return output
 inputs = [
-    gr.inputs.Audio(sources=["microphone"], type="filepath", optional=True, label="Speaker #1"),
-    gr.inputs.Audio(sources=["microphone"], type="filepath", optional=True, label="Speaker #2"),
+    gr.Audio(sources=["microphone"], type="filepath", label="Speaker #1"),
+    gr.Audio(sources=["microphone"], type="filepath", label="Speaker #2"),
 ]
-output = gr.outputs.HTML(label="")
+output = gr.HTML(label="")
 description = (

View File

@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: sepia_filter"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import numpy as np\n", "import gradio as gr\n", "\n", "def sepia(input_img):\n", " sepia_filter = np.array([\n", " [0.393, 0.769, 0.189], \n", " [0.349, 0.686, 0.168], \n", " [0.272, 0.534, 0.131]\n", " ])\n", " sepia_img = input_img.dot(sepia_filter.T)\n", " sepia_img /= sepia_img.max()\n", " return sepia_img\n", "\n", "demo = gr.Interface(sepia, gr.Image(shape=(200, 200)), \"image\")\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: sepia_filter"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import numpy as np\n", "import gradio as gr\n", "\n", "def sepia(input_img):\n", " sepia_filter = np.array([\n", " [0.393, 0.769, 0.189], \n", " [0.349, 0.686, 0.168], \n", " [0.272, 0.534, 0.131]\n", " ])\n", " sepia_img = input_img.dot(sepia_filter.T)\n", " sepia_img /= sepia_img.max()\n", " return sepia_img\n", "\n", "demo = gr.Interface(sepia, gr.Image(), \"image\")\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}

View File

@@ -11,6 +11,6 @@ def sepia(input_img):
     sepia_img /= sepia_img.max()
     return sepia_img
-demo = gr.Interface(sepia, gr.Image(shape=(200, 200)), "image")
+demo = gr.Interface(sepia, gr.Image(), "image")
 if __name__ == "__main__":
     demo.launch()
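
Dropping `shape=(200, 200)` means `gr.Image()` no longer resizes inputs. If a demo still needs fixed dimensions under the new API, one option — a sketch, not part of this commit, assuming the default numpy image type — is to resize inside the function:

```python
import numpy as np
from PIL import Image
import gradio as gr

def sepia(input_img):
    # resize in the function now that gr.Image() has no shape= parameter
    img = np.asarray(Image.fromarray(input_img).resize((200, 200)), dtype=float)
    sepia_filter = np.array([
        [0.393, 0.769, 0.189],
        [0.349, 0.686, 0.168],
        [0.272, 0.534, 0.131],
    ])
    sepia_img = img @ sepia_filter.T
    sepia_img /= sepia_img.max()  # normalize to [0, 1] floats, which gr.Image accepts
    return sepia_img

demo = gr.Interface(sepia, gr.Image(), "image")
```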

View File

@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stream_frames"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "\n", "def flip(im):\n", " return np.flipud(im)\n", "\n", "demo = gr.Interface(\n", " flip, \n", " gr.Image(source=\"webcam\", streaming=True), \n", " \"image\",\n", " live=True\n", ")\n", "if __name__ == \"__main__\":\n", " demo.launch()\n", " "]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stream_frames"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "\n", "def flip(im):\n", " return np.flipud(im)\n", "\n", "demo = gr.Interface(\n", " flip, \n", " gr.Image(sources=[\"webcam\"], streaming=True), \n", " \"image\",\n", " live=True\n", ")\n", "if __name__ == \"__main__\":\n", " demo.launch()\n", " "]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}

View File

@@ -6,7 +6,7 @@ def flip(im):
 demo = gr.Interface(
     flip,
-    gr.Image(source="webcam", streaming=True),
+    gr.Image(sources=["webcam"], streaming=True),
     "image",
     live=True
 )
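
The singular `source=` string becomes a `sources=` list in Gradio 4, so one component can advertise several input methods. The updated demo, sketched in full:

```python
import gradio as gr
import numpy as np

def flip(im):
    return np.flipud(im)

demo = gr.Interface(
    flip,
    # sources= is a list now; a non-streaming demo could pass ["webcam", "upload"]
    gr.Image(sources=["webcam"], streaming=True),
    "image",
    live=True,
)

if __name__ == "__main__":
    demo.launch()
```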

View File

@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: text_generation\n", "### This text generation demo takes in input text and returns generated text. It uses the Transformers library to set up the model and has two examples.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio git+https://github.com/huggingface/transformers gradio torch"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from transformers import pipeline\n", "\n", "generator = pipeline('text-generation', model='gpt2')\n", "\n", "def generate(text):\n", " result = generator(text, max_length=30, num_return_sequences=1)\n", " return result[0][\"generated_text\"]\n", "\n", "examples = [\n", " [\"The Moon's orbit around Earth has\"],\n", " [\"The smooth Borealis basin in the Northern Hemisphere covers 40%\"],\n", "]\n", "\n", "demo = gr.Interface(\n", " fn=generate,\n", " inputs=gr.inputs.Textbox(lines=5, label=\"Input Text\"),\n", " outputs=gr.outputs.Textbox(label=\"Generated Text\"),\n", " examples=examples\n", ")\n", "\n", "demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: text_generation\n", "### This text generation demo takes in input text and returns generated text. It uses the Transformers library to set up the model and has two examples.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio git+https://github.com/huggingface/transformers gradio torch"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from transformers import pipeline\n", "\n", "generator = pipeline('text-generation', model='gpt2')\n", "\n", "def generate(text):\n", " result = generator(text, max_length=30, num_return_sequences=1)\n", " return result[0][\"generated_text\"]\n", "\n", "examples = [\n", " [\"The Moon's orbit around Earth has\"],\n", " [\"The smooth Borealis basin in the Northern Hemisphere covers 40%\"],\n", "]\n", "\n", "demo = gr.Interface(\n", " fn=generate,\n", " inputs=gr.Textbox(lines=5, label=\"Input Text\"),\n", " outputs=gr.Textbox(label=\"Generated Text\"),\n", " examples=examples\n", ")\n", "\n", "demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}

View File

@@ -14,8 +14,8 @@ examples = [
 demo = gr.Interface(
     fn=generate,
-    inputs=gr.inputs.Textbox(lines=5, label="Input Text"),
-    outputs=gr.outputs.Textbox(label="Generated Text"),
+    inputs=gr.Textbox(lines=5, label="Input Text"),
+    outputs=gr.Textbox(label="Generated Text"),
     examples=examples
 )

File diff suppressed because one or more lines are too long

View File

@@ -76,8 +76,8 @@ def similarity_fn(path1, path2):
 inputs = [
-    gr.Audio(sources=["microphone"], type="filepath", optional=True, label="Speaker #1"),
-    gr.Audio(sources=["microphone"], type="filepath", optional=True, label="Speaker #2"),
+    gr.Audio(sources=["microphone"], type="filepath", label="Speaker #1"),
+    gr.Audio(sources=["microphone"], type="filepath", label="Speaker #2"),
 ]
 output = gr.HTML(label="")
@@ -108,7 +108,6 @@ demo = gr.Interface(
     title="Voice Authentication with UniSpeech-SAT + X-Vectors",
     description=description,
     article=article,
-    layout="horizontal",
     allow_flagging="never",
     live=False,
     examples=examples,
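
Two things change in the voice-authentication demo: `optional=True` disappears (a speaker slot left unrecorded now reaches the function as `None`) and the deprecated `layout=` argument is removed from `gr.Interface`. A sketch of the guard the function then wants, assuming the demo's `similarity_fn` signature (not part of this commit):

```python
def similarity_fn(path1, path2):
    # with optional= gone, a missing recording is delivered as None
    if path1 is None or path2 is None:
        return '<p style="color:red">Please record audio for both speakers.</p>'
    ...
```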

View File

@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: webcam"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["\n", "import gradio as gr\n", "\n", "\n", "def snap(image, video):\n", " return [image, video]\n", "\n", "\n", "demo = gr.Interface(\n", " snap,\n", " [gr.Image(source=\"webcam\", tool=None), gr.Video(sources=[\"webcam\"])],\n", " [\"image\", \"video\"],\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: webcam"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["\n", "import gradio as gr\n", "\n", "\n", "def snap(image, video):\n", " return [image, video]\n", "\n", "\n", "demo = gr.Interface(\n", " snap,\n", " [gr.Image(sources=[\"webcam\"]), gr.Video(sources=[\"webcam\"])],\n", " [\"image\", \"video\"],\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}

View File

@@ -8,7 +8,7 @@ def snap(image, video):
 demo = gr.Interface(
     snap,
-    [gr.Image(source="webcam", tool=None), gr.Video(sources=["webcam"])],
+    [gr.Image(sources=["webcam"]), gr.Video(sources=["webcam"])],
     ["image", "video"],
 )

View File

@@ -28,7 +28,7 @@ class Image(StreamingInput, Component):
     Postprocessing: expects a {numpy.array}, {PIL.Image} or {str} or {pathlib.Path} filepath to an image and displays the image.
     Examples-format: a {str} local filepath or URL to an image.
     Demos: image_mod, image_mod_default_image
-    Guides: image-classification-in-pytorch, image-classification-in-tensorflow, image-classification-with-vision-transformers, building-a-pictionary_app, create-your-own-friends-with-a-gan
+    Guides: image-classification-in-pytorch, image-classification-in-tensorflow, image-classification-with-vision-transformers, create-your-own-friends-with-a-gan
     """
 EVENTS = [
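
The surviving docstring lines state the component's contract: postprocessing accepts a numpy array, a `PIL.Image`, or a `str`/`pathlib.Path` filepath. A small sketch of the numpy variant (hypothetical grayscale-ramp demo, assuming 2D uint8 arrays render as grayscale):

```python
import numpy as np
import gradio as gr

def gradient(width):
    # returning a numpy array is one of the documented postprocessing types
    ramp = np.linspace(0, 255, int(width), dtype=np.uint8)
    return np.tile(ramp, (64, 1))  # a 64-row horizontal gradient

demo = gr.Interface(gradient, gr.Slider(16, 512, value=256), gr.Image())

if __name__ == "__main__":
    demo.launch()
```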

View File

@@ -34,7 +34,7 @@ class Label(Component):
     Postprocessing: expects a {Dict[str, float]} of classes and confidences, or {str} with just the class or an {int}/{float} for regression outputs, or a {str} path to a .json file containing a json dictionary in the structure produced by Label.postprocess().
     Demos: main_note, titanic_survival
-    Guides: image-classification-in-pytorch, image-classification-in-tensorflow, image-classification-with-vision-transformers, building-a-pictionary-app
+    Guides: image-classification-in-pytorch, image-classification-in-tensorflow, image-classification-with-vision-transformers
     """
 CONFIDENCES_KEY = "confidences"
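
Label's docstring likewise fixes its contract: a `{class: confidence}` dict, a bare class/regression value, or a path to a JSON file in `Label.postprocess()` form. A minimal sketch of the dict form (hypothetical classifier, not from this commit):

```python
import gradio as gr

def classify(text):
    # Label renders a {class_name: confidence} dict as ranked confidence bars
    return {"positive": 0.7, "negative": 0.2, "neutral": 0.1}

demo = gr.Interface(classify, gr.Textbox(), gr.Label(num_top_classes=2))

if __name__ == "__main__":
    demo.launch()
```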

View File

@@ -1,106 +0,0 @@
# Building a Pictionary App
Related spaces: https://huggingface.co/spaces/nateraw/quickdraw
Tags: SKETCHPAD, LABELS, LIVE
## Introduction
How well can an algorithm guess what you're drawing? A few years ago, Google released the **Quick Draw** dataset, which contains drawings made by humans of a variety of everyday objects. Researchers have used this dataset to train models to guess Pictionary-style drawings.
Such models are a perfect match for Gradio's _sketchpad_ input, so in this tutorial we will build a Pictionary web application using Gradio. We will be able to build the whole web application in Python, and it will look like the demo at the bottom of the page.
Let's get started! This guide covers how to build a Pictionary app, step by step:
1. [Set up the Sketch Recognition Model](#1-set-up-the-sketch-recognition-model)
2. [Define a `predict` function](#2-define-a-predict-function)
3. [Create a Gradio Interface](#3-create-a-gradio-interface)
### Prerequisites
Make sure you have the `gradio` Python package already [installed](/getting_started). To use the pretrained sketchpad model, also install `torch`.
## 1. Set up the Sketch Recognition Model
First, you will need a sketch recognition model. Since many researchers have already trained their own models on the Quick Draw dataset, we will use a pretrained model in this tutorial. Our model is a lightweight 1.5 MB model trained by Nate Raw, which [you can download here](https://huggingface.co/spaces/nateraw/quickdraw/blob/main/pytorch_model.bin).
If you are interested, here [is the code](https://github.com/nateraw/quickdraw-pytorch) that was used to train the model. We will simply load the pretrained model in PyTorch as follows (note that `LABELS`, used to size the final layer, is the class list we load in step 2):
```python
import torch
from torch import nn
model = nn.Sequential(
nn.Conv2d(1, 32, 3, padding='same'),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Conv2d(32, 64, 3, padding='same'),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Conv2d(64, 128, 3, padding='same'),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Flatten(),
nn.Linear(1152, 256),
nn.ReLU(),
nn.Linear(256, len(LABELS)),
)
state_dict = torch.load('pytorch_model.bin', map_location='cpu')
model.load_state_dict(state_dict, strict=False)
model.eval()
```
## 2. Define a `predict` function
Next, you will need to define a function that takes in the _user input_, which in this case is a sketched image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class names and whose values are confidence probabilities. We will load the class names from this [text file](https://huggingface.co/spaces/nateraw/quickdraw/blob/main/class_names.txt).
In the case of our pretrained model, it will look like this:
```python
from pathlib import Path
LABELS = Path('class_names.txt').read_text().splitlines()
def predict(img):
x = torch.tensor(img, dtype=torch.float32).unsqueeze(0).unsqueeze(0) / 255.
with torch.no_grad():
out = model(x)
probabilities = torch.nn.functional.softmax(out[0], dim=0)
values, indices = torch.topk(probabilities, 5)
confidences = {LABELS[i]: v.item() for i, v in zip(indices, values)}
return confidences
```
Let's break this down. The function takes one parameter:
- `img`: the input image as a `numpy` array
Then, the function converts the image to a PyTorch `tensor`, passes it through the model, and returns:
- `confidences`: the top five predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities (see the quick check below)
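Since the flatten size (128 · 3 · 3 = 1152) implies the 28×28 canvases Quick Draw uses, you can sanity-check the wiring before building the interface — a quick sketch with an all-zeros placeholder input:

```python
import numpy as np

blank = np.zeros((28, 28), dtype=np.uint8)  # same shape as a Quick Draw sketch
print(predict(blank))  # five {label: confidence} entries
```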
## 3. Create a Gradio Interface
Now that we have our predictive function set up, we can create a Gradio Interface around it.
In this case, the input component is a sketchpad. To create a sketchpad input, we can use the convenient string shortcut `"sketchpad"`, which creates a canvas for a user to draw on and handles the preprocessing to convert it to a numpy array.
The output component will be a `"label"`, which displays the top labels in a nice form.
Finally, we'll add one more parameter, `live=True`, which allows our interface to run in real time, updating its predictions every time a user draws on the sketchpad. The code for Gradio looks like this:
```python
import gradio as gr
gr.Interface(fn=predict,
inputs="sketchpad",
outputs="label",
live=True).launch()
```
This produces the following interface, which you can try right here in your browser (try drawing something, like a "snake" or a "laptop"):
<gradio-app space="gradio/pictionary">
---
And you're done! That's all the code you need to build a Pictionary-style guessing app. Have fun and try to find some edge cases 🧐

View File

@@ -1,108 +0,0 @@
# Building a Pictionary App
Related spaces: https://huggingface.co/spaces/nateraw/quickdraw
Tags: SKETCHPAD, LABELS, LIVE
## Introduction
How well can an algorithm guess what you're drawing? A few years ago, Google released the **Quick Draw** dataset, which contains drawings made by humans of a variety of everyday objects. Researchers have used this dataset to train models to guess Pictionary-style drawings.
Such models are a perfect match for Gradio's _sketchpad_ input, so in this tutorial we will build a Pictionary web application using Gradio. We will be able to build the whole web application in Python, and it will look like this (try drawing something!):
<iframe src="https://abidlabs-draw2.hf.space" frameBorder="0" height="450" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
Let's get started! This guide covers how to build a Pictionary app, step by step:
1. [Set up the Sketch Recognition Model](#1-set-up-the-sketch-recognition-model)
2. [Define a `predict` function](#2-define-a-predict-function)
3. [Create a Gradio Interface](#3-create-a-gradio-interface)
### Prerequisites
Make sure you have the `gradio` Python package already [installed](/getting_started). To use the pretrained sketchpad model, also install `torch`.
## 1. Set up the Sketch Recognition Model
First, you will need a sketch recognition model. Since many researchers have already trained their own models on the Quick Draw dataset, we will use a pretrained model in this tutorial. Our model is a lightweight 1.5 MB model trained by Nate Raw, which you can [download here](https://huggingface.co/spaces/nateraw/quickdraw/blob/main/pytorch_model.bin).
If you are interested, here [is the code](https://github.com/nateraw/quickdraw-pytorch) that was used to train the model. We will simply load the pretrained model in PyTorch, as follows:
```python
import torch
from torch import nn
model = nn.Sequential(
nn.Conv2d(1, 32, 3, padding='same'),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Conv2d(32, 64, 3, padding='same'),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Conv2d(64, 128, 3, padding='same'),
nn.ReLU(),
nn.MaxPool2d(2),
nn.Flatten(),
nn.Linear(1152, 256),
nn.ReLU(),
nn.Linear(256, len(LABELS)),
)
state_dict = torch.load('pytorch_model.bin', map_location='cpu')
model.load_state_dict(state_dict, strict=False)
model.eval()
```
## 2. Define a `predict` function
Next, you will need to define a function that takes in the _user input_ (in this case a sketched image) and returns the prediction. The prediction should be returned as a dictionary whose keys are class names and whose values are confidence probabilities. We will load the class names from this [text file](https://huggingface.co/spaces/nateraw/quickdraw/blob/main/class_names.txt).
For our pretrained model, the code looks like this:
```python
from pathlib import Path
LABELS = Path('class_names.txt').read_text().splitlines()
def predict(img):
x = torch.tensor(img, dtype=torch.float32).unsqueeze(0).unsqueeze(0) / 255.
with torch.no_grad():
out = model(x)
probabilities = torch.nn.functional.softmax(out[0], dim=0)
values, indices = torch.topk(probabilities, 5)
confidences = {LABELS[i]: v.item() for i, v in zip(indices, values)}
return confidences
```
Let's break this down. The function takes one parameter:
- `img`: the input image as a `numpy` array
Then, the function converts the image to a PyTorch `tensor`, passes it through the model, and returns:
- `confidences`: a dictionary of the top five predictions, whose keys are class labels and whose values are confidence probabilities
## 3. Create a Gradio Interface
Now that we have our predictive function set up, we can create a Gradio Interface around it.
In this case, the input component is a sketchpad: the convenient string shortcut `"sketchpad"` creates a canvas for the user to draw on and handles the preprocessing to convert it to a numpy array.
The output component will be a `"label"`, which displays the top labels in a nice form.
Finally, we'll add one more parameter, `live=True`, which allows our interface to run in real time, updating its predictions every time the user draws on the sketchpad. The Gradio code looks like this:
```python
import gradio as gr
gr.Interface(fn=predict,
inputs="sketchpad",
outputs="label",
live=True).launch()
```
This produces the following interface, which you can try right in your browser (try drawing something, like a "snake" or a "laptop"):
<iframe src="https://abidlabs-draw2.hf.space" frameBorder="0" height="450" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
---
And you're done! That's all the code you need to build a Pictionary-style guessing app. Have fun and try to find some edge cases 🧐

View File

@@ -1,9 +1,10 @@
-export const sketch = `<pre class="language-python"><code class="language-python"><span class="token keyword">import</span> gradio <span class="token keyword">as</span> gr
-<span class="token keyword">def</span> <span class="token function">sketch_recognition</span><span class="token punctuation">(</span>img<span class="token punctuation">)</span><span class="token punctuation">:</span>
-    <span class="token keyword">pass</span><span class="token comment"> # Implement your sketch recognition model here...</span>
-gr<span class="token punctuation">.</span>Interface<span class="token punctuation">(</span>fn<span class="token operator">=</span>sketch_recognition<span class="token punctuation">,</span> inputs<span class="token operator">=</span><span class="token string">"sketchpad"</span><span class="token punctuation">,</span> outputs<span class="token operator">=</span><span class="token string">"label"</span><span class="token punctuation">).</span>launch<span class="token punctuation">(</span><span class="token punctuation">)</span>
-</code></pre>`;
+export const hello = `<pre class="language-python"><code class="language-python"><span class="token keyword">import</span> gradio <span class="token keyword">as</span> gr
+<span class="token keyword">def</span> <span class="token function">greet</span><span class="token punctuation">(</span>name<span class="token punctuation">)</span><span class="token punctuation">:</span>
+    <span class="token keyword">return</span> <span class="token string">"Hello "</span> <span class="token operator">+</span> name <span class="token operator">+</span> <span class="token string">"!"</span>
+demo <span class="token operator">=</span> gr<span class="token punctuation">.</span>Interface<span class="token punctuation">(</span>fn<span class="token operator">=</span>greet<span class="token punctuation">,</span> inputs<span class="token operator">=</span><span class="token string">"text"</span><span class="token punctuation">,</span> outputs<span class="token operator">=</span><span class="token string">"text"</span><span class="token punctuation">)</span>
+demo<span class="token punctuation">.</span>launch<span class="token punctuation">(</span><span class="token punctuation">)</span> </code></pre>`;
export const chat = `<pre class="language-python"><code class="language-python"><span class="token keyword">import</span> gradio <span class="token keyword">as</span> gr
<span class="token keyword">def</span> <span class="token function">chat</span><span class="token punctuation">(</span>message<span class="token punctuation">, </span>history<span class="token punctuation"></span>)<span class="token punctuation">:</span>

View File

@@ -1,26 +1,26 @@
 <script lang="ts">
-    import { sketch, chat } from "../assets/demo_code";
+    import { hello } from "../assets/demo_code";
     let tabs = [
         {
-            title: "Sketch Recognition",
-            code: sketch,
-            demo: "gradio/pictionary"
+            title: "Hello World",
+            code: hello,
+            demo: "gradio/hello_world"
         },
         {
-            title: "Time Series Forecasting",
+            title: "Airbnb Map",
             code: false,
-            demo: "gradio/timeseries-forecasting-with-prophet"
+            demo: "gradio/map_airbnb"
         },
         {
-            title: "XGBoost with Explainability",
+            title: "Chatbot Streaming",
             code: false,
-            demo: "gradio/xgboost-income-prediction-with-explainability"
+            demo: "gradio/chatinterface_streaming_echo"
         },
         {
-            title: "Chat with Llama 2",
-            code: chat,
-            demo: "ysharma/Explore_llamav2_with_TGI"
+            title: "Diffusion Faces",
+            code: false,
+            demo: "gradio/fake_gan"
         }
     ];

View File

@@ -110,6 +110,7 @@
     target={dummy_elem}
     gradio={dummy_gradio}
     lines={10}
+    interactive="true"
 />
 </div>
 {/each}

View File

@@ -13,7 +13,6 @@ def copy_all_demos(source_dir: str, dest_dir: str):
     "blocks_group",
     "blocks_js_methods",
     "blocks_layout",
-    "blocks_mask",
     "blocks_multiple_event_triggers",
     "blocks_update",
     "calculator",