Minor bug fix sweep (#5284)

* changes

* changes

* add changeset

* changes

* changes

* fix box changes on website

* add changeset

* changes

* changes

* Revert "changes"

This reverts commit 189b4e844a9d30a45f4d4fb855397ae592a9b725.

* changes

* changes

* changes

* changes

* changes

* add changeset

* Update fancy-bats-deny.md

---------

Co-authored-by: gradio-pr-bot <gradio-pr-bot@users.noreply.github.com>
Co-authored-by: aliabd <ali.si3luwa@gmail.com>
Co-authored-by: Abubakar Abid <abubakar@huggingface.co>
aliabid94 2023-08-25 11:31:18 -07:00 committed by GitHub
parent df090e89f7
commit 5f25eb6836
52 changed files with 171 additions and 167 deletions

View File

@@ -0,0 +1,10 @@
---
"@gradio/highlightedtext": patch
"gradio": patch
"website": patch
---
feat:Minor bug fix sweep
- Our use of `__exit__` was catching errors and corrupting the traceback of any component that failed to instantiate (try running `blocks_kitchen_sink` off `main` for an example). Now `__exit__` returns immediately if an exception has been raised, so the original exception is printed cleanly.
- HighlightedText was rendering oddly; it has been cleaned up.
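As a quick illustration of the `__exit__` fix described above, here is a minimal sketch of the context-manager behavior (illustrative only; `BlocksLikeContext` and `_finalize` are made-up names, not gradio's actual `Blocks` implementation): when an exception is already propagating, `__exit__` should bail out at once and return `False` so the original traceback reaches the caller untouched, rather than running teardown logic that can mask or corrupt it.

class BlocksLikeContext:
    """Illustrative sketch only -- not gradio's actual Blocks class."""

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if exc_type is not None:
            # Something inside the `with` block raised (e.g. a component
            # failed to instantiate): exit immediately and return False so
            # the original exception propagates with a clean traceback.
            return False
        # Normal exit: safe to run the usual teardown / rendering work.
        self._finalize()
        return False

    def _finalize(self):
        pass  # placeholder for real teardown work


# Usage: the ZeroDivisionError surfaces with its original traceback.
try:
    with BlocksLikeContext():
        1 / 0
except ZeroDivisionError:
    print("original exception reached the caller cleanly")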

View File

@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: barplot_component"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import pandas as pd\n", "\n", "simple = pd.DataFrame(\n", " {\n", " \"item\": [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\"],\n", " \"inventory\": [28, 55, 43, 91, 81, 53, 19, 87, 52],\n", " }\n", ")\n", "\n", "with gr.Blocks() as demo:\n", " gr.BarPlot(value=simple, x=\"item\", y=\"inventory\", title=\"Simple Bar Plot\").style(\n", " container=False,\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: barplot_component"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import pandas as pd\n", "\n", "simple = pd.DataFrame(\n", " {\n", " \"item\": [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\"],\n", " \"inventory\": [28, 55, 43, 91, 81, 53, 19, 87, 52],\n", " }\n", ")\n", "\n", "with gr.Blocks() as demo:\n", " gr.BarPlot(\n", " value=simple,\n", " x=\"item\",\n", " y=\"inventory\",\n", " title=\"Simple Bar Plot\",\n", " container=False,\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}

View File

@@ -9,7 +9,11 @@ simple = pd.DataFrame(
)
with gr.Blocks() as demo:
gr.BarPlot(value=simple, x="item", y="inventory", title="Simple Bar Plot").style(
gr.BarPlot(
value=simple,
x="item",
y="inventory",
title="Simple Bar Plot",
container=False,
)
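The hunk above is representative of the pattern applied throughout this sweep: options that used to be passed through the deprecated `.style()` call are now plain constructor keyword arguments. A minimal before/after sketch, assuming a gradio version in which `BarPlot` accepts `container` directly (exact keyword support varies by component and version):

import gradio as gr
import pandas as pd

simple = pd.DataFrame({"item": ["A", "B", "C"], "inventory": [28, 55, 43]})

with gr.Blocks() as demo:
    # Old (deprecated): gr.BarPlot(value=simple, x="item", y="inventory").style(container=False)
    # New: the same option goes straight into the constructor.
    gr.BarPlot(value=simple, x="item", y="inventory", container=False)

if __name__ == "__main__":
    demo.launch()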

View File

@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_essay"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "\n", "def change_textbox(choice):\n", " if choice == \"short\":\n", " return gr.Textbox.update(lines=2, visible=True)\n", " elif choice == \"long\":\n", " return gr.Textbox.update(lines=8, visible=True)\n", " else:\n", " return gr.Textbox.update(visible=False)\n", "\n", "\n", "with gr.Blocks() as demo:\n", " radio = gr.Radio(\n", " [\"short\", \"long\", \"none\"], label=\"What kind of essay would you like to write?\"\n", " )\n", " text = gr.Textbox(lines=2, interactive=True).style(show_copy_button=True)\n", "\n", " radio.change(fn=change_textbox, inputs=radio, outputs=text)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_essay"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "\n", "def change_textbox(choice):\n", " if choice == \"short\":\n", " return gr.Textbox.update(lines=2, visible=True)\n", " elif choice == \"long\":\n", " return gr.Textbox.update(lines=8, visible=True)\n", " else:\n", " return gr.Textbox.update(visible=False)\n", "\n", "\n", "with gr.Blocks() as demo:\n", " radio = gr.Radio(\n", " [\"short\", \"long\", \"none\"], label=\"What kind of essay would you like to write?\"\n", " )\n", " text = gr.Textbox(lines=2, interactive=True, show_copy_button=True)\n", "\n", " radio.change(fn=change_textbox, inputs=radio, outputs=text)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}

View File

@@ -14,7 +14,7 @@ with gr.Blocks() as demo:
radio = gr.Radio(
["short", "long", "none"], label="What kind of essay would you like to write?"
)
text = gr.Textbox(lines=2, interactive=True).style(show_copy_button=True)
text = gr.Textbox(lines=2, interactive=True, show_copy_button=True)
radio.change(fn=change_textbox, inputs=radio, outputs=text)

View File

@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_flashcards"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import random\n", "\n", "import gradio as gr\n", "\n", "demo = gr.Blocks()\n", "\n", "with demo:\n", " gr.Markdown(\n", " \"Load the flashcards in the table below, then use the Practice tab to practice.\"\n", " )\n", "\n", " with gr.Tab(\"Word Bank\"):\n", " flashcards_table = gr.Dataframe(headers=[\"front\", \"back\"], type=\"array\")\n", " with gr.Tab(\"Practice\"):\n", " with gr.Row():\n", " with gr.Column():\n", " front = gr.Textbox(label=\"Prompt\")\n", " with gr.Row():\n", " new_btn = gr.Button(\"New Card\").style(full_width=True)\n", " flip_btn = gr.Button(\"Flip Card\").style(full_width=True)\n", " with gr.Column(visible=False) as answer_col:\n", " back = gr.Textbox(label=\"Answer\")\n", " selected_card = gr.State()\n", " with gr.Row():\n", " correct_btn = gr.Button(\n", " \"Correct\",\n", " ).style(full_width=True)\n", " incorrect_btn = gr.Button(\"Incorrect\").style(full_width=True)\n", "\n", " with gr.Tab(\"Results\"):\n", " results = gr.State(value={})\n", " correct_field = gr.Markdown(\"# Correct: 0\")\n", " incorrect_field = gr.Markdown(\"# Incorrect: 0\")\n", " gr.Markdown(\"Card Statistics: \")\n", " results_table = gr.Dataframe(headers=[\"Card\", \"Correct\", \"Incorrect\"])\n", "\n", " def load_new_card(flashcards):\n", " card = random.choice(flashcards)\n", " return (\n", " card,\n", " card[0],\n", " gr.Column.update(visible=False),\n", " )\n", "\n", " new_btn.click(\n", " load_new_card,\n", " [flashcards_table],\n", " [selected_card, front, answer_col],\n", " )\n", "\n", " def flip_card(card):\n", " return card[1], gr.Column.update(visible=True)\n", "\n", " flip_btn.click(flip_card, [selected_card], [back, answer_col])\n", "\n", " def mark_correct(card, results):\n", " if card[0] not in results:\n", " results[card[0]] = [0, 0]\n", " results[card[0]][0] += 1\n", " correct_count = sum(result[0] for result in results.values())\n", " return (\n", " results,\n", " f\"# Correct: {correct_count}\",\n", " [[front, scores[0], scores[1]] for front, scores in results.items()],\n", " )\n", "\n", " def mark_incorrect(card, results):\n", " if card[0] not in results:\n", " results[card[0]] = [0, 0]\n", " results[card[0]][1] += 1\n", " incorrect_count = sum(result[1] for result in results.values())\n", " return (\n", " results,\n", " f\"# Inorrect: {incorrect_count}\",\n", " [[front, scores[0], scores[1]] for front, scores in results.items()],\n", " )\n", "\n", " correct_btn.click(\n", " mark_correct,\n", " [selected_card, results],\n", " [results, correct_field, results_table],\n", " )\n", "\n", " incorrect_btn.click(\n", " mark_incorrect,\n", " [selected_card, results],\n", " [results, incorrect_field, results_table],\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_flashcards"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import random\n", "\n", "import gradio as gr\n", "\n", "demo = gr.Blocks()\n", "\n", "with demo:\n", " gr.Markdown(\n", " \"Load the flashcards in the table below, then use the Practice tab to practice.\"\n", " )\n", "\n", " with gr.Tab(\"Word Bank\"):\n", " flashcards_table = gr.Dataframe(headers=[\"front\", \"back\"], type=\"array\")\n", " with gr.Tab(\"Practice\"):\n", " with gr.Row():\n", " with gr.Column():\n", " front = gr.Textbox(label=\"Prompt\")\n", " with gr.Row():\n", " new_btn = gr.Button(\"New Card\")\n", " flip_btn = gr.Button(\"Flip Card\")\n", " with gr.Column(visible=False) as answer_col:\n", " back = gr.Textbox(label=\"Answer\")\n", " selected_card = gr.State()\n", " with gr.Row():\n", " correct_btn = gr.Button(\"Correct\")\n", " incorrect_btn = gr.Button(\"Incorrect\")\n", "\n", " with gr.Tab(\"Results\"):\n", " results = gr.State(value={})\n", " correct_field = gr.Markdown(\"# Correct: 0\")\n", " incorrect_field = gr.Markdown(\"# Incorrect: 0\")\n", " gr.Markdown(\"Card Statistics: \")\n", " results_table = gr.Dataframe(headers=[\"Card\", \"Correct\", \"Incorrect\"])\n", "\n", " def load_new_card(flashcards):\n", " card = random.choice(flashcards)\n", " return (\n", " card,\n", " card[0],\n", " gr.Column.update(visible=False),\n", " )\n", "\n", " new_btn.click(\n", " load_new_card,\n", " [flashcards_table],\n", " [selected_card, front, answer_col],\n", " )\n", "\n", " def flip_card(card):\n", " return card[1], gr.Column.update(visible=True)\n", "\n", " flip_btn.click(flip_card, [selected_card], [back, answer_col])\n", "\n", " def mark_correct(card, results):\n", " if card[0] not in results:\n", " results[card[0]] = [0, 0]\n", " results[card[0]][0] += 1\n", " correct_count = sum(result[0] for result in results.values())\n", " return (\n", " results,\n", " f\"# Correct: {correct_count}\",\n", " [[front, scores[0], scores[1]] for front, scores in results.items()],\n", " )\n", "\n", " def mark_incorrect(card, results):\n", " if card[0] not in results:\n", " results[card[0]] = [0, 0]\n", " results[card[0]][1] += 1\n", " incorrect_count = sum(result[1] for result in results.values())\n", " return (\n", " results,\n", " f\"# Inorrect: {incorrect_count}\",\n", " [[front, scores[0], scores[1]] for front, scores in results.items()],\n", " )\n", "\n", " correct_btn.click(\n", " mark_correct,\n", " [selected_card, results],\n", " [results, correct_field, results_table],\n", " )\n", "\n", " incorrect_btn.click(\n", " mark_incorrect,\n", " [selected_card, results],\n", " [results, incorrect_field, results_table],\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}

View File

@@ -16,16 +16,14 @@ with demo:
with gr.Column():
front = gr.Textbox(label="Prompt")
with gr.Row():
new_btn = gr.Button("New Card").style(full_width=True)
flip_btn = gr.Button("Flip Card").style(full_width=True)
new_btn = gr.Button("New Card")
flip_btn = gr.Button("Flip Card")
with gr.Column(visible=False) as answer_col:
back = gr.Textbox(label="Answer")
selected_card = gr.State()
with gr.Row():
correct_btn = gr.Button(
"Correct",
).style(full_width=True)
incorrect_btn = gr.Button("Incorrect").style(full_width=True)
correct_btn = gr.Button("Correct")
incorrect_btn = gr.Button("Incorrect")
with gr.Tab("Results"):
results = gr.State(value={})

View File

@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_joined"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/cheetah1.jpg https://github.com/gradio-app/gradio/raw/main/demo/blocks_joined/files/cheetah1.jpg"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["from time import sleep\n", "import gradio as gr\n", "import os\n", "\n", "cheetah = os.path.join(os.path.abspath(''), \"files/cheetah1.jpg\")\n", "\n", "\n", "def img(text):\n", " sleep(3)\n", " return [\n", " cheetah,\n", " cheetah,\n", " cheetah,\n", " cheetah,\n", " cheetah,\n", " cheetah,\n", " cheetah,\n", " cheetah,\n", " cheetah,\n", " ]\n", "\n", "\n", "with gr.Blocks(css=\".container { max-width: 800px; margin: auto; }\") as demo:\n", " gr.Markdown(\"<h1><center>DALL\u00b7E mini</center></h1>\")\n", " gr.Markdown(\n", " \"DALL\u00b7E mini is an AI model that generates images from any prompt you give!\"\n", " )\n", " with gr.Group():\n", " with gr.Box():\n", " with gr.Row().style(equal_height=True):\n", "\n", " text = gr.Textbox(\n", " label=\"Enter your prompt\", show_label=False, max_lines=1\n", " ).style(\n", " border=(True, False, True, True),\n", " rounded=(True, False, False, True),\n", " container=False,\n", " )\n", " btn = gr.Button(\"Run\").style(\n", " margin=False,\n", " rounded=(False, True, True, False),\n", " )\n", " gallery = gr.Gallery(label=\"Generated images\", show_label=False).style(\n", " grid=(\n", " 1,\n", " 3,\n", " ),\n", " height=\"auto\",\n", " )\n", " btn.click(img, inputs=text, outputs=gallery)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n", "\n", "\n", "# margin = (TOP, RIGHT, BOTTOM, LEFT)\n", "# rounded = (TOPLEFT, TOPRIGHT, BOTTOMRIGHT, BOTTOMLEFT)\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_joined"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/cheetah1.jpg https://github.com/gradio-app/gradio/raw/main/demo/blocks_joined/files/cheetah1.jpg"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["from time import sleep\n", "import gradio as gr\n", "import os\n", "\n", "cheetah = os.path.join(os.path.abspath(''), \"files/cheetah1.jpg\")\n", "\n", "\n", "def img(text):\n", " sleep(3)\n", " return [\n", " cheetah,\n", " cheetah,\n", " cheetah,\n", " cheetah,\n", " cheetah,\n", " cheetah,\n", " cheetah,\n", " cheetah,\n", " cheetah,\n", " ]\n", "\n", "\n", "with gr.Blocks(css=\".container { max-width: 800px; margin: auto; }\") as demo:\n", " gr.Markdown(\"<h1><center>DALL\u00b7E mini</center></h1>\")\n", " gr.Markdown(\n", " \"DALL\u00b7E mini is an AI model that generates images from any prompt you give!\"\n", " )\n", " with gr.Group():\n", " with gr.Row(equal_height=True):\n", " text = gr.Textbox(\n", " label=\"Enter your prompt\",\n", " max_lines=1,\n", " container=False,\n", " )\n", " btn = gr.Button(\"Run\", scale=0)\n", " gallery = gr.Gallery(\n", " label=\"Generated images\",\n", " show_label=False,\n", " columns=(1, 3),\n", " height=\"auto\",\n", " )\n", " btn.click(img, inputs=text, outputs=gallery)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n", "\n", "\n", "# margin = (TOP, RIGHT, BOTTOM, LEFT)\n", "# rounded = (TOPLEFT, TOPRIGHT, BOTTOMRIGHT, BOTTOMLEFT)\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}

View File

@@ -26,25 +26,17 @@ with gr.Blocks(css=".container { max-width: 800px; margin: auto; }") as demo:
"DALL·E mini is an AI model that generates images from any prompt you give!"
)
with gr.Group():
with gr.Box():
with gr.Row().style(equal_height=True):
text = gr.Textbox(
label="Enter your prompt", show_label=False, max_lines=1
).style(
border=(True, False, True, True),
rounded=(True, False, False, True),
container=False,
)
btn = gr.Button("Run").style(
margin=False,
rounded=(False, True, True, False),
)
gallery = gr.Gallery(label="Generated images", show_label=False).style(
grid=(
1,
3,
),
with gr.Row(equal_height=True):
text = gr.Textbox(
label="Enter your prompt",
max_lines=1,
container=False,
)
btn = gr.Button("Run", scale=0)
gallery = gr.Gallery(
label="Generated images",
show_label=False,
columns=(1, 3),
height="auto",
)
btn.click(img, inputs=text, outputs=gallery)

File diff suppressed because one or more lines are too long

View File

@@ -25,7 +25,7 @@ with gr.Blocks(theme=base_theme) as demo:
- Custom progress bar
"""
)
toggle_dark = gr.Button("Toggle Dark").style(full_width=False)
toggle_dark = gr.Button("Toggle Dark", scale=0)
toggle_dark.click(
None,
_js="""
@@ -112,8 +112,10 @@ with gr.Blocks(theme=base_theme) as demo:
check = gr.Checkbox(label="Go")
with gr.Column(variant="panel", scale=2):
img = gr.Image(
"https://gradio.app/assets/img/header-image.jpg", label="Image"
).style(height=320)
"https://picsum.photos/536/354",
label="Image",
height=320,
)
with gr.Row():
go_btn = gr.Button("Go", label="Primary Button", variant="primary")
clear_btn = gr.Button(
@@ -133,10 +135,10 @@ with gr.Blocks(theme=base_theme) as demo:
clear_btn.click(clear, None, img)
with gr.Row():
btn1 = gr.Button("Button 1").style(size="sm")
btn2 = gr.UploadButton().style(size="sm")
stop_btn = gr.Button("Stop", label="Stop Button", variant="stop").style(
size="sm"
btn1 = gr.Button("Button 1", size="sm")
btn2 = gr.UploadButton(size="sm")
stop_btn = gr.Button(
"Stop", label="Stop Button", variant="stop", size="sm"
)
gr.Examples(
@@ -236,7 +238,7 @@ with gr.Blocks(theme=base_theme) as demo:
highlight,
chatbot,
gallery,
tabs
tabs,
]
def select_data(evt: gr.SelectData):
@@ -258,7 +260,7 @@ with gr.Blocks(theme=base_theme) as demo:
component_example_set = [
(gr.Audio(render=False), join(KS_FILES, "cantina.wav")),
(gr.Checkbox(render=False), True),
(gr.CheckboxGroup(render=False), ["A", "B"]),
(gr.CheckboxGroup(render=False, choices=["A", "B"]), ["A", "B"]),
(gr.ColorPicker(render=False), "#FF0000"),
(gr.Dataframe(render=False), [[1, 2, 3], [4, 5, 6]]),
(gr.Dropdown(render=False), "A"),
@@ -284,4 +286,4 @@ with gr.Blocks(theme=base_theme) as demo:
if __name__ == "__main__":
demo.launch(file_directories=[KS_FILES])
demo.launch(allowed_paths=[KS_FILES])
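Alongside the `.style()` removals, this hunk renames the launch argument `file_directories` to `allowed_paths`. A minimal sketch of the new call, assuming a gradio version that accepts `allowed_paths` (the directory path below is purely illustrative):

import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("kitchen-sink style demo")

if __name__ == "__main__":
    # Paths listed here may be served to the browser by the app
    # (previously passed as file_directories=[...]).
    demo.launch(allowed_paths=["/tmp/ks_files"])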

View File

@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_layout"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "\n", "demo = gr.Blocks()\n", "\n", "with demo:\n", " with gr.Row():\n", " gr.Image(interactive=True, scale=2)\n", " gr.Image()\n", " with gr.Row():\n", " gr.Textbox(label=\"Text\")\n", " gr.Number(label=\"Count\", scale=2)\n", " gr.Radio(choices=[\"One\", \"Two\"])\n", " with gr.Row():\n", " gr.Button(\"500\", scale=0, min_width=500)\n", " gr.Button(\"A\").style(full_width=False)\n", " gr.Button(\"grow\")\n", " with gr.Row():\n", " gr.Textbox()\n", " gr.Textbox()\n", " gr.Button() \n", " with gr.Row():\n", " with gr.Row():\n", " with gr.Column():\n", " gr.Textbox(label=\"Text\")\n", " gr.Number(label=\"Count\")\n", " gr.Radio(choices=[\"One\", \"Two\"])\n", " gr.Image()\n", " with gr.Column():\n", " gr.Image(interactive=True)\n", " gr.Image()\n", " gr.Image()\n", " gr.Textbox(label=\"Text\")\n", " gr.Number(label=\"Count\")\n", " gr.Radio(choices=[\"One\", \"Two\"])\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_layout"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "\n", "demo = gr.Blocks()\n", "\n", "with demo:\n", " with gr.Row():\n", " gr.Image(interactive=True, scale=2)\n", " gr.Image()\n", " with gr.Row():\n", " gr.Textbox(label=\"Text\")\n", " gr.Number(label=\"Count\", scale=2)\n", " gr.Radio(choices=[\"One\", \"Two\"])\n", " with gr.Row():\n", " gr.Button(\"500\", scale=0, min_width=500)\n", " gr.Button(\"A\", scale=0)\n", " gr.Button(\"grow\")\n", " with gr.Row():\n", " gr.Textbox()\n", " gr.Textbox()\n", " gr.Button() \n", " with gr.Row():\n", " with gr.Row():\n", " with gr.Column():\n", " gr.Textbox(label=\"Text\")\n", " gr.Number(label=\"Count\")\n", " gr.Radio(choices=[\"One\", \"Two\"])\n", " gr.Image()\n", " with gr.Column():\n", " gr.Image(interactive=True)\n", " gr.Image()\n", " gr.Image()\n", " gr.Textbox(label=\"Text\")\n", " gr.Number(label=\"Count\")\n", " gr.Radio(choices=[\"One\", \"Two\"])\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}

View File

@@ -13,7 +13,7 @@ with demo:
gr.Radio(choices=["One", "Two"])
with gr.Row():
gr.Button("500", scale=0, min_width=500)
gr.Button("A").style(full_width=False)
gr.Button("A", scale=0)
gr.Button("grow")
with gr.Row():
gr.Textbox()

View File

@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_style"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks(title=\"Styling Examples\") as demo:\n", " with gr.Column(variant=\"box\"):\n", " txt = gr.Textbox(label=\"Small Textbox\", lines=1)\n", " num = gr.Number(label=\"Number\", show_label=False)\n", " slider = gr.Slider(label=\"Slider\", show_label=False)\n", " check = gr.Checkbox(label=\"Checkbox\", show_label=False)\n", " check_g = gr.CheckboxGroup(\n", " label=\"Checkbox Group\",\n", " choices=[\"One\", \"Two\", \"Three\"],\n", " show_label=False,\n", " )\n", " radio = gr.Radio(\n", " label=\"Radio\", choices=[\"One\", \"Two\", \"Three\"], show_label=False\n", " ).style(\n", " item_container=False,\n", " )\n", " drop = gr.Dropdown(\n", " label=\"Dropdown\", choices=[\"One\", \"Two\", \"Three\"], show_label=False\n", " )\n", " image = gr.Image(show_label=False)\n", " video = gr.Video(show_label=False)\n", " audio = gr.Audio(show_label=False)\n", " file = gr.File(show_label=False)\n", " df = gr.Dataframe(show_label=False)\n", " ts = gr.Timeseries(show_label=False)\n", " label = gr.Label().style(\n", " container=False,\n", " )\n", " highlight = gr.HighlightedText(\n", " \"+ hello. - goodbye\",\n", " show_label=False,\n", " ).style(color_map={\"+\": \"green\", \"-\": \"red\"}, container=False)\n", " json = gr.JSON().style(container=False)\n", " html = gr.HTML(show_label=False)\n", " gallery = gr.Gallery().style(\n", " grid=(3, 3, 1),\n", " height=\"auto\",\n", " container=False,\n", " )\n", " chat = gr.Chatbot([(\"hi\", \"good bye\")]).style(color_map=(\"pink\", \"blue\"))\n", "\n", " model = gr.Model3D()\n", "\n", " md = gr.Markdown(show_label=False)\n", "\n", " highlight = gr.HighlightedText()\n", "\n", " btn = gr.Button(\"Run\").style(\n", " full_width=True,\n", " )\n", "\n", " gr.Dataset(components=[txt, num])\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: blocks_style"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks(title=\"Styling Examples\") as demo:\n", " with gr.Column(variant=\"box\"):\n", " txt = gr.Textbox(label=\"Small Textbox\", lines=1)\n", " num = gr.Number(label=\"Number\", show_label=False)\n", " slider = gr.Slider(label=\"Slider\", show_label=False)\n", " check = gr.Checkbox(label=\"Checkbox\", show_label=False)\n", " check_g = gr.CheckboxGroup(\n", " label=\"Checkbox Group\",\n", " choices=[\"One\", \"Two\", \"Three\"],\n", " show_label=False,\n", " )\n", " radio = gr.Radio(\n", " label=\"Radio\",\n", " choices=[\"One\", \"Two\", \"Three\"],\n", " show_label=False,\n", " )\n", " drop = gr.Dropdown(\n", " label=\"Dropdown\", choices=[\"One\", \"Two\", \"Three\"], show_label=False\n", " )\n", " image = gr.Image(show_label=False)\n", " video = gr.Video(show_label=False)\n", " audio = gr.Audio(show_label=False)\n", " file = gr.File(show_label=False)\n", " df = gr.Dataframe(show_label=False)\n", " ts = gr.Timeseries(show_label=False)\n", " label = gr.Label(container=False)\n", " highlight = gr.HighlightedText(\n", " [(\"hello\", None), (\"goodbye\", \"-\")],\n", " color_map={\"+\": \"green\", \"-\": \"red\"},\n", " container=False,\n", " )\n", " json = gr.JSON(container=False)\n", " html = gr.HTML(show_label=False)\n", " gallery = gr.Gallery(\n", " columns=(3, 3, 1),\n", " height=\"auto\",\n", " container=False,\n", " )\n", " chat = gr.Chatbot([(\"hi\", \"good bye\")])\n", "\n", " model = gr.Model3D()\n", "\n", " md = gr.Markdown(show_label=False)\n", "\n", " highlight = gr.HighlightedText()\n", "\n", " btn = gr.Button(\"Run\")\n", "\n", " gr.Dataset(components=[txt, num])\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}

View File

@@ -12,9 +12,9 @@ with gr.Blocks(title="Styling Examples") as demo:
show_label=False,
)
radio = gr.Radio(
label="Radio", choices=["One", "Two", "Three"], show_label=False
).style(
item_container=False,
label="Radio",
choices=["One", "Two", "Three"],
show_label=False,
)
drop = gr.Dropdown(
label="Dropdown", choices=["One", "Two", "Three"], show_label=False
@@ -25,21 +25,20 @@ with gr.Blocks(title="Styling Examples") as demo:
file = gr.File(show_label=False)
df = gr.Dataframe(show_label=False)
ts = gr.Timeseries(show_label=False)
label = gr.Label().style(
label = gr.Label(container=False)
highlight = gr.HighlightedText(
[("hello", None), ("goodbye", "-")],
color_map={"+": "green", "-": "red"},
container=False,
)
highlight = gr.HighlightedText(
"+ hello. - goodbye",
show_label=False,
).style(color_map={"+": "green", "-": "red"}, container=False)
json = gr.JSON().style(container=False)
json = gr.JSON(container=False)
html = gr.HTML(show_label=False)
gallery = gr.Gallery().style(
grid=(3, 3, 1),
gallery = gr.Gallery(
columns=(3, 3, 1),
height="auto",
container=False,
)
chat = gr.Chatbot([("hi", "good bye")]).style(color_map=("pink", "blue"))
chat = gr.Chatbot([("hi", "good bye")])
model = gr.Model3D()
@@ -47,9 +46,7 @@ with gr.Blocks(title="Styling Examples") as demo:
highlight = gr.HighlightedText()
btn = gr.Button("Run").style(
full_width=True,
)
btn = gr.Button("Run")
gr.Dataset(components=[txt, num])
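This hunk touches the HighlightedText cleanup mentioned in the changeset: the value is now a list of `(token, label)` tuples, and `color_map`/`container` are constructor keywords instead of `.style()` options. A minimal standalone sketch, assuming a gradio version with this constructor signature:

import gradio as gr

with gr.Blocks() as demo:
    gr.HighlightedText(
        [("hello", None), ("goodbye", "-")],  # (token, label) pairs
        color_map={"+": "green", "-": "red"},
        show_legend=True,
    )

if __name__ == "__main__":
    demo.launch()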

View File

@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: chatbot_multimodal"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/chatbot_multimodal/avatar.png"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "import time\n", "\n", "# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.\n", "\n", "def add_text(history, text):\n", " history = history + [(text, None)]\n", " return history, gr.update(value=\"\", interactive=False)\n", "\n", "\n", "def add_file(history, file):\n", " history = history + [((file.name,), None)]\n", " return history\n", "\n", "\n", "def bot(history):\n", " response = \"**That's cool!**\"\n", " history[-1][1] = \"\"\n", " for character in response:\n", " history[-1][1] += character\n", " time.sleep(0.05)\n", " yield history\n", "\n", "\n", "with gr.Blocks() as demo:\n", " chatbot = gr.Chatbot([], elem_id=\"chatbot\", height=750, avatar_images=(None, (os.path.join(os.path.abspath(''), \"avatar.png\"))))\n", "\n", " with gr.Row():\n", " with gr.Column(scale=0.85):\n", " txt = gr.Textbox(\n", " show_label=False,\n", " placeholder=\"Enter text and press enter, or upload an image\",\n", " container=False)\n", " with gr.Column(scale=0.15, min_width=0):\n", " btn = gr.UploadButton(\"\ud83d\udcc1\", file_types=[\"image\", \"video\", \"audio\"])\n", "\n", " txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(\n", " bot, chatbot, chatbot\n", " )\n", " txt_msg.then(lambda: gr.update(interactive=True), None, [txt], queue=False)\n", " file_msg = btn.upload(add_file, [chatbot, btn], [chatbot], queue=False).then(\n", " bot, chatbot, chatbot\n", " )\n", "\n", "demo.queue()\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: chatbot_multimodal"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/chatbot_multimodal/avatar.png"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "import time\n", "\n", "# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.\n", "\n", "\n", "def add_text(history, text):\n", " history = history + [(text, None)]\n", " return history, gr.update(value=\"\", interactive=False)\n", "\n", "\n", "def add_file(history, file):\n", " history = history + [((file.name,), None)]\n", " return history\n", "\n", "\n", "def bot(history):\n", " response = \"**That's cool!**\"\n", " history[-1][1] = \"\"\n", " for character in response:\n", " history[-1][1] += character\n", " time.sleep(0.05)\n", " yield history\n", "\n", "\n", "with gr.Blocks() as demo:\n", " chatbot = gr.Chatbot(\n", " [],\n", " elem_id=\"chatbot\",\n", " avatar_images=(None, (os.path.join(os.path.abspath(''), \"avatar.png\"))),\n", " )\n", "\n", " with gr.Row():\n", " txt = gr.Textbox(\n", " scale=4,\n", " show_label=False,\n", " placeholder=\"Enter text and press enter, or upload an image\",\n", " container=False,\n", " )\n", " btn = gr.UploadButton(\"\ud83d\udcc1\", file_types=[\"image\", \"video\", \"audio\"])\n", "\n", " txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(\n", " bot, chatbot, chatbot\n", " )\n", " txt_msg.then(lambda: gr.update(interactive=True), None, [txt], queue=False)\n", " file_msg = btn.upload(add_file, [chatbot, btn], [chatbot], queue=False).then(\n", " bot, chatbot, chatbot\n", " )\n", "\n", "demo.queue()\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}

View File

@@ -4,6 +4,7 @@ import time
# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.
def add_text(history, text):
history = history + [(text, None)]
return history, gr.update(value="", interactive=False)
@@ -24,16 +25,20 @@ def bot(history):
with gr.Blocks() as demo:
chatbot = gr.Chatbot([], elem_id="chatbot", height=750, avatar_images=(None, (os.path.join(os.path.dirname(__file__), "avatar.png"))))
chatbot = gr.Chatbot(
[],
elem_id="chatbot",
avatar_images=(None, (os.path.join(os.path.dirname(__file__), "avatar.png"))),
)
with gr.Row():
with gr.Column(scale=0.85):
txt = gr.Textbox(
show_label=False,
placeholder="Enter text and press enter, or upload an image",
container=False)
with gr.Column(scale=0.15, min_width=0):
btn = gr.UploadButton("📁", file_types=["image", "video", "audio"])
txt = gr.Textbox(
scale=4,
show_label=False,
placeholder="Enter text and press enter, or upload an image",
container=False,
)
btn = gr.UploadButton("📁", file_types=["image", "video", "audio"])
txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
bot, chatbot, chatbot
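The chatbot hunk above drops the fractional `gr.Column(scale=0.85/0.15)` wrappers and instead sets integer `scale` values directly on the components inside the `Row`; relative widths follow those integers, and an unscaled button simply takes its natural width. A minimal sketch of that layout idiom (the 4:1 proportion is taken from the hunk; everything else is illustrative):

import gradio as gr

with gr.Blocks() as demo:
    with gr.Row():
        txt = gr.Textbox(
            scale=4,  # takes roughly 4x the width of its siblings
            show_label=False,
            container=False,
            placeholder="Enter text and press enter, or upload an image",
        )
        btn = gr.UploadButton("📁", file_types=["image", "video", "audio"])

if __name__ == "__main__":
    demo.launch()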

View File

@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: dashboard\n", "### This demo shows how you can build an interactive dashboard with gradio. Click on a python library on the left hand side and then on the right hand side click on the metric you'd like to see plot over time. Data is pulled from HuggingFace Hub datasets.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio plotly"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/dashboard/helpers.py"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import pandas as pd\n", "import plotly.express as px\n", "from helpers import *\n", "\n", "\n", "LIBRARIES = [\"accelerate\", \"datasets\", \"diffusers\", \"evaluate\", \"gradio\", \"hub_docs\",\n", " \"huggingface_hub\", \"optimum\", \"pytorch_image_models\", \"tokenizers\", \"transformers\"]\n", "\n", "\n", "def create_pip_plot(libraries, pip_choices):\n", " if \"Pip\" not in pip_choices:\n", " return gr.update(visible=False)\n", " output = retrieve_pip_installs(libraries, \"Cumulated\" in pip_choices)\n", " df = pd.DataFrame(output).melt(id_vars=\"day\")\n", " plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n", " title=\"Pip installs\")\n", " plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n", " return gr.update(value=plot, visible=True)\n", "\n", "\n", "def create_star_plot(libraries, star_choices):\n", " if \"Stars\" not in star_choices:\n", " return gr.update(visible=False)\n", " output = retrieve_stars(libraries, \"Week over Week\" in star_choices)\n", " df = pd.DataFrame(output).melt(id_vars=\"day\")\n", " plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n", " title=\"Number of stargazers\")\n", " plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n", " return gr.update(value=plot, visible=True)\n", "\n", "\n", "def create_issue_plot(libraries, issue_choices):\n", " if \"Issue\" not in issue_choices:\n", " return gr.update(visible=False)\n", " output = retrieve_issues(libraries,\n", " exclude_org_members=\"Exclude org members\" in issue_choices,\n", " week_over_week=\"Week over Week\" in issue_choices)\n", " df = pd.DataFrame(output).melt(id_vars=\"day\")\n", " plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n", " title=\"Cumulated number of issues, PRs, and comments\",\n", " )\n", " plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n", " return gr.update(value=plot, visible=True)\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " with gr.Box():\n", " gr.Markdown(\"## Select libraries to display\")\n", " libraries = gr.CheckboxGroup(choices=LIBRARIES, label=\"\")\n", " with gr.Column():\n", " with gr.Box():\n", " gr.Markdown(\"## Select graphs to display\")\n", " pip = gr.CheckboxGroup(choices=[\"Pip\", \"Cumulated\"], label=\"\")\n", " stars = gr.CheckboxGroup(choices=[\"Stars\", \"Week over Week\"], label=\"\")\n", " issues = gr.CheckboxGroup(choices=[\"Issue\", \"Exclude org members\", 
\"week over week\"], label=\"\")\n", " with gr.Row():\n", " fetch = gr.Button(value=\"Fetch\")\n", " with gr.Row():\n", " with gr.Column():\n", " pip_plot = gr.Plot(visible=False)\n", " star_plot = gr.Plot(visible=False)\n", " issue_plot = gr.Plot(visible=False)\n", "\n", " fetch.click(create_pip_plot, inputs=[libraries, pip], outputs=pip_plot)\n", " fetch.click(create_star_plot, inputs=[libraries, stars], outputs=star_plot)\n", " fetch.click(create_issue_plot, inputs=[libraries, issues], outputs=issue_plot)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: dashboard\n", "### This demo shows how you can build an interactive dashboard with gradio. Click on a python library on the left hand side and then on the right hand side click on the metric you'd like to see plot over time. Data is pulled from HuggingFace Hub datasets.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio plotly"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/dashboard/helpers.py"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import pandas as pd\n", "import plotly.express as px\n", "from helpers import *\n", "\n", "\n", "LIBRARIES = [\"accelerate\", \"datasets\", \"diffusers\", \"evaluate\", \"gradio\", \"hub_docs\",\n", " \"huggingface_hub\", \"optimum\", \"pytorch_image_models\", \"tokenizers\", \"transformers\"]\n", "\n", "\n", "def create_pip_plot(libraries, pip_choices):\n", " if \"Pip\" not in pip_choices:\n", " return gr.update(visible=False)\n", " output = retrieve_pip_installs(libraries, \"Cumulated\" in pip_choices)\n", " df = pd.DataFrame(output).melt(id_vars=\"day\")\n", " plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n", " title=\"Pip installs\")\n", " plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n", " return gr.update(value=plot, visible=True)\n", "\n", "\n", "def create_star_plot(libraries, star_choices):\n", " if \"Stars\" not in star_choices:\n", " return gr.update(visible=False)\n", " output = retrieve_stars(libraries, \"Week over Week\" in star_choices)\n", " df = pd.DataFrame(output).melt(id_vars=\"day\")\n", " plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n", " title=\"Number of stargazers\")\n", " plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n", " return gr.update(value=plot, visible=True)\n", "\n", "\n", "def create_issue_plot(libraries, issue_choices):\n", " if \"Issue\" not in issue_choices:\n", " return gr.update(visible=False)\n", " output = retrieve_issues(libraries,\n", " exclude_org_members=\"Exclude org members\" in issue_choices,\n", " week_over_week=\"Week over Week\" in issue_choices)\n", " df = pd.DataFrame(output).melt(id_vars=\"day\")\n", " plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n", " title=\"Cumulated number of issues, PRs, and comments\",\n", " )\n", " plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n", " return gr.update(value=plot, visible=True)\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " gr.Markdown(\"## Select libraries to display\")\n", " libraries = gr.CheckboxGroup(choices=LIBRARIES, label=\"\")\n", " with gr.Column():\n", " gr.Markdown(\"## Select graphs to display\")\n", " pip = gr.CheckboxGroup(choices=[\"Pip\", \"Cumulated\"], label=\"\")\n", " stars = gr.CheckboxGroup(choices=[\"Stars\", \"Week over Week\"], label=\"\")\n", " issues = gr.CheckboxGroup(choices=[\"Issue\", \"Exclude org members\", \"week over week\"], label=\"\")\n", " with 
gr.Row():\n", " fetch = gr.Button(value=\"Fetch\")\n", " with gr.Row():\n", " with gr.Column():\n", " pip_plot = gr.Plot(visible=False)\n", " star_plot = gr.Plot(visible=False)\n", " issue_plot = gr.Plot(visible=False)\n", "\n", " fetch.click(create_pip_plot, inputs=[libraries, pip], outputs=pip_plot)\n", " fetch.click(create_star_plot, inputs=[libraries, stars], outputs=star_plot)\n", " fetch.click(create_issue_plot, inputs=[libraries, issues], outputs=issue_plot)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}

View File

@@ -47,15 +47,13 @@ def create_issue_plot(libraries, issue_choices):
with gr.Blocks() as demo:
with gr.Row():
with gr.Column():
with gr.Box():
gr.Markdown("## Select libraries to display")
libraries = gr.CheckboxGroup(choices=LIBRARIES, label="")
gr.Markdown("## Select libraries to display")
libraries = gr.CheckboxGroup(choices=LIBRARIES, label="")
with gr.Column():
with gr.Box():
gr.Markdown("## Select graphs to display")
pip = gr.CheckboxGroup(choices=["Pip", "Cumulated"], label="")
stars = gr.CheckboxGroup(choices=["Stars", "Week over Week"], label="")
issues = gr.CheckboxGroup(choices=["Issue", "Exclude org members", "week over week"], label="")
gr.Markdown("## Select graphs to display")
pip = gr.CheckboxGroup(choices=["Pip", "Cumulated"], label="")
stars = gr.CheckboxGroup(choices=["Stars", "Week over Week"], label="")
issues = gr.CheckboxGroup(choices=["Issue", "Exclude org members", "week over week"], label="")
with gr.Row():
fetch = gr.Button(value="Fetch")
with gr.Row():

View File

@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: diff_texts"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["from difflib import Differ\n", "\n", "import gradio as gr\n", "\n", "\n", "def diff_texts(text1, text2):\n", " d = Differ()\n", " return [\n", " (token[2:], token[0] if token[0] != \" \" else None)\n", " for token in d.compare(text1, text2)\n", " ]\n", "\n", "\n", "demo = gr.Interface(\n", " diff_texts,\n", " [\n", " gr.Textbox(\n", " label=\"Text 1\",\n", " info=\"Initial text\",\n", " lines=3,\n", " value=\"The quick brown fox jumped over the lazy dogs.\",\n", " ),\n", " gr.Textbox(\n", " label=\"Text 2\",\n", " info=\"Text to compare\",\n", " lines=3,\n", " value=\"The fast brown fox jumps over lazy dogs.\",\n", " ),\n", " ],\n", " gr.HighlightedText(\n", " label=\"Diff\",\n", " combine_adjacent=True,\n", " show_legend=True,\n", " ).style(color_map={\"+\": \"red\", \"-\": \"green\"}),\n", " theme=gr.themes.Base()\n", ")\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: diff_texts"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["from difflib import Differ\n", "\n", "import gradio as gr\n", "\n", "\n", "def diff_texts(text1, text2):\n", " d = Differ()\n", " return [\n", " (token[2:], token[0] if token[0] != \" \" else None)\n", " for token in d.compare(text1, text2)\n", " ]\n", "\n", "\n", "demo = gr.Interface(\n", " diff_texts,\n", " [\n", " gr.Textbox(\n", " label=\"Text 1\",\n", " info=\"Initial text\",\n", " lines=3,\n", " value=\"The quick brown fox jumped over the lazy dogs.\",\n", " ),\n", " gr.Textbox(\n", " label=\"Text 2\",\n", " info=\"Text to compare\",\n", " lines=3,\n", " value=\"The fast brown fox jumps over lazy dogs.\",\n", " ),\n", " ],\n", " gr.HighlightedText(\n", " label=\"Diff\",\n", " combine_adjacent=True,\n", " show_legend=True,\n", " color_map={\"+\": \"red\", \"-\": \"green\"}),\n", " theme=gr.themes.Base()\n", ")\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}

View File

@@ -31,7 +31,7 @@ demo = gr.Interface(
label="Diff",
combine_adjacent=True,
show_legend=True,
).style(color_map={"+": "red", "-": "green"}),
color_map={"+": "red", "-": "green"}),
theme=gr.themes.Base()
)
if __name__ == "__main__":

View File

@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: fake_gan\n", "### This is a fake GAN that shows how to create a text-to-image interface for image generation. Check out the Stable Diffusion demo for more: https://hf.co/spaces/stabilityai/stable-diffusion/\n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/cheetah1.jpg https://github.com/gradio-app/gradio/raw/main/demo/fake_gan/files/cheetah1.jpg"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["# This demo needs to be run from the repo folder.\n", "# python demo/fake_gan/run.py\n", "import random\n", "\n", "import gradio as gr\n", "\n", "\n", "def fake_gan():\n", " images = [\n", " (random.choice(\n", " [\n", " \"https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n", " \"https://images.unsplash.com/photo-1554151228-14d9def656e4?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=386&q=80\",\n", " \"https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80\",\n", " \"https://images.unsplash.com/photo-1546456073-92b9f0a8d413?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n", " \"https://images.unsplash.com/photo-1601412436009-d964bd02edbc?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=464&q=80\",\n", " ]\n", " ), f\"label {i}\" if i != 0 else \"label\" * 50)\n", " for i in range(3)\n", " ]\n", " return images\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Column(variant=\"panel\"):\n", " with gr.Row(variant=\"compact\"):\n", " text = gr.Textbox(\n", " label=\"Enter your prompt\",\n", " show_label=False,\n", " max_lines=1,\n", " placeholder=\"Enter your prompt\",\n", " ).style(\n", " container=False,\n", " )\n", " btn = gr.Button(\"Generate image\").style(full_width=False)\n", "\n", " gallery = gr.Gallery(\n", " label=\"Generated images\", show_label=False, elem_id=\"gallery\"\n", " ).style(columns=[2], rows=[2], object_fit=\"contain\", height=\"auto\")\n", "\n", " btn.click(fake_gan, None, gallery)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: fake_gan\n", "### This is a fake GAN that shows how to create a text-to-image interface for image generation. Check out the Stable Diffusion demo for more: https://hf.co/spaces/stabilityai/stable-diffusion/\n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/cheetah1.jpg https://github.com/gradio-app/gradio/raw/main/demo/fake_gan/files/cheetah1.jpg"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["# This demo needs to be run from the repo folder.\n", "# python demo/fake_gan/run.py\n", "import random\n", "\n", "import gradio as gr\n", "\n", "\n", "def fake_gan():\n", " images = [\n", " (random.choice(\n", " [\n", " \"https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n", " \"https://images.unsplash.com/photo-1554151228-14d9def656e4?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=386&q=80\",\n", " \"https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80\",\n", " \"https://images.unsplash.com/photo-1546456073-92b9f0a8d413?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n", " \"https://images.unsplash.com/photo-1601412436009-d964bd02edbc?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=464&q=80\",\n", " ]\n", " ), f\"label {i}\" if i != 0 else \"label\" * 50)\n", " for i in range(3)\n", " ]\n", " return images\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Column(variant=\"panel\"):\n", " with gr.Row():\n", " text = gr.Textbox(\n", " label=\"Enter your prompt\",\n", " max_lines=1,\n", " placeholder=\"Enter your prompt\",\n", " container=False,\n", " )\n", " btn = gr.Button(\"Generate image\", scale=0)\n", "\n", " gallery = gr.Gallery(\n", " label=\"Generated images\", show_label=False, elem_id=\"gallery\"\n", " , columns=[2], rows=[2], object_fit=\"contain\", height=\"auto\")\n", "\n", " btn.click(fake_gan, None, gallery)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}

View File

@@ -23,20 +23,18 @@ def fake_gan():
with gr.Blocks() as demo:
with gr.Column(variant="panel"):
with gr.Row(variant="compact"):
with gr.Row():
text = gr.Textbox(
label="Enter your prompt",
show_label=False,
max_lines=1,
placeholder="Enter your prompt",
).style(
container=False,
)
btn = gr.Button("Generate image").style(full_width=False)
btn = gr.Button("Generate image", scale=0)
gallery = gr.Gallery(
label="Generated images", show_label=False, elem_id="gallery"
).style(columns=[2], rows=[2], object_fit="contain", height="auto")
, columns=[2], rows=[2], object_fit="contain", height="auto")
btn.click(fake_gan, None, gallery)

View File

@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: fake_gan_no_input"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import time\n", "\n", "import gradio as gr\n", "\n", "\n", "def fake_gan():\n", " time.sleep(1)\n", " images = [\n", " \"https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n", " \"https://images.unsplash.com/photo-1554151228-14d9def656e4?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=386&q=80\",\n", " \"https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80\",\n", " ]\n", " return images\n", "\n", "\n", "demo = gr.Interface(\n", " fn=fake_gan,\n", " inputs=None,\n", " outputs=gr.Gallery(label=\"Generated Images\").style(grid=[2]),\n", " title=\"FD-GAN\",\n", " description=\"This is a fake demo of a GAN. In reality, the images are randomly chosen from Unsplash.\",\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: fake_gan_no_input"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import time\n", "\n", "import gradio as gr\n", "\n", "\n", "def fake_gan():\n", " time.sleep(1)\n", " images = [\n", " \"https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n", " \"https://images.unsplash.com/photo-1554151228-14d9def656e4?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=386&q=80\",\n", " \"https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80\",\n", " ]\n", " return images\n", "\n", "\n", "demo = gr.Interface(\n", " fn=fake_gan,\n", " inputs=None,\n", " outputs=gr.Gallery(label=\"Generated Images\", columns=[2]),\n", " title=\"FD-GAN\",\n", " description=\"This is a fake demo of a GAN. In reality, the images are randomly chosen from Unsplash.\",\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}

View File

@ -16,7 +16,7 @@ def fake_gan():
demo = gr.Interface(
fn=fake_gan,
inputs=None,
outputs=gr.Gallery(label="Generated Images").style(grid=[2]),
outputs=gr.Gallery(label="Generated Images", columns=[2]),
title="FD-GAN",
description="This is a fake demo of a GAN. In reality, the images are randomly chosen from Unsplash.",
)

View File

@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: gallery_component"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr \n", "\n", "with gr.Blocks() as demo:\n", " cheetahs = [\n", " \"https://upload.wikimedia.org/wikipedia/commons/0/09/TheCheethcat.jpg\",\n", " \"https://nationalzoo.si.edu/sites/default/files/animals/cheetah-003.jpg\",\n", " \"https://img.etimg.com/thumb/msid-50159822,width-650,imgsize-129520,,resizemode-4,quality-100/.jpg\",\n", " \"https://nationalzoo.si.edu/sites/default/files/animals/cheetah-002.jpg\",\n", " \"https://images.theconversation.com/files/375893/original/file-20201218-13-a8h8uq.jpg?ixlib=rb-1.1.0&rect=16%2C407%2C5515%2C2924&q=45&auto=format&w=496&fit=clip\",\n", " \"https://www.lifegate.com/app/uploads/ghepardo-primo-piano.jpg\",\n", " \"https://qph.cf2.quoracdn.net/main-qimg-0bbf31c18a22178cb7a8dd53640a3d05-lq\"\n", " ]\n", " gr.Gallery(value=cheetahs).style(grid=4)\n", "\n", "demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: gallery_component"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr \n", "\n", "with gr.Blocks() as demo:\n", " cheetahs = [\n", " \"https://upload.wikimedia.org/wikipedia/commons/0/09/TheCheethcat.jpg\",\n", " \"https://nationalzoo.si.edu/sites/default/files/animals/cheetah-003.jpg\",\n", " \"https://img.etimg.com/thumb/msid-50159822,width-650,imgsize-129520,,resizemode-4,quality-100/.jpg\",\n", " \"https://nationalzoo.si.edu/sites/default/files/animals/cheetah-002.jpg\",\n", " \"https://images.theconversation.com/files/375893/original/file-20201218-13-a8h8uq.jpg?ixlib=rb-1.1.0&rect=16%2C407%2C5515%2C2924&q=45&auto=format&w=496&fit=clip\",\n", " \"https://www.lifegate.com/app/uploads/ghepardo-primo-piano.jpg\",\n", " \"https://qph.cf2.quoracdn.net/main-qimg-0bbf31c18a22178cb7a8dd53640a3d05-lq\"\n", " ]\n", " gr.Gallery(value=cheetahs, columns=4)\n", "\n", "demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}

View File

@ -10,6 +10,6 @@ with gr.Blocks() as demo:
"https://www.lifegate.com/app/uploads/ghepardo-primo-piano.jpg",
"https://qph.cf2.quoracdn.net/main-qimg-0bbf31c18a22178cb7a8dd53640a3d05-lq"
]
gr.Gallery(value=cheetahs).style(grid=4)
gr.Gallery(value=cheetahs, columns=4)
demo.launch()

View File

@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: image_segmentation\n", "### Simple image segmentation using gradio's AnnotatedImage component.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "import random\n", "\n", "with gr.Blocks() as demo:\n", " section_labels = [\n", " \"apple\",\n", " \"banana\",\n", " \"carrot\",\n", " \"donut\",\n", " \"eggplant\",\n", " \"fish\",\n", " \"grapes\",\n", " \"hamburger\",\n", " \"ice cream\",\n", " \"juice\",\n", " ]\n", "\n", " with gr.Row():\n", " num_boxes = gr.Slider(0, 5, 2, step=1, label=\"Number of boxes\")\n", " num_segments = gr.Slider(0, 5, 1, step=1, label=\"Number of segments\")\n", "\n", " with gr.Row():\n", " img_input = gr.Image()\n", " img_output = gr.AnnotatedImage().style(\n", " color_map={\"banana\": \"#a89a00\", \"carrot\": \"#ffae00\"}\n", " )\n", "\n", " section_btn = gr.Button(\"Identify Sections\")\n", " selected_section = gr.Textbox(label=\"Selected Section\")\n", "\n", " def section(img, num_boxes, num_segments):\n", " sections = []\n", " for a in range(num_boxes):\n", " x = random.randint(0, img.shape[1])\n", " y = random.randint(0, img.shape[0])\n", " w = random.randint(0, img.shape[1] - x)\n", " h = random.randint(0, img.shape[0] - y)\n", " sections.append(((x, y, x + w, y + h), section_labels[a]))\n", " for b in range(num_segments):\n", " x = random.randint(0, img.shape[1])\n", " y = random.randint(0, img.shape[0])\n", " r = random.randint(0, min(x, y, img.shape[1] - x, img.shape[0] - y))\n", " mask = np.zeros(img.shape[:2])\n", " for i in range(img.shape[0]):\n", " for j in range(img.shape[1]):\n", " dist_square = (i - y) ** 2 + (j - x) ** 2\n", " if dist_square < r**2:\n", " mask[i, j] = round((r**2 - dist_square) / r**2 * 4) / 4\n", " sections.append((mask, section_labels[b + num_boxes]))\n", " return (img, sections)\n", "\n", " section_btn.click(section, [img_input, num_boxes, num_segments], img_output)\n", "\n", " def select_section(evt: gr.SelectData):\n", " return section_labels[evt.index]\n", "\n", " img_output.select(select_section, None, selected_section)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: image_segmentation\n", "### Simple image segmentation using gradio's AnnotatedImage component.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "import random\n", "\n", "with gr.Blocks() as demo:\n", " section_labels = [\n", " \"apple\",\n", " \"banana\",\n", " \"carrot\",\n", " \"donut\",\n", " \"eggplant\",\n", " \"fish\",\n", " \"grapes\",\n", " \"hamburger\",\n", " \"ice cream\",\n", " \"juice\",\n", " ]\n", "\n", " with gr.Row():\n", " num_boxes = gr.Slider(0, 5, 2, step=1, label=\"Number of boxes\")\n", " num_segments = gr.Slider(0, 5, 1, step=1, label=\"Number of segments\")\n", "\n", " with gr.Row():\n", " img_input = gr.Image()\n", " img_output = gr.AnnotatedImage(\n", " color_map={\"banana\": \"#a89a00\", \"carrot\": \"#ffae00\"}\n", " )\n", "\n", " section_btn = gr.Button(\"Identify Sections\")\n", " selected_section = gr.Textbox(label=\"Selected Section\")\n", "\n", " def section(img, num_boxes, num_segments):\n", " sections = []\n", " for a in range(num_boxes):\n", " x = random.randint(0, img.shape[1])\n", " y = random.randint(0, img.shape[0])\n", " w = random.randint(0, img.shape[1] - x)\n", " h = random.randint(0, img.shape[0] - y)\n", " sections.append(((x, y, x + w, y + h), section_labels[a]))\n", " for b in range(num_segments):\n", " x = random.randint(0, img.shape[1])\n", " y = random.randint(0, img.shape[0])\n", " r = random.randint(0, min(x, y, img.shape[1] - x, img.shape[0] - y))\n", " mask = np.zeros(img.shape[:2])\n", " for i in range(img.shape[0]):\n", " for j in range(img.shape[1]):\n", " dist_square = (i - y) ** 2 + (j - x) ** 2\n", " if dist_square < r**2:\n", " mask[i, j] = round((r**2 - dist_square) / r**2 * 4) / 4\n", " sections.append((mask, section_labels[b + num_boxes]))\n", " return (img, sections)\n", "\n", " section_btn.click(section, [img_input, num_boxes, num_segments], img_output)\n", "\n", " def select_section(evt: gr.SelectData):\n", " return section_labels[evt.index]\n", "\n", " img_output.select(select_section, None, selected_section)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}

View File

@ -22,7 +22,7 @@ with gr.Blocks() as demo:
with gr.Row():
img_input = gr.Image()
img_output = gr.AnnotatedImage().style(
img_output = gr.AnnotatedImage(
color_map={"banana": "#a89a00", "carrot": "#ffae00"}
)
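
A hedged sketch of the constructor form shown above: color_map now goes directly to gr.AnnotatedImage. The fixed top-left box annotation is illustrative only and assumes an image has been uploaded.

import gradio as gr

def annotate(img):
    # annotate a fixed region in the top-left corner (illustrative only)
    h, w = img.shape[:2]
    return img, [((0, 0, w // 3, h // 3), "banana")]

with gr.Blocks() as demo:
    img_input = gr.Image()
    img_output = gr.AnnotatedImage(
        color_map={"banana": "#a89a00", "carrot": "#ffae00"}  # formerly passed via .style()
    )
    btn = gr.Button("Annotate")
    btn.click(annotate, img_input, img_output)

demo.launch()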

File diff suppressed because one or more lines are too long

View File

@ -115,7 +115,7 @@ demo = gr.Interface(
gr.Audio(label="Audio"),
gr.Image(label="Image"),
gr.Video(label="Video"),
gr.HighlightedText(label="HighlightedText").style(
gr.HighlightedText(label="HighlightedText",
color_map={"punc": "pink", "test 0": "blue"}
),
gr.HighlightedText(label="HighlightedText", show_legend=True),
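
A minimal sketch of the updated HighlightedText constructor shown above; the sample values are made up for illustration and assume the usual list-of-(text, label) format.

import gradio as gr

with gr.Blocks() as demo:
    gr.HighlightedText(
        value=[("The quick brown fox", "test 0"), (".", "punc")],  # illustrative sample
        label="HighlightedText",
        color_map={"punc": "pink", "test 0": "blue"},  # formerly passed via .style()
    )
    gr.HighlightedText(
        value=[("jumps over the lazy dog", "test 0"), (".", "punc")],
        label="HighlightedText",
        show_legend=True,
    )

demo.launch()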

View File

@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: lineplot_component"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio vega_datasets"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from vega_datasets import data\n", "\n", "with gr.Blocks() as demo:\n", " gr.LinePlot(\n", " data.stocks(),\n", " x=\"date\",\n", " y=\"price\",\n", " color=\"symbol\",\n", " color_legend_position=\"bottom\",\n", " title=\"Stock Prices\",\n", " tooltip=[\"date\", \"price\", \"symbol\"],\n", " height=300,\n", " width=300,\n", " show_label=False,\n", " ).style(\n", " container=False,\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: lineplot_component"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio vega_datasets"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from vega_datasets import data\n", "\n", "with gr.Blocks() as demo:\n", " gr.LinePlot(\n", " data.stocks(),\n", " x=\"date\",\n", " y=\"price\",\n", " color=\"symbol\",\n", " color_legend_position=\"bottom\",\n", " title=\"Stock Prices\",\n", " tooltip=[\"date\", \"price\", \"symbol\"],\n", " height=300,\n", " width=300,\n", " container=False,\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}

View File

@ -12,8 +12,6 @@ with gr.Blocks() as demo:
tooltip=["date", "price", "symbol"],
height=300,
width=300,
show_label=False,
).style(
container=False,
)

View File

@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: map_airbnb\n", "### Display an interactive map of AirBnB locations with Plotly. Data is hosted on HuggingFace Datasets. \n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio plotly"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import plotly.graph_objects as go\n", "from datasets import load_dataset\n", "\n", "dataset = load_dataset(\"gradio/NYC-Airbnb-Open-Data\", split=\"train\")\n", "df = dataset.to_pandas()\n", "\n", "def filter_map(min_price, max_price, boroughs):\n", "\n", " filtered_df = df[(df['neighbourhood_group'].isin(boroughs)) & \n", " (df['price'] > min_price) & (df['price'] < max_price)]\n", " names = filtered_df[\"name\"].tolist()\n", " prices = filtered_df[\"price\"].tolist()\n", " text_list = [(names[i], prices[i]) for i in range(0, len(names))]\n", " fig = go.Figure(go.Scattermapbox(\n", " customdata=text_list,\n", " lat=filtered_df['latitude'].tolist(),\n", " lon=filtered_df['longitude'].tolist(),\n", " mode='markers',\n", " marker=go.scattermapbox.Marker(\n", " size=6\n", " ),\n", " hoverinfo=\"text\",\n", " hovertemplate='<b>Name</b>: %{customdata[0]}<br><b>Price</b>: $%{customdata[1]}'\n", " ))\n", "\n", " fig.update_layout(\n", " mapbox_style=\"open-street-map\",\n", " hovermode='closest',\n", " mapbox=dict(\n", " bearing=0,\n", " center=go.layout.mapbox.Center(\n", " lat=40.67,\n", " lon=-73.90\n", " ),\n", " pitch=0,\n", " zoom=9\n", " ),\n", " )\n", "\n", " return fig\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Column():\n", " with gr.Row():\n", " min_price = gr.Number(value=250, label=\"Minimum Price\")\n", " max_price = gr.Number(value=1000, label=\"Maximum Price\")\n", " boroughs = gr.CheckboxGroup(choices=[\"Queens\", \"Brooklyn\", \"Manhattan\", \"Bronx\", \"Staten Island\"], value=[\"Queens\", \"Brooklyn\"], label=\"Select Boroughs:\")\n", " btn = gr.Button(value=\"Update Filter\")\n", " map = gr.Plot().style()\n", " demo.load(filter_map, [min_price, max_price, boroughs], map)\n", " btn.click(filter_map, [min_price, max_price, boroughs], map)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: map_airbnb\n", "### Display an interactive map of AirBnB locations with Plotly. Data is hosted on HuggingFace Datasets. \n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio plotly"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import plotly.graph_objects as go\n", "from datasets import load_dataset\n", "\n", "dataset = load_dataset(\"gradio/NYC-Airbnb-Open-Data\", split=\"train\")\n", "df = dataset.to_pandas()\n", "\n", "def filter_map(min_price, max_price, boroughs):\n", "\n", " filtered_df = df[(df['neighbourhood_group'].isin(boroughs)) & \n", " (df['price'] > min_price) & (df['price'] < max_price)]\n", " names = filtered_df[\"name\"].tolist()\n", " prices = filtered_df[\"price\"].tolist()\n", " text_list = [(names[i], prices[i]) for i in range(0, len(names))]\n", " fig = go.Figure(go.Scattermapbox(\n", " customdata=text_list,\n", " lat=filtered_df['latitude'].tolist(),\n", " lon=filtered_df['longitude'].tolist(),\n", " mode='markers',\n", " marker=go.scattermapbox.Marker(\n", " size=6\n", " ),\n", " hoverinfo=\"text\",\n", " hovertemplate='<b>Name</b>: %{customdata[0]}<br><b>Price</b>: $%{customdata[1]}'\n", " ))\n", "\n", " fig.update_layout(\n", " mapbox_style=\"open-street-map\",\n", " hovermode='closest',\n", " mapbox=dict(\n", " bearing=0,\n", " center=go.layout.mapbox.Center(\n", " lat=40.67,\n", " lon=-73.90\n", " ),\n", " pitch=0,\n", " zoom=9\n", " ),\n", " )\n", "\n", " return fig\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Column():\n", " with gr.Row():\n", " min_price = gr.Number(value=250, label=\"Minimum Price\")\n", " max_price = gr.Number(value=1000, label=\"Maximum Price\")\n", " boroughs = gr.CheckboxGroup(choices=[\"Queens\", \"Brooklyn\", \"Manhattan\", \"Bronx\", \"Staten Island\"], value=[\"Queens\", \"Brooklyn\"], label=\"Select Boroughs:\")\n", " btn = gr.Button(value=\"Update Filter\")\n", " map = gr.Plot()\n", " demo.load(filter_map, [min_price, max_price, boroughs], map)\n", " btn.click(filter_map, [min_price, max_price, boroughs], map)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}

View File

@ -47,7 +47,7 @@ with gr.Blocks() as demo:
max_price = gr.Number(value=1000, label="Maximum Price")
boroughs = gr.CheckboxGroup(choices=["Queens", "Brooklyn", "Manhattan", "Bronx", "Staten Island"], value=["Queens", "Brooklyn"], label="Select Boroughs:")
btn = gr.Button(value="Update Filter")
map = gr.Plot().style()
map = gr.Plot()
demo.load(filter_map, [min_price, max_price, boroughs], map)
btn.click(filter_map, [min_price, max_price, boroughs], map)

View File

@ -84,6 +84,6 @@ with gr.Blocks() as bar_plot:
label="Type of Bar Plot"
)
with gr.Column():
plot = gr.BarPlot(show_label=False).style(container=True)
plot = gr.BarPlot(show_label=False)
display.change(bar_plot_fn, inputs=display, outputs=plot)
bar_plot.load(fn=bar_plot_fn, inputs=display, outputs=plot)

View File

@ -69,7 +69,7 @@ with gr.Blocks() as line_plot:
value="stocks",
)
with gr.Column():
plot = gr.LinePlot(show_label=False).style(container=False)
plot = gr.LinePlot(show_label=False, container=False)
dataset.change(line_plot_fn, inputs=dataset, outputs=plot)
line_plot.load(fn=line_plot_fn, inputs=dataset, outputs=plot)

View File

@ -39,7 +39,7 @@ with gr.Blocks() as scatter_plot:
with gr.Column():
dataset = gr.Dropdown(choices=["cars", "iris"], value="cars")
with gr.Column():
plot = gr.ScatterPlot(show_label=False).style(container=True)
plot = gr.ScatterPlot(show_label=False)
dataset.change(scatter_plot_fn, inputs=dataset, outputs=plot)
scatter_plot.load(fn=scatter_plot_fn, inputs=dataset, outputs=plot)

View File

@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: rows_and_columns"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('images')\n", "!wget -q -O images/cheetah.jpg https://github.com/gradio-app/gradio/raw/main/demo/rows_and_columns/images/cheetah.jpg"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " text1 = gr.Textbox(label=\"t1\")\n", " slider2 = gr.Textbox(label=\"s2\")\n", " drop3 = gr.Dropdown([\"a\", \"b\", \"c\"], label=\"d3\")\n", " with gr.Row():\n", " with gr.Column(scale=1, min_width=600):\n", " text1 = gr.Textbox(label=\"prompt 1\")\n", " text2 = gr.Textbox(label=\"prompt 2\")\n", " inbtw = gr.Button(\"Between\")\n", " text4 = gr.Textbox(label=\"prompt 1\")\n", " text5 = gr.Textbox(label=\"prompt 2\")\n", " with gr.Column(scale=2, min_width=600):\n", " img1 = gr.Image(\"images/cheetah.jpg\")\n", " btn = gr.Button(\"Go\").style(full_width=True)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: rows_and_columns"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('images')\n", "!wget -q -O images/cheetah.jpg https://github.com/gradio-app/gradio/raw/main/demo/rows_and_columns/images/cheetah.jpg"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " text1 = gr.Textbox(label=\"t1\")\n", " slider2 = gr.Textbox(label=\"s2\")\n", " drop3 = gr.Dropdown([\"a\", \"b\", \"c\"], label=\"d3\")\n", " with gr.Row():\n", " with gr.Column(scale=1, min_width=600):\n", " text1 = gr.Textbox(label=\"prompt 1\")\n", " text2 = gr.Textbox(label=\"prompt 2\")\n", " inbtw = gr.Button(\"Between\")\n", " text4 = gr.Textbox(label=\"prompt 1\")\n", " text5 = gr.Textbox(label=\"prompt 2\")\n", " with gr.Column(scale=2, min_width=600):\n", " img1 = gr.Image(\"images/cheetah.jpg\")\n", " btn = gr.Button(\"Go\")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}

View File

@ -14,7 +14,7 @@ with gr.Blocks() as demo:
text5 = gr.Textbox(label="prompt 2")
with gr.Column(scale=2, min_width=600):
img1 = gr.Image("images/cheetah.jpg")
btn = gr.Button("Go").style(full_width=True)
btn = gr.Button("Go")
if __name__ == "__main__":
demo.launch()

View File

@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: scatterplot_component"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio vega_datasets"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from vega_datasets import data\n", "\n", "cars = data.cars()\n", "\n", "with gr.Blocks() as demo:\n", " gr.ScatterPlot(show_label=False,\n", " value=cars,\n", " x=\"Horsepower\",\n", " y=\"Miles_per_Gallon\",\n", " color=\"Origin\",\n", " tooltip=\"Name\",\n", " title=\"Car Data\",\n", " y_title=\"Miles per Gallon\",\n", " color_legend_title=\"Origin of Car\").style(container=False)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: scatterplot_component"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio vega_datasets"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from vega_datasets import data\n", "\n", "cars = data.cars()\n", "\n", "with gr.Blocks() as demo:\n", " gr.ScatterPlot(\n", " value=cars,\n", " x=\"Horsepower\",\n", " y=\"Miles_per_Gallon\",\n", " color=\"Origin\",\n", " tooltip=\"Name\",\n", " title=\"Car Data\",\n", " y_title=\"Miles per Gallon\",\n", " color_legend_title=\"Origin of Car\",\n", " container=False,\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}

View File

@ -4,15 +4,17 @@ from vega_datasets import data
cars = data.cars()
with gr.Blocks() as demo:
gr.ScatterPlot(show_label=False,
value=cars,
x="Horsepower",
y="Miles_per_Gallon",
color="Origin",
tooltip="Name",
title="Car Data",
y_title="Miles per Gallon",
color_legend_title="Origin of Car").style(container=False)
gr.ScatterPlot(
value=cars,
x="Horsepower",
y="Miles_per_Gallon",
color="Origin",
tooltip="Name",
title="Car Data",
y_title="Miles per Gallon",
color_legend_title="Origin of Car",
container=False,
)
if __name__ == "__main__":
demo.launch()
demo.launch()

View File

@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: stable-diffusion\n", "### Note: This is a simplified version of the code needed to create the Stable Diffusion demo. See full code here: https://hf.co/spaces/stabilityai/stable-diffusion/tree/main\n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio diffusers transformers nvidia-ml-py3 ftfy torch"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import torch\n", "from diffusers import StableDiffusionPipeline\n", "from PIL import Image \n", "import os\n", "\n", "auth_token = os.getenv(\"auth_token\")\n", "model_id = \"CompVis/stable-diffusion-v1-4\"\n", "device = \"cpu\"\n", "pipe = StableDiffusionPipeline.from_pretrained(model_id, use_auth_token=auth_token, revision=\"fp16\", torch_dtype=torch.float16)\n", "pipe = pipe.to(device)\n", "\n", "def infer(prompt, samples, steps, scale, seed): \n", " generator = torch.Generator(device=device).manual_seed(seed)\n", " images_list = pipe(\n", " [prompt] * samples,\n", " num_inference_steps=steps,\n", " guidance_scale=scale,\n", " generator=generator,\n", " )\n", " images = []\n", " safe_image = Image.open(r\"unsafe.png\")\n", " for i, image in enumerate(images_list[\"sample\"]):\n", " if(images_list[\"nsfw_content_detected\"][i]):\n", " images.append(safe_image)\n", " else:\n", " images.append(image)\n", " return images\n", " \n", "\n", "\n", "block = gr.Blocks()\n", "\n", "with block:\n", " with gr.Group():\n", " with gr.Box():\n", " with gr.Row().style(mobile_collapse=False, equal_height=True):\n", " text = gr.Textbox(\n", " label=\"Enter your prompt\",\n", " show_label=False,\n", " max_lines=1,\n", " placeholder=\"Enter your prompt\",\n", " ).style(\n", " border=(True, False, True, True),\n", " rounded=(True, False, False, True),\n", " container=False,\n", " )\n", " btn = gr.Button(\"Generate image\").style(\n", " margin=False,\n", " rounded=(False, True, True, False),\n", " )\n", " gallery = gr.Gallery(\n", " label=\"Generated images\", show_label=False, elem_id=\"gallery\"\n", " ).style(grid=[2], height=\"auto\")\n", "\n", " advanced_button = gr.Button(\"Advanced options\", elem_id=\"advanced-btn\")\n", "\n", " with gr.Row(elem_id=\"advanced-options\"):\n", " samples = gr.Slider(label=\"Images\", minimum=1, maximum=4, value=4, step=1)\n", " steps = gr.Slider(label=\"Steps\", minimum=1, maximum=50, value=45, step=1)\n", " scale = gr.Slider(\n", " label=\"Guidance Scale\", minimum=0, maximum=50, value=7.5, step=0.1\n", " )\n", " seed = gr.Slider(\n", " label=\"Seed\",\n", " minimum=0,\n", " maximum=2147483647,\n", " step=1,\n", " randomize=True,\n", " )\n", " text.submit(infer, inputs=[text, samples, steps, scale, seed], outputs=gallery)\n", " btn.click(infer, inputs=[text, samples, steps, scale, seed], outputs=gallery)\n", " advanced_button.click(\n", " None,\n", " [],\n", " text,\n", " )\n", " \n", "block.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: stable-diffusion\n", "### Note: This is a simplified version of the code needed to create the Stable Diffusion demo. See full code here: https://hf.co/spaces/stabilityai/stable-diffusion/tree/main\n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio diffusers transformers nvidia-ml-py3 ftfy torch"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import torch\n", "from diffusers import StableDiffusionPipeline\n", "from PIL import Image\n", "import os\n", "\n", "auth_token = os.getenv(\"auth_token\")\n", "model_id = \"CompVis/stable-diffusion-v1-4\"\n", "device = \"cpu\"\n", "pipe = StableDiffusionPipeline.from_pretrained(\n", " model_id, use_auth_token=auth_token, revision=\"fp16\", torch_dtype=torch.float16\n", ")\n", "pipe = pipe.to(device)\n", "\n", "\n", "def infer(prompt, samples, steps, scale, seed):\n", " generator = torch.Generator(device=device).manual_seed(seed)\n", " images_list = pipe(\n", " [prompt] * samples,\n", " num_inference_steps=steps,\n", " guidance_scale=scale,\n", " generator=generator,\n", " )\n", " images = []\n", " safe_image = Image.open(r\"unsafe.png\")\n", " for i, image in enumerate(images_list[\"sample\"]):\n", " if images_list[\"nsfw_content_detected\"][i]:\n", " images.append(safe_image)\n", " else:\n", " images.append(image)\n", " return images\n", "\n", "\n", "block = gr.Blocks()\n", "\n", "with block:\n", " with gr.Group():\n", " with gr.Row():\n", " text = gr.Textbox(\n", " label=\"Enter your prompt\",\n", " max_lines=1,\n", " placeholder=\"Enter your prompt\",\n", " container=False,\n", " )\n", " btn = gr.Button(\"Generate image\")\n", " gallery = gr.Gallery(\n", " label=\"Generated images\",\n", " show_label=False,\n", " elem_id=\"gallery\",\n", " columns=[2],\n", " height=\"auto\",\n", " )\n", "\n", " advanced_button = gr.Button(\"Advanced options\", elem_id=\"advanced-btn\")\n", "\n", " with gr.Row(elem_id=\"advanced-options\"):\n", " samples = gr.Slider(label=\"Images\", minimum=1, maximum=4, value=4, step=1)\n", " steps = gr.Slider(label=\"Steps\", minimum=1, maximum=50, value=45, step=1)\n", " scale = gr.Slider(\n", " label=\"Guidance Scale\", minimum=0, maximum=50, value=7.5, step=0.1\n", " )\n", " seed = gr.Slider(\n", " label=\"Seed\",\n", " minimum=0,\n", " maximum=2147483647,\n", " step=1,\n", " randomize=True,\n", " )\n", " text.submit(infer, inputs=[text, samples, steps, scale, seed], outputs=gallery)\n", " btn.click(infer, inputs=[text, samples, steps, scale, seed], outputs=gallery)\n", " advanced_button.click(\n", " None,\n", " [],\n", " text,\n", " )\n", "\n", "block.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}

View File

@ -1,16 +1,19 @@
import gradio as gr
import torch
from diffusers import StableDiffusionPipeline
from PIL import Image
from PIL import Image
import os
auth_token = os.getenv("auth_token")
model_id = "CompVis/stable-diffusion-v1-4"
device = "cpu"
pipe = StableDiffusionPipeline.from_pretrained(model_id, use_auth_token=auth_token, revision="fp16", torch_dtype=torch.float16)
pipe = StableDiffusionPipeline.from_pretrained(
model_id, use_auth_token=auth_token, revision="fp16", torch_dtype=torch.float16
)
pipe = pipe.to(device)
def infer(prompt, samples, steps, scale, seed):
def infer(prompt, samples, steps, scale, seed):
generator = torch.Generator(device=device).manual_seed(seed)
images_list = pipe(
[prompt] * samples,
@ -21,37 +24,32 @@ def infer(prompt, samples, steps, scale, seed):
images = []
safe_image = Image.open(r"unsafe.png")
for i, image in enumerate(images_list["sample"]):
if(images_list["nsfw_content_detected"][i]):
if images_list["nsfw_content_detected"][i]:
images.append(safe_image)
else:
images.append(image)
return images
block = gr.Blocks()
with block:
with gr.Group():
with gr.Box():
with gr.Row().style(mobile_collapse=False, equal_height=True):
text = gr.Textbox(
label="Enter your prompt",
show_label=False,
max_lines=1,
placeholder="Enter your prompt",
).style(
border=(True, False, True, True),
rounded=(True, False, False, True),
container=False,
)
btn = gr.Button("Generate image").style(
margin=False,
rounded=(False, True, True, False),
)
with gr.Row():
text = gr.Textbox(
label="Enter your prompt",
max_lines=1,
placeholder="Enter your prompt",
container=False,
)
btn = gr.Button("Generate image")
gallery = gr.Gallery(
label="Generated images", show_label=False, elem_id="gallery"
).style(grid=[2], height="auto")
label="Generated images",
show_label=False,
elem_id="gallery",
columns=[2],
height="auto",
)
advanced_button = gr.Button("Advanced options", elem_id="advanced-btn")
@ -75,5 +73,5 @@ with block:
[],
text,
)
block.launch()
block.launch()
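
Since gr.Box is being deprecated (see the layouts diff further down), the prompt bar above keeps only Group and Row. A hedged skeleton of the equivalent nesting for anyone porting a demo that still uses Box:

import gradio as gr

with gr.Blocks() as block:
    # old nesting: Group > Box > Row > components
    # new nesting: Group > Row > components (Box dropped)
    with gr.Group():
        with gr.Row():
            prompt = gr.Textbox(placeholder="Enter your prompt", container=False)
            go = gr.Button("Generate image")

block.launch()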

View File

@ -377,10 +377,12 @@ class BlockContext(Block):
child.parent = pseudo_parent
self.children = children
def __exit__(self, *args):
def __exit__(self, exc_type: type[BaseException] | None = None, *args):
Context.block = self.parent
if exc_type is not None:
return
if getattr(self, "allow_expected_parents", True):
self.fill_expected_parents()
Context.block = self.parent
def postprocess(self, y):
"""
@ -432,12 +434,6 @@ class BlockFunction:
return str(self)
class class_or_instancemethod(classmethod): # noqa: N801
def __get__(self, instance, type_):
descr_get = super().__get__ if instance is None else self.__func__.__get__
return descr_get(instance, type_)
def postprocess_update_dict(block: Block, update_dict: dict, postprocess: bool = True):
"""
Converts a dictionary of updates into a format that can be sent to the frontend.
@ -1518,7 +1514,11 @@ Received outputs:
self.exited = False
return self
def __exit__(self, *args):
def __exit__(self, exc_type: type[BaseException] | None = None, *args):
if exc_type is not None:
Context.block = None
Context.root_block = None
return
super().fill_expected_parents()
Context.block = self.parent
# Configure the load events before root_block is reset
@ -1532,9 +1532,8 @@ Received outputs:
self.progress_tracking = any(block_fn.tracks_progress for block_fn in self.fns)
self.exited = True
@class_or_instancemethod
def load(
self_or_cls, # noqa: N805
self: Blocks | None = None,
fn: Callable | None = None,
inputs: list[Component] | None = None,
outputs: list[Component] | None = None,
@ -1559,10 +1558,8 @@ Received outputs:
For reverse compatibility reasons, this is both a class method and an instance
method, the two of which, confusingly, do two completely different things.
Class method: loads a demo from a Hugging Face Spaces repo and creates it locally and returns a block instance. Warning: this method will be deprecated. Use the equivalent `gradio.load()` instead.
Instance method: adds event that runs as soon as the demo loads in the browser. Example usage below.
Parameters:
name: Class Method - the name of the model (e.g. "gpt2" or "facebook/bart-base") or space (e.g. "flax-community/spanish-gpt2"), can include the `src` as prefix (e.g. "models/facebook/bart-base")
@ -1591,7 +1588,7 @@ Received outputs:
demo.load(get_time, inputs=None, outputs=dt)
demo.launch()
"""
if isinstance(self_or_cls, type):
if self is None:
warn_deprecation(
"gr.Blocks.load() will be deprecated. Use gr.load() instead."
)
@ -1605,7 +1602,7 @@ Received outputs:
else:
from gradio.events import Dependency
dep, dep_index = self_or_cls.set_event_trigger(
dep, dep_index = self.set_event_trigger(
event_name="load",
fn=fn,
inputs=inputs,
@ -1622,7 +1619,7 @@ Received outputs:
every=every,
no_target=True,
)
return Dependency(self_or_cls, dep, dep_index)
return Dependency(self, dep, dep_index)
def clear(self):
"""Resets the layout of the Blocks object."""

View File

@ -227,6 +227,9 @@ class CheckboxGroup(
return None
elif not isinstance(input_data, list):
input_data = [input_data]
for data in input_data:
if data not in [c[0] for c in self.choices]:
raise ValueError(f"Example {data} provided not a valid choice.")
return [
next((c[0] for c in self.choices if c[1] == data), None)
for data in input_data
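
The added guard above makes an invalid example value fail immediately instead of being silently mapped to None by the comprehension below it. A standalone sketch of the same check, with hypothetical choice pairs rather than the component's real internals:

# hypothetical (name, value) choice pairs and example data, mirroring the guard above
choices = [("Queens", "Queens"), ("Brooklyn", "Brooklyn")]
input_data = ["Queens", "Paris"]

for data in input_data:
    if data not in [c[0] for c in choices]:
        raise ValueError(f"Example {data} provided not a valid choice.")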

View File

@ -231,7 +231,7 @@ class Gallery(IOComponent, GallerySerializable, Selectable):
warn_style_method_deprecation()
if grid is not None:
warn_deprecation(
"The 'grid' parameter will be deprecated. Please use 'grid_cols' in the constructor instead.",
"The 'grid' parameter will be deprecated. Please use 'columns' in the constructor instead.",
)
self.grid_cols = grid
if columns is not None:
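
In practice the legacy keyword keeps working but now points users to the constructor argument; a hedged usage sketch (the second call triggers deprecation warnings rather than an error):

import gradio as gr

cheetah = "https://upload.wikimedia.org/wikipedia/commons/0/09/TheCheethcat.jpg"

with gr.Blocks() as demo:
    gr.Gallery(value=[cheetah] * 4, columns=2)     # preferred: constructor argument
    gr.Gallery(value=[cheetah] * 4).style(grid=2)  # still works, but warns that 'grid'
                                                   # will be deprecated in favor of 'columns'

demo.launch()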

View File

@ -6,7 +6,7 @@ from typing import TYPE_CHECKING, Literal
from gradio_client.documentation import document, set_documentation_group
from gradio.blocks import BlockContext
from gradio.deprecation import warn_style_method_deprecation
from gradio.deprecation import warn_deprecation, warn_style_method_deprecation
from gradio.events import Changeable, Selectable
if TYPE_CHECKING:
@ -119,6 +119,11 @@ class Column(BlockContext):
visible: If False, column will be hidden.
elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
"""
if scale != round(scale):
warn_deprecation(
f"'scale' value should be an integer. Using {scale} will cause issues."
)
self.scale = scale
self.min_width = min_width
self.variant = variant
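
The new check only warns; it does not round the value or raise. A brief hedged sketch of what now triggers it:

import gradio as gr

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(scale=1):    # fine: integer scale
            gr.Textbox(label="left")
        with gr.Column(scale=0.5):  # emits a deprecation warning: 'scale' should be an integer
            gr.Textbox(label="right")

demo.launch()
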
@ -273,9 +278,9 @@ class Group(BlockContext):
}
@document()
class Box(BlockContext):
"""
DEPRECATED.
Box is a layout element which places children in a box with rounded corners and
some padding around them.
Example:

View File

@ -133,7 +133,6 @@ override_signature("Row", "with gradio.Row():")
override_signature("Column", "with gradio.Column():")
override_signature("Tab", "with gradio.Tab():")
override_signature("Group", "with gradio.Group():")
override_signature("Box", "with gradio.Box():")
override_signature("Dataset", "gr.Dataset(components, samples)")

View File

@ -18,9 +18,8 @@ export async function load({ parent }) {
let objs = [
docs.building.row,
docs.building.column,
docs.building.group,
docs.building.tab,
docs.building.box,
docs.building.group,
docs.building.accordion
];
@ -29,9 +28,9 @@ export async function load({ parent }) {
["Column", "column"],
["Tab", "tab"],
["Group", "group"],
["Box", "box"],
["Accordion", "accordion"]
];
let method_headers: string[][] = [];
const get_slug = make_slug_processor();

View File

@ -257,7 +257,6 @@
.label {
transition: 150ms;
margin-top: 1px;
margin-right: calc(var(--size-1) * -1);
border-radius: var(--radius-xs);
padding: 1px 5px;
color: var(--body-text-color);

pnpm-lock.yaml generated (4 lines changed)
View File

@ -1,4 +1,4 @@
lockfileVersion: '6.0'
lockfileVersion: '6.1'
settings:
autoInstallPeers: true
@ -6393,7 +6393,7 @@ packages:
peerDependencies:
'@sveltejs/kit': ^1.0.0
dependencies:
'@sveltejs/kit': 1.16.3(svelte@3.57.0)(vite@4.3.5)
'@sveltejs/kit': 1.16.3(svelte@3.59.2)(vite@4.3.9)
import-meta-resolve: 3.0.0
dev: true