mirror of https://github.com/gradio-app/gradio.git
synced 2025-03-31 12:20:26 +08:00
Switch linting to Ruff (#3710)

* Sort requirements.in
* Switch flake8 + isort to ruff
* Apply ruff import order fixes
* Fix ruff complaints in demo/
* Fix ruff complaints in test/
* Use `x is not y`, not `not x is y`
* Remove unused listdir from website generator
* Clean up duplicate dict keys
* Add changelog entry
* Clean up unused imports (except in gradio/__init__.py)
* add space

---------

Co-authored-by: Abubakar Abid <abubakar@huggingface.co>

This commit is contained in:
parent 96ef802fbd
commit ef3862e075
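Two of the bullets above map to specific lint rules that ruff implements: `not x is y` is pycodestyle's E714 (identity tests should use `is not`), and the unused-import cleanup is pyflakes' F401. A minimal sketch of the before/after, with hypothetical code that is not taken from this diff:

```python
# Hypothetical snippet illustrating two of the fixes named in the
# commit message; not part of the actual diff below.

# Before (flagged by ruff):
#   import sys              # F401: `sys` imported but unused
#   if not value is None:   # E714: misreads easily as `(not value) is None`
#       ...

# After cleanup:
def describe(value):
    # `value is not None` reads as a single operator and states the
    # intent directly; the semantics are unchanged.
    if value is not None:
        return repr(value)
    return "<empty>"

print(describe(42))    # -> '42'
print(describe(None))  # -> '<empty>'
```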
@@ -120,6 +120,7 @@ By [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3581](https://git
 
 - Removed heavily-mocked tests related to comet_ml, wandb, and mlflow as they added a significant amount of test dependencies that prevented installation of test dependencies on Windows environments. By [@abidlabs](https://github.com/abidlabs) in [PR 3608](https://github.com/gradio-app/gradio/pull/3608)
 - Added Windows continuous integration, by [@space-nuko](https://github.com/space-nuko) in [PR 3628](https://github.com/gradio-app/gradio/pull/3628)
+- Switched linting from flake8 + isort to `ruff`, by [@akx](https://github.com/akx) in [PR 3710](https://github.com/gradio-app/gradio/pull/3710)
 
 ## Breaking Changes:
 
@@ -1,2 +1,7 @@
 from gradio_client.client import Client
 from gradio_client.utils import __version__
+
+__all__ = [
+    "Client",
+    "__version__",
+]
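The `__all__` added here declares `Client` and `__version__` as intentional re-exports, which is the conventional way to keep unused-import checks (F401) from flagging an `__init__.py` whose imports exist only to be re-exported; note the commit bullet above still exempts `gradio/__init__.py` from that cleanup. A sketch of the pattern, using a hypothetical package for illustration:

```python
# mypkg/__init__.py -- hypothetical package, for illustration only.
# The imports exist purely to re-export names at the package root;
# listing them in __all__ documents that intent and keeps F401-style
# unused-import checks from flagging them.
from mypkg.core import Engine
from mypkg.version import __version__

__all__ = [
    "Engine",
    "__version__",
]
```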
@@ -41,3 +41,11 @@ include = [
   "/README.md",
   "/requirements.txt",
 ]
+
+[tool.ruff]
+extend = "../../pyproject.toml"
+
+[tool.ruff.isort]
+known-first-party = [
+  "gradio_client"
+]
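In this configuration, `extend` inherits the repository's root ruff settings, and `known-first-party` tells ruff's isort-compatible import sorting to place `gradio_client` in the first-party group. A sketch of the ordering that produces (the stdlib and third-party modules here are assumptions chosen for illustration):

```python
# Import grouping enforced with known-first-party = ["gradio_client"]:
# standard library, then third-party, then first-party, with each
# group alphabetized and separated by a blank line.
import json
import os

import requests  # third-party (assumed dependency, for illustration)

from gradio_client import Client  # first-party per the config above
```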
@@ -4,9 +4,8 @@ set -e
 cd "$(dirname ${0})/.."
 
 echo "Linting..."
-python -m black --check test gradio_client
-python -m isort --profile=black --check-only test gradio_client
-python -m flake8 --ignore=E731,E501,E722,W503,E126,E203,F403,F541 test gradio_client --exclude gradio_client/__init__.py
+ruff test gradio_client
+black --check test gradio_client
 
 echo "Testing..."
 python -m pip install -e ../../. # Install gradio from local source (as the latest version may not yet be published to PyPI)
@@ -3,6 +3,5 @@
 cd "$(dirname ${0})/.."
 
 echo "Formatting the client library.. Our style follows the Black code style."
-python -m black test gradio_client
-python -m isort --profile=black test gradio_client
-python -m flake8 --ignore=E731,E501,E722,W503,E126,E203,F403 test gradio_client --exclude gradio_client/__init__.py
+ruff --fix test gradio_client
+black test gradio_client
@@ -1,5 +1,4 @@
 black==22.6.0
-flake8==4.0.1
-isort==5.10.1
+pytest-asyncio
 pytest==7.1.2
-pytest-asyncio
+ruff==0.0.260
File diff suppressed because one or more lines are too long
@@ -1,8 +1,5 @@
import os
from os.path import splitext
import numpy as np
import sys
import matplotlib.pyplot as plt
import torch
import torchvision
import wget
@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: animeganv2\n", "### Recreate the viral AnimeGAN image transformation demo.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch torchvision Pillow gdown numpy scipy cmake onnxruntime-gpu opencv-python-headless"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/animeganv2/gongyoo.jpeg\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/animeganv2/groot.jpeg"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from PIL import Image\n", "import torch\n", "\n", "model2 = torch.hub.load(\n", " \"AK391/animegan2-pytorch:main\",\n", " \"generator\",\n", " pretrained=True,\n", " progress=False\n", ")\n", "model1 = torch.hub.load(\"AK391/animegan2-pytorch:main\", \"generator\", pretrained=\"face_paint_512_v1\")\n", "face2paint = torch.hub.load(\n", " 'AK391/animegan2-pytorch:main', 'face2paint', \n", " size=512,side_by_side=False\n", ")\n", "\n", "def inference(img, ver):\n", " if ver == 'version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)':\n", " out = face2paint(model2, img)\n", " else:\n", " out = face2paint(model1, img)\n", " return out\n", "\n", "title = \"AnimeGANv2\"\n", "description = \"Gradio Demo for AnimeGanv2 Face Portrait. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Please use a cropped portrait picture for best results similar to the examples below.\"\n", "article = \"<p style='text-align: center'><a href='https://github.com/bryandlee/animegan2-pytorch' target='_blank'>Github Repo Pytorch</a></p> <center><img src='https://visitor-badge.glitch.me/badge?page_id=akhaliq_animegan' alt='visitor badge'></center></p>\"\n", "examples=[['groot.jpeg','version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)'],['gongyoo.jpeg','version 1 (\ud83d\udd3a stylization, \ud83d\udd3b robustness)']]\n", "\n", "demo = gr.Interface(\n", " fn=inference, \n", " inputs=[gr.inputs.Image(type=\"pil\"),gr.inputs.Radio(['version 1 (\ud83d\udd3a stylization, \ud83d\udd3b robustness)','version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)'], type=\"value\", default='version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)', label='version')], \n", " outputs=gr.outputs.Image(type=\"pil\"),\n", " title=title,\n", " description=description,\n", " article=article,\n", " examples=examples)\n", "\n", "demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: animeganv2\n", "### Recreate the viral AnimeGAN image transformation demo.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch torchvision Pillow gdown numpy scipy cmake onnxruntime-gpu opencv-python-headless"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/animeganv2/gongyoo.jpeg\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/animeganv2/groot.jpeg"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import torch\n", "\n", "model2 = torch.hub.load(\n", " \"AK391/animegan2-pytorch:main\",\n", " \"generator\",\n", " pretrained=True,\n", " progress=False\n", ")\n", "model1 = torch.hub.load(\"AK391/animegan2-pytorch:main\", \"generator\", pretrained=\"face_paint_512_v1\")\n", "face2paint = torch.hub.load(\n", " 'AK391/animegan2-pytorch:main', 'face2paint', \n", " size=512,side_by_side=False\n", ")\n", "\n", "def inference(img, ver):\n", " if ver == 'version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)':\n", " out = face2paint(model2, img)\n", " else:\n", " out = face2paint(model1, img)\n", " return out\n", "\n", "title = \"AnimeGANv2\"\n", "description = \"Gradio Demo for AnimeGanv2 Face Portrait. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Please use a cropped portrait picture for best results similar to the examples below.\"\n", "article = \"<p style='text-align: center'><a href='https://github.com/bryandlee/animegan2-pytorch' target='_blank'>Github Repo Pytorch</a></p> <center><img src='https://visitor-badge.glitch.me/badge?page_id=akhaliq_animegan' alt='visitor badge'></center></p>\"\n", "examples=[['groot.jpeg','version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)'],['gongyoo.jpeg','version 1 (\ud83d\udd3a stylization, \ud83d\udd3b robustness)']]\n", "\n", "demo = gr.Interface(\n", " fn=inference, \n", " inputs=[gr.inputs.Image(type=\"pil\"),gr.inputs.Radio(['version 1 (\ud83d\udd3a stylization, \ud83d\udd3b robustness)','version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)'], type=\"value\", default='version 2 (\ud83d\udd3a robustness,\ud83d\udd3b stylization)', label='version')], \n", " outputs=gr.outputs.Image(type=\"pil\"),\n", " title=title,\n", " description=description,\n", " article=article,\n", " examples=examples)\n", "\n", "demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@@ -1,5 +1,4 @@
 import gradio as gr
-from PIL import Image
 import torch
 
 model2 = torch.hub.load(
@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: bokeh_plot"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio bokeh>=3.0 xyzservices"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import xyzservices.providers as xyz\n", "from bokeh.plotting import figure\n", "from bokeh.tile_providers import get_provider\n", "from bokeh.models import ColumnDataSource, Whisker\n", "from bokeh.plotting import figure\n", "from bokeh.sampledata.autompg2 import autompg2 as df\n", "from bokeh.sampledata.penguins import data\n", "from bokeh.transform import factor_cmap, jitter, factor_mark\n", "\n", "\n", "def get_plot(plot_type):\n", " if plot_type == \"map\":\n", " tile_provider = get_provider(xyz.OpenStreetMap.Mapnik)\n", " plot = figure(\n", " x_range=(-2000000, 6000000),\n", " y_range=(-1000000, 7000000),\n", " x_axis_type=\"mercator\",\n", " y_axis_type=\"mercator\",\n", " )\n", " plot.add_tile(tile_provider)\n", " return plot\n", " elif plot_type == \"whisker\":\n", " classes = list(sorted(df[\"class\"].unique()))\n", "\n", " p = figure(\n", " height=400,\n", " x_range=classes,\n", " background_fill_color=\"#efefef\",\n", " title=\"Car class vs HWY mpg with quintile ranges\",\n", " )\n", " p.xgrid.grid_line_color = None\n", "\n", " g = df.groupby(\"class\")\n", " upper = g.hwy.quantile(0.80)\n", " lower = g.hwy.quantile(0.20)\n", " source = ColumnDataSource(data=dict(base=classes, upper=upper, lower=lower))\n", "\n", " error = Whisker(\n", " base=\"base\",\n", " upper=\"upper\",\n", " lower=\"lower\",\n", " source=source,\n", " level=\"annotation\",\n", " line_width=2,\n", " )\n", " error.upper_head.size = 20\n", " error.lower_head.size = 20\n", " p.add_layout(error)\n", "\n", " p.circle(\n", " jitter(\"class\", 0.3, range=p.x_range),\n", " \"hwy\",\n", " source=df,\n", " alpha=0.5,\n", " size=13,\n", " line_color=\"white\",\n", " color=factor_cmap(\"class\", \"Light6\", classes),\n", " )\n", " return p\n", " elif plot_type == \"scatter\":\n", "\n", " SPECIES = sorted(data.species.unique())\n", " MARKERS = [\"hex\", \"circle_x\", \"triangle\"]\n", "\n", " p = figure(title=\"Penguin size\", background_fill_color=\"#fafafa\")\n", " p.xaxis.axis_label = \"Flipper Length (mm)\"\n", " p.yaxis.axis_label = \"Body Mass (g)\"\n", "\n", " p.scatter(\n", " \"flipper_length_mm\",\n", " \"body_mass_g\",\n", " source=data,\n", " legend_group=\"species\",\n", " fill_alpha=0.4,\n", " size=12,\n", " marker=factor_mark(\"species\", MARKERS, SPECIES),\n", " color=factor_cmap(\"species\", \"Category10_3\", SPECIES),\n", " )\n", "\n", " p.legend.location = \"top_left\"\n", " p.legend.title = \"Species\"\n", " return p\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " plot_type = gr.Radio(value=\"scatter\", choices=[\"scatter\", \"whisker\", \"map\"])\n", " plot = gr.Plot()\n", " plot_type.change(get_plot, inputs=[plot_type], outputs=[plot])\n", " demo.load(get_plot, inputs=[plot_type], outputs=[plot])\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: bokeh_plot"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio bokeh>=3.0 xyzservices"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import xyzservices.providers as xyz\n", "from bokeh.tile_providers import get_provider\n", "from bokeh.models import ColumnDataSource, Whisker\n", "from bokeh.plotting import figure\n", "from bokeh.sampledata.autompg2 import autompg2 as df\n", "from bokeh.sampledata.penguins import data\n", "from bokeh.transform import factor_cmap, jitter, factor_mark\n", "\n", "\n", "def get_plot(plot_type):\n", " if plot_type == \"map\":\n", " tile_provider = get_provider(xyz.OpenStreetMap.Mapnik)\n", " plot = figure(\n", " x_range=(-2000000, 6000000),\n", " y_range=(-1000000, 7000000),\n", " x_axis_type=\"mercator\",\n", " y_axis_type=\"mercator\",\n", " )\n", " plot.add_tile(tile_provider)\n", " return plot\n", " elif plot_type == \"whisker\":\n", " classes = list(sorted(df[\"class\"].unique()))\n", "\n", " p = figure(\n", " height=400,\n", " x_range=classes,\n", " background_fill_color=\"#efefef\",\n", " title=\"Car class vs HWY mpg with quintile ranges\",\n", " )\n", " p.xgrid.grid_line_color = None\n", "\n", " g = df.groupby(\"class\")\n", " upper = g.hwy.quantile(0.80)\n", " lower = g.hwy.quantile(0.20)\n", " source = ColumnDataSource(data=dict(base=classes, upper=upper, lower=lower))\n", "\n", " error = Whisker(\n", " base=\"base\",\n", " upper=\"upper\",\n", " lower=\"lower\",\n", " source=source,\n", " level=\"annotation\",\n", " line_width=2,\n", " )\n", " error.upper_head.size = 20\n", " error.lower_head.size = 20\n", " p.add_layout(error)\n", "\n", " p.circle(\n", " jitter(\"class\", 0.3, range=p.x_range),\n", " \"hwy\",\n", " source=df,\n", " alpha=0.5,\n", " size=13,\n", " line_color=\"white\",\n", " color=factor_cmap(\"class\", \"Light6\", classes),\n", " )\n", " return p\n", " elif plot_type == \"scatter\":\n", "\n", " SPECIES = sorted(data.species.unique())\n", " MARKERS = [\"hex\", \"circle_x\", \"triangle\"]\n", "\n", " p = figure(title=\"Penguin size\", background_fill_color=\"#fafafa\")\n", " p.xaxis.axis_label = \"Flipper Length (mm)\"\n", " p.yaxis.axis_label = \"Body Mass (g)\"\n", "\n", " p.scatter(\n", " \"flipper_length_mm\",\n", " \"body_mass_g\",\n", " source=data,\n", " legend_group=\"species\",\n", " fill_alpha=0.4,\n", " size=12,\n", " marker=factor_mark(\"species\", MARKERS, SPECIES),\n", " color=factor_cmap(\"species\", \"Category10_3\", SPECIES),\n", " )\n", "\n", " p.legend.location = \"top_left\"\n", " p.legend.title = \"Species\"\n", " return p\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " plot_type = gr.Radio(value=\"scatter\", choices=[\"scatter\", \"whisker\", \"map\"])\n", " plot = gr.Plot()\n", " plot_type.change(get_plot, inputs=[plot_type], outputs=[plot])\n", " demo.load(get_plot, inputs=[plot_type], outputs=[plot])\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@@ -1,6 +1,5 @@
 import gradio as gr
 import xyzservices.providers as xyz
-from bokeh.plotting import figure
 from bokeh.tile_providers import get_provider
 from bokeh.models import ColumnDataSource, Whisker
 from bokeh.plotting import figure
@@ -91,4 +90,4 @@ with gr.Blocks() as demo:
 
 
 if __name__ == "__main__":
-    demo.launch()
\ No newline at end of file
+    demo.launch()
@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: chicago-bikeshare-dashboard"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio psycopg2 matplotlib SQLAlchemy "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import os\n", "import gradio as gr\n", "import matplotlib\n", "import matplotlib.pyplot as plt\n", "import pandas as pd\n", "\n", "matplotlib.use(\"Agg\")\n", "\n", "DB_USER = os.getenv(\"DB_USER\")\n", "DB_PASSWORD = os.getenv(\"DB_PASSWORD\")\n", "DB_HOST = os.getenv(\"DB_HOST\")\n", "PORT = 8080\n", "DB_NAME = \"bikeshare\"\n", "\n", "connection_string = (\n", " f\"postgresql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}?port={PORT}&dbname={DB_NAME}\"\n", ")\n", "\n", "\n", "def get_count_ride_type():\n", " df = pd.read_sql(\n", " \"\"\"\n", " SELECT COUNT(ride_id) as n, rideable_type\n", " FROM rides\n", " GROUP BY rideable_type\n", " ORDER BY n DESC\n", " \"\"\",\n", " con=connection_string,\n", " )\n", " return df\n", "\n", "\n", "def get_most_popular_stations():\n", "\n", " df = pd.read_sql(\n", " \"\"\"\n", " SELECT COUNT(ride_id) as n, MAX(start_station_name) as station\n", " FROM RIDES\n", " WHERE start_station_name is NOT NULL\n", " GROUP BY start_station_id\n", " ORDER BY n DESC\n", " LIMIT 5\n", " \"\"\",\n", " con=connection_string,\n", " )\n", " return df\n", "\n", "\n", "with gr.Blocks() as demo:\n", " gr.Markdown(\n", " \"\"\"\n", " # Chicago Bike Share Dashboard\n", " \n", " This demo pulls Chicago bike share data for March 2022 from a postgresql database hosted on AWS.\n", " This demo uses psycopg2 but any postgresql client library (SQLAlchemy)\n", " is compatible with gradio.\n", " \n", " Connection credentials are handled by environment variables\n", " defined as secrets in the Space.\n", "\n", " If data were added to the database, the plots in this demo would update\n", " whenever the webpage is reloaded.\n", " \n", " This demo serves as a starting point for your database-connected apps!\n", " \"\"\"\n", " )\n", " with gr.Row():\n", " bike_type = gr.BarPlot(\n", " x=\"rideable_type\",\n", " y='n',\n", " title=\"Number of rides per bicycle type\",\n", " y_title=\"Number of Rides\",\n", " x_title=\"Bicycle Type\",\n", " vertical=False,\n", " tooltip=['rideable_type', \"n\"],\n", " height=300,\n", " width=300,\n", " )\n", " station = gr.BarPlot(\n", " x='station',\n", " y='n',\n", " title=\"Most Popular Stations\",\n", " y_title=\"Number of Rides\",\n", " x_title=\"Station Name\",\n", " vertical=False,\n", " tooltip=['station', 'n'],\n", " height=300,\n", " width=300\n", " )\n", "\n", " demo.load(get_count_ride_type, inputs=None, outputs=bike_type)\n", " demo.load(get_most_popular_stations, inputs=None, outputs=station)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: chicago-bikeshare-dashboard"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio psycopg2 matplotlib SQLAlchemy "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import os\n", "import gradio as gr\n", "import matplotlib\n", "import pandas as pd\n", "\n", "matplotlib.use(\"Agg\")\n", "\n", "DB_USER = os.getenv(\"DB_USER\")\n", "DB_PASSWORD = os.getenv(\"DB_PASSWORD\")\n", "DB_HOST = os.getenv(\"DB_HOST\")\n", "PORT = 8080\n", "DB_NAME = \"bikeshare\"\n", "\n", "connection_string = (\n", " f\"postgresql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}?port={PORT}&dbname={DB_NAME}\"\n", ")\n", "\n", "\n", "def get_count_ride_type():\n", " df = pd.read_sql(\n", " \"\"\"\n", " SELECT COUNT(ride_id) as n, rideable_type\n", " FROM rides\n", " GROUP BY rideable_type\n", " ORDER BY n DESC\n", " \"\"\",\n", " con=connection_string,\n", " )\n", " return df\n", "\n", "\n", "def get_most_popular_stations():\n", "\n", " df = pd.read_sql(\n", " \"\"\"\n", " SELECT COUNT(ride_id) as n, MAX(start_station_name) as station\n", " FROM RIDES\n", " WHERE start_station_name is NOT NULL\n", " GROUP BY start_station_id\n", " ORDER BY n DESC\n", " LIMIT 5\n", " \"\"\",\n", " con=connection_string,\n", " )\n", " return df\n", "\n", "\n", "with gr.Blocks() as demo:\n", " gr.Markdown(\n", " \"\"\"\n", " # Chicago Bike Share Dashboard\n", " \n", " This demo pulls Chicago bike share data for March 2022 from a postgresql database hosted on AWS.\n", " This demo uses psycopg2 but any postgresql client library (SQLAlchemy)\n", " is compatible with gradio.\n", " \n", " Connection credentials are handled by environment variables\n", " defined as secrets in the Space.\n", "\n", " If data were added to the database, the plots in this demo would update\n", " whenever the webpage is reloaded.\n", " \n", " This demo serves as a starting point for your database-connected apps!\n", " \"\"\"\n", " )\n", " with gr.Row():\n", " bike_type = gr.BarPlot(\n", " x=\"rideable_type\",\n", " y='n',\n", " title=\"Number of rides per bicycle type\",\n", " y_title=\"Number of Rides\",\n", " x_title=\"Bicycle Type\",\n", " vertical=False,\n", " tooltip=['rideable_type', \"n\"],\n", " height=300,\n", " width=300,\n", " )\n", " station = gr.BarPlot(\n", " x='station',\n", " y='n',\n", " title=\"Most Popular Stations\",\n", " y_title=\"Number of Rides\",\n", " x_title=\"Station Name\",\n", " vertical=False,\n", " tooltip=['station', 'n'],\n", " height=300,\n", " width=300\n", " )\n", "\n", " demo.load(get_count_ride_type, inputs=None, outputs=bike_type)\n", " demo.load(get_most_popular_stations, inputs=None, outputs=station)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@@ -1,7 +1,6 @@
 import os
 import gradio as gr
 import matplotlib
-import matplotlib.pyplot as plt
 import pandas as pd
 
 matplotlib.use("Agg")
File diff suppressed because one or more lines are too long
@@ -5,7 +5,6 @@ import numpy as np
from PIL import Image
import open3d as o3d
from pathlib import Path
import os

feature_extractor = DPTFeatureExtractor.from_pretrained("Intel/dpt-large")
model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")
@@ -38,7 +37,7 @@ def process_image(image_path):
         gltf_path = create_3d_obj(np.array(image), depth_image, image_path)
         img = Image.fromarray(depth_image)
         return [img, gltf_path, gltf_path]
-    except Exception as e:
+    except Exception:
         gltf_path = create_3d_obj(
             np.array(image), depth_image, image_path, depth=8)
         img = Image.fromarray(depth_image)
@@ -79,7 +78,7 @@ def create_3d_obj(rgb_image, depth_image, image_path, depth=10):
         [0, 0, 0, 1]])
 
     print('run Poisson surface reconstruction')
-    with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug) as cm:
+    with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug):
         mesh_raw, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(
             pcd, depth=depth, width=0, scale=1.1, linear_fit=True)
 
 
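Both edits in this file drop a name that was bound but never read: the exception object `e` (which ruff reports as F841, local variable assigned but never used) and, in the same spirit, the context-manager handle `cm`. The same fix pattern in miniature, as a hypothetical example:

```python
# Hypothetical before/after for the unused-binding cleanup (F841).

def parse_int(text):
    try:
        return int(text)
    # Before: `except ValueError as e:` bound `e` without ever using it.
    # After: omit the binding entirely.
    except ValueError:
        return None

print(parse_int("42"))    # -> 42
print(parse_int("nope"))  # -> None
```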
@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: digit_classifier"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio tensorflow"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import os\n", "from urllib.request import urlretrieve\n", "\n", "import tensorflow as tf\n", "\n", "import gradio\n", "import gradio as gr\n", "\n", "urlretrieve(\n", " \"https://gr-models.s3-us-west-2.amazonaws.com/mnist-model.h5\", \"mnist-model.h5\"\n", ")\n", "model = tf.keras.models.load_model(\"mnist-model.h5\")\n", "\n", "\n", "def recognize_digit(image):\n", " image = image.reshape(1, -1)\n", " prediction = model.predict(image).tolist()[0]\n", " return {str(i): prediction[i] for i in range(10)}\n", "\n", "\n", "im = gradio.Image(shape=(28, 28), image_mode=\"L\", invert_colors=False, source=\"canvas\")\n", "\n", "demo = gr.Interface(\n", " recognize_digit,\n", " im,\n", " gradio.Label(num_top_classes=3),\n", " live=True,\n", " interpretation=\"default\",\n", " capture_session=True,\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: digit_classifier"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio tensorflow"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["from urllib.request import urlretrieve\n", "\n", "import tensorflow as tf\n", "\n", "import gradio\n", "import gradio as gr\n", "\n", "urlretrieve(\n", " \"https://gr-models.s3-us-west-2.amazonaws.com/mnist-model.h5\", \"mnist-model.h5\"\n", ")\n", "model = tf.keras.models.load_model(\"mnist-model.h5\")\n", "\n", "\n", "def recognize_digit(image):\n", " image = image.reshape(1, -1)\n", " prediction = model.predict(image).tolist()[0]\n", " return {str(i): prediction[i] for i in range(10)}\n", "\n", "\n", "im = gradio.Image(shape=(28, 28), image_mode=\"L\", invert_colors=False, source=\"canvas\")\n", "\n", "demo = gr.Interface(\n", " recognize_digit,\n", " im,\n", " gradio.Label(num_top_classes=3),\n", " live=True,\n", " interpretation=\"default\",\n", " capture_session=True,\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@@ -1,4 +1,3 @@
-import os
 from urllib.request import urlretrieve
 
 import tensorflow as tf
@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: fake_gan\n", "### This is a fake GAN that shows how to create a text-to-image interface for image generation. Check out the Stable Diffusion demo for more: https://hf.co/spaces/stabilityai/stable-diffusion/\n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/cheetah1.jpg https://github.com/gradio-app/gradio/raw/main/demo/fake_gan/files/cheetah1.jpg"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["# This demo needs to be run from the repo folder.\n", "# python demo/fake_gan/run.py\n", "import os\n", "import random\n", "\n", "import gradio as gr\n", "\n", "\n", "def fake_gan():\n", " images = [\n", " (random.choice(\n", " [\n", " \"https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n", " \"https://images.unsplash.com/photo-1554151228-14d9def656e4?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=386&q=80\",\n", " \"https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80\",\n", " \"https://images.unsplash.com/photo-1546456073-92b9f0a8d413?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n", " \"https://images.unsplash.com/photo-1601412436009-d964bd02edbc?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=464&q=80\",\n", " ]\n", " ), f\"label {i}\" if i != 0 else \"label\" * 50)\n", " for i in range(3)\n", " ]\n", " return images\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Column(variant=\"panel\"):\n", " with gr.Row(variant=\"compact\"):\n", " text = gr.Textbox(\n", " label=\"Enter your prompt\",\n", " show_label=False,\n", " max_lines=1,\n", " placeholder=\"Enter your prompt\",\n", " ).style(\n", " container=False,\n", " )\n", " btn = gr.Button(\"Generate image\").style(full_width=False)\n", "\n", " gallery = gr.Gallery(\n", " label=\"Generated images\", show_label=False, elem_id=\"gallery\"\n", " ).style(grid=[2], height=\"auto\")\n", "\n", " btn.click(fake_gan, None, gallery)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: fake_gan\n", "### This is a fake GAN that shows how to create a text-to-image interface for image generation. Check out the Stable Diffusion demo for more: https://hf.co/spaces/stabilityai/stable-diffusion/\n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/cheetah1.jpg https://github.com/gradio-app/gradio/raw/main/demo/fake_gan/files/cheetah1.jpg"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["# This demo needs to be run from the repo folder.\n", "# python demo/fake_gan/run.py\n", "import random\n", "\n", "import gradio as gr\n", "\n", "\n", "def fake_gan():\n", " images = [\n", " (random.choice(\n", " [\n", " \"https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n", " \"https://images.unsplash.com/photo-1554151228-14d9def656e4?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=386&q=80\",\n", " \"https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80\",\n", " \"https://images.unsplash.com/photo-1546456073-92b9f0a8d413?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n", " \"https://images.unsplash.com/photo-1601412436009-d964bd02edbc?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=464&q=80\",\n", " ]\n", " ), f\"label {i}\" if i != 0 else \"label\" * 50)\n", " for i in range(3)\n", " ]\n", " return images\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Column(variant=\"panel\"):\n", " with gr.Row(variant=\"compact\"):\n", " text = gr.Textbox(\n", " label=\"Enter your prompt\",\n", " show_label=False,\n", " max_lines=1,\n", " placeholder=\"Enter your prompt\",\n", " ).style(\n", " container=False,\n", " )\n", " btn = gr.Button(\"Generate image\").style(full_width=False)\n", "\n", " gallery = gr.Gallery(\n", " label=\"Generated images\", show_label=False, elem_id=\"gallery\"\n", " ).style(grid=[2], height=\"auto\")\n", "\n", " btn.click(fake_gan, None, gallery)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@@ -1,6 +1,5 @@
 # This demo needs to be run from the repo folder.
 # python demo/fake_gan/run.py
-import os
 import random
 
 import gradio as gr
@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: fake_gan_2"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/cheetah1.jpg https://github.com/gradio-app/gradio/raw/main/demo/fake_gan_2/files/cheetah1.jpg\n", "!wget -q -O files/elephant.jpg https://github.com/gradio-app/gradio/raw/main/demo/fake_gan_2/files/elephant.jpg\n", "!wget -q -O files/tiger.jpg https://github.com/gradio-app/gradio/raw/main/demo/fake_gan_2/files/tiger.jpg\n", "!wget -q -O files/zebra.jpg https://github.com/gradio-app/gradio/raw/main/demo/fake_gan_2/files/zebra.jpg"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["# This demo needs to be run from the repo folder.\n", "# python demo/fake_gan/run.py\n", "import os\n", "import random\n", "import time\n", "\n", "import gradio as gr\n", "\n", "\n", "def fake_gan(desc):\n", " if desc == \"NSFW\":\n", " raise gr.Error(\"NSFW - banned content.\")\n", " if desc == \"error\":\n", " raise ValueError(\"error\")\n", " time.sleep(9)\n", " image = random.choice(\n", " [\n", " \"files/cheetah1.jpg\",\n", " \"files/elephant.jpg\",\n", " \"files/tiger.jpg\",\n", " \"files/zebra.jpg\",\n", " ]\n", " )\n", " return image\n", "\n", "\n", "demo = gr.Interface(\n", " fn=fake_gan,\n", " inputs=gr.Textbox(),\n", " outputs=gr.Image(label=\"Generated Image\"),\n", " title=\"FD-GAN\",\n", " description=\"This is a fake demo of a GAN. In reality, the images are randomly chosen from Unsplash.\",\n", ")\n", "demo.queue(max_size=3)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: fake_gan_2"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/cheetah1.jpg https://github.com/gradio-app/gradio/raw/main/demo/fake_gan_2/files/cheetah1.jpg\n", "!wget -q -O files/elephant.jpg https://github.com/gradio-app/gradio/raw/main/demo/fake_gan_2/files/elephant.jpg\n", "!wget -q -O files/tiger.jpg https://github.com/gradio-app/gradio/raw/main/demo/fake_gan_2/files/tiger.jpg\n", "!wget -q -O files/zebra.jpg https://github.com/gradio-app/gradio/raw/main/demo/fake_gan_2/files/zebra.jpg"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["# This demo needs to be run from the repo folder.\n", "# python demo/fake_gan/run.py\n", "import random\n", "import time\n", "\n", "import gradio as gr\n", "\n", "\n", "def fake_gan(desc):\n", " if desc == \"NSFW\":\n", " raise gr.Error(\"NSFW - banned content.\")\n", " if desc == \"error\":\n", " raise ValueError(\"error\")\n", " time.sleep(9)\n", " image = random.choice(\n", " [\n", " \"files/cheetah1.jpg\",\n", " \"files/elephant.jpg\",\n", " \"files/tiger.jpg\",\n", " \"files/zebra.jpg\",\n", " ]\n", " )\n", " return image\n", "\n", "\n", "demo = gr.Interface(\n", " fn=fake_gan,\n", " inputs=gr.Textbox(),\n", " outputs=gr.Image(label=\"Generated Image\"),\n", " title=\"FD-GAN\",\n", " description=\"This is a fake demo of a GAN. In reality, the images are randomly chosen from Unsplash.\",\n", ")\n", "demo.queue(max_size=3)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@@ -1,6 +1,5 @@
 # This demo needs to be run from the repo folder.
 # python demo/fake_gan/run.py
-import os
 import random
 import time
 
@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: fake_gan_no_input"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import random\n", "import time\n", "\n", "import gradio as gr\n", "\n", "\n", "def fake_gan():\n", " time.sleep(1)\n", " images = [\n", " \"https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n", " \"https://images.unsplash.com/photo-1554151228-14d9def656e4?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=386&q=80\",\n", " \"https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80\",\n", " ]\n", " return images\n", "\n", "\n", "demo = gr.Interface(\n", " fn=fake_gan,\n", " inputs=None,\n", " outputs=gr.Gallery(label=\"Generated Images\").style(grid=[2]),\n", " title=\"FD-GAN\",\n", " description=\"This is a fake demo of a GAN. In reality, the images are randomly chosen from Unsplash.\",\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: fake_gan_no_input"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import time\n", "\n", "import gradio as gr\n", "\n", "\n", "def fake_gan():\n", " time.sleep(1)\n", " images = [\n", " \"https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80\",\n", " \"https://images.unsplash.com/photo-1554151228-14d9def656e4?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=386&q=80\",\n", " \"https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80\",\n", " ]\n", " return images\n", "\n", "\n", "demo = gr.Interface(\n", " fn=fake_gan,\n", " inputs=None,\n", " outputs=gr.Gallery(label=\"Generated Images\").style(grid=[2]),\n", " title=\"FD-GAN\",\n", " description=\"This is a fake demo of a GAN. In reality, the images are randomly chosen from Unsplash.\",\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@@ -1,4 +1,3 @@
-import random
 import time
 
 import gradio as gr
@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: hangman"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import random\n", "\n", "secret_word = \"gradio\"\n", "\n", "with gr.Blocks() as demo: \n", " used_letters_var = gr.State([])\n", " with gr.Row() as row:\n", " with gr.Column():\n", " input_letter = gr.Textbox(label=\"Enter letter\")\n", " btn = gr.Button(\"Guess Letter\")\n", " with gr.Column():\n", " hangman = gr.Textbox(\n", " label=\"Hangman\",\n", " value=\"_\"*len(secret_word)\n", " )\n", " used_letters_box = gr.Textbox(label=\"Used Letters\")\n", "\n", " def guess_letter(letter, used_letters):\n", " used_letters.append(letter)\n", " answer = \"\".join([\n", " (letter if letter in used_letters else \"_\")\n", " for letter in secret_word\n", " ])\n", " return {\n", " used_letters_var: used_letters,\n", " used_letters_box: \", \".join(used_letters),\n", " hangman: answer\n", " }\n", " btn.click(\n", " guess_letter, \n", " [input_letter, used_letters_var],\n", " [used_letters_var, used_letters_box, hangman]\n", " )\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: hangman"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "secret_word = \"gradio\"\n", "\n", "with gr.Blocks() as demo: \n", " used_letters_var = gr.State([])\n", " with gr.Row() as row:\n", " with gr.Column():\n", " input_letter = gr.Textbox(label=\"Enter letter\")\n", " btn = gr.Button(\"Guess Letter\")\n", " with gr.Column():\n", " hangman = gr.Textbox(\n", " label=\"Hangman\",\n", " value=\"_\"*len(secret_word)\n", " )\n", " used_letters_box = gr.Textbox(label=\"Used Letters\")\n", "\n", " def guess_letter(letter, used_letters):\n", " used_letters.append(letter)\n", " answer = \"\".join([\n", " (letter if letter in used_letters else \"_\")\n", " for letter in secret_word\n", " ])\n", " return {\n", " used_letters_var: used_letters,\n", " used_letters_box: \", \".join(used_letters),\n", " hangman: answer\n", " }\n", " btn.click(\n", " guess_letter, \n", " [input_letter, used_letters_var],\n", " [used_letters_var, used_letters_box, hangman]\n", " )\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@@ -1,5 +1,4 @@
 import gradio as gr
-import random
 
 secret_word = "gradio"
 
@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: main_note"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio scipy numpy matplotlib"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('audio')\n", "!wget -q -O audio/cantina.wav https://github.com/gradio-app/gradio/raw/main/demo/main_note/audio/cantina.wav\n", "!wget -q -O audio/recording1.wav https://github.com/gradio-app/gradio/raw/main/demo/main_note/audio/recording1.wav"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["from math import log2, pow\n", "import os\n", "\n", "import numpy as np\n", "from scipy.fftpack import fft\n", "\n", "import gradio as gr\n", "\n", "A4 = 440\n", "C0 = A4 * pow(2, -4.75)\n", "name = [\"C\", \"C#\", \"D\", \"D#\", \"E\", \"F\", \"F#\", \"G\", \"G#\", \"A\", \"A#\", \"B\"]\n", "\n", "\n", "def get_pitch(freq):\n", " h = round(12 * log2(freq / C0))\n", " n = h % 12\n", " return name[n]\n", "\n", "\n", "def main_note(audio):\n", " rate, y = audio\n", " if len(y.shape) == 2:\n", " y = y.T[0]\n", " N = len(y)\n", " T = 1.0 / rate\n", " x = np.linspace(0.0, N * T, N)\n", " yf = fft(y)\n", " yf2 = 2.0 / N * np.abs(yf[0 : N // 2])\n", " xf = np.linspace(0.0, 1.0 / (2.0 * T), N // 2)\n", "\n", " volume_per_pitch = {}\n", " total_volume = np.sum(yf2)\n", " for freq, volume in zip(xf, yf2):\n", " if freq == 0:\n", " continue\n", " pitch = get_pitch(freq)\n", " if pitch not in volume_per_pitch:\n", " volume_per_pitch[pitch] = 0\n", " volume_per_pitch[pitch] += 1.0 * volume / total_volume\n", " volume_per_pitch = {k: float(v) for k, v in volume_per_pitch.items()}\n", " return volume_per_pitch\n", "\n", "\n", "demo = gr.Interface(\n", " main_note,\n", " gr.Audio(source=\"microphone\"),\n", " gr.Label(num_top_classes=4),\n", " examples=[\n", " [os.path.join(os.path.abspath(''),\"audio/recording1.wav\")],\n", " [os.path.join(os.path.abspath(''),\"audio/cantina.wav\")],\n", " ],\n", " interpretation=\"default\",\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: main_note"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio scipy numpy matplotlib"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('audio')\n", "!wget -q -O audio/cantina.wav https://github.com/gradio-app/gradio/raw/main/demo/main_note/audio/cantina.wav\n", "!wget -q -O audio/recording1.wav https://github.com/gradio-app/gradio/raw/main/demo/main_note/audio/recording1.wav"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["from math import log2, pow\n", "import os\n", "\n", "import numpy as np\n", "from scipy.fftpack import fft\n", "\n", "import gradio as gr\n", "\n", "A4 = 440\n", "C0 = A4 * pow(2, -4.75)\n", "name = [\"C\", \"C#\", \"D\", \"D#\", \"E\", \"F\", \"F#\", \"G\", \"G#\", \"A\", \"A#\", \"B\"]\n", "\n", "\n", "def get_pitch(freq):\n", " h = round(12 * log2(freq / C0))\n", " n = h % 12\n", " return name[n]\n", "\n", "\n", "def main_note(audio):\n", " rate, y = audio\n", " if len(y.shape) == 2:\n", " y = y.T[0]\n", " N = len(y)\n", " T = 1.0 / rate\n", " yf = fft(y)\n", " yf2 = 2.0 / N * np.abs(yf[0 : N // 2])\n", " xf = np.linspace(0.0, 1.0 / (2.0 * T), N // 2)\n", "\n", " volume_per_pitch = {}\n", " total_volume = np.sum(yf2)\n", " for freq, volume in zip(xf, yf2):\n", " if freq == 0:\n", " continue\n", " pitch = get_pitch(freq)\n", " if pitch not in volume_per_pitch:\n", " volume_per_pitch[pitch] = 0\n", " volume_per_pitch[pitch] += 1.0 * volume / total_volume\n", " volume_per_pitch = {k: float(v) for k, v in volume_per_pitch.items()}\n", " return volume_per_pitch\n", "\n", "\n", "demo = gr.Interface(\n", " main_note,\n", " gr.Audio(source=\"microphone\"),\n", " gr.Label(num_top_classes=4),\n", " examples=[\n", " [os.path.join(os.path.abspath(''),\"audio/recording1.wav\")],\n", " [os.path.join(os.path.abspath(''),\"audio/cantina.wav\")],\n", " ],\n", " interpretation=\"default\",\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@@ -23,7 +23,6 @@ def main_note(audio):
         y = y.T[0]
     N = len(y)
     T = 1.0 / rate
-    x = np.linspace(0.0, N * T, N)
     yf = fft(y)
     yf2 = 2.0 / N * np.abs(yf[0 : N // 2])
     xf = np.linspace(0.0, 1.0 / (2.0 * T), N // 2)
@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: map_airbnb\n", "### Display an interactive map of AirBnB locations with Plotly. Data is hosted on HuggingFace Datasets. \n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio plotly"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import pandas as pd\n", "import plotly.graph_objects as go\n", "from datasets import load_dataset\n", "\n", "dataset = load_dataset(\"gradio/NYC-Airbnb-Open-Data\", split=\"train\")\n", "df = dataset.to_pandas()\n", "\n", "def filter_map(min_price, max_price, boroughs):\n", "\n", " filtered_df = df[(df['neighbourhood_group'].isin(boroughs)) & \n", " (df['price'] > min_price) & (df['price'] < max_price)]\n", " names = filtered_df[\"name\"].tolist()\n", " prices = filtered_df[\"price\"].tolist()\n", " text_list = [(names[i], prices[i]) for i in range(0, len(names))]\n", " fig = go.Figure(go.Scattermapbox(\n", " customdata=text_list,\n", " lat=filtered_df['latitude'].tolist(),\n", " lon=filtered_df['longitude'].tolist(),\n", " mode='markers',\n", " marker=go.scattermapbox.Marker(\n", " size=6\n", " ),\n", " hoverinfo=\"text\",\n", " hovertemplate='<b>Name</b>: %{customdata[0]}<br><b>Price</b>: $%{customdata[1]}'\n", " ))\n", "\n", " fig.update_layout(\n", " mapbox_style=\"open-street-map\",\n", " hovermode='closest',\n", " mapbox=dict(\n", " bearing=0,\n", " center=go.layout.mapbox.Center(\n", " lat=40.67,\n", " lon=-73.90\n", " ),\n", " pitch=0,\n", " zoom=9\n", " ),\n", " )\n", "\n", " return fig\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Column():\n", " with gr.Row():\n", " min_price = gr.Number(value=250, label=\"Minimum Price\")\n", " max_price = gr.Number(value=1000, label=\"Maximum Price\")\n", " boroughs = gr.CheckboxGroup(choices=[\"Queens\", \"Brooklyn\", \"Manhattan\", \"Bronx\", \"Staten Island\"], value=[\"Queens\", \"Brooklyn\"], label=\"Select Boroughs:\")\n", " btn = gr.Button(value=\"Update Filter\")\n", " map = gr.Plot().style()\n", " demo.load(filter_map, [min_price, max_price, boroughs], map)\n", " btn.click(filter_map, [min_price, max_price, boroughs], map)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: map_airbnb\n", "### Display an interactive map of AirBnB locations with Plotly. Data is hosted on HuggingFace Datasets. \n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio plotly"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import plotly.graph_objects as go\n", "from datasets import load_dataset\n", "\n", "dataset = load_dataset(\"gradio/NYC-Airbnb-Open-Data\", split=\"train\")\n", "df = dataset.to_pandas()\n", "\n", "def filter_map(min_price, max_price, boroughs):\n", "\n", " filtered_df = df[(df['neighbourhood_group'].isin(boroughs)) & \n", " (df['price'] > min_price) & (df['price'] < max_price)]\n", " names = filtered_df[\"name\"].tolist()\n", " prices = filtered_df[\"price\"].tolist()\n", " text_list = [(names[i], prices[i]) for i in range(0, len(names))]\n", " fig = go.Figure(go.Scattermapbox(\n", " customdata=text_list,\n", " lat=filtered_df['latitude'].tolist(),\n", " lon=filtered_df['longitude'].tolist(),\n", " mode='markers',\n", " marker=go.scattermapbox.Marker(\n", " size=6\n", " ),\n", " hoverinfo=\"text\",\n", " hovertemplate='<b>Name</b>: %{customdata[0]}<br><b>Price</b>: $%{customdata[1]}'\n", " ))\n", "\n", " fig.update_layout(\n", " mapbox_style=\"open-street-map\",\n", " hovermode='closest',\n", " mapbox=dict(\n", " bearing=0,\n", " center=go.layout.mapbox.Center(\n", " lat=40.67,\n", " lon=-73.90\n", " ),\n", " pitch=0,\n", " zoom=9\n", " ),\n", " )\n", "\n", " return fig\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Column():\n", " with gr.Row():\n", " min_price = gr.Number(value=250, label=\"Minimum Price\")\n", " max_price = gr.Number(value=1000, label=\"Maximum Price\")\n", " boroughs = gr.CheckboxGroup(choices=[\"Queens\", \"Brooklyn\", \"Manhattan\", \"Bronx\", \"Staten Island\"], value=[\"Queens\", \"Brooklyn\"], label=\"Select Boroughs:\")\n", " btn = gr.Button(value=\"Update Filter\")\n", " map = gr.Plot().style()\n", " demo.load(filter_map, [min_price, max_price, boroughs], map)\n", " btn.click(filter_map, [min_price, max_price, boroughs], map)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@@ -1,5 +1,4 @@
 import gradio as gr
-import pandas as pd
 import plotly.graph_objects as go
 from datasets import load_dataset
 
@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: musical_instrument_identification\n", "### This demo identifies musical instruments from an audio file. It uses Gradio's Audio and Label components.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch==1.12.0 torchvision==0.13.0 torchaudio==0.12.0 librosa==0.9.2 gdown"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/musical_instrument_identification/data_setups.py"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import torch, torchaudio\n", "from timeit import default_timer as timer\n", "from data_setups import audio_preprocess, resample\n", "import gdown\n", "\n", "url = 'https://drive.google.com/uc?id=1X5CR18u0I-ZOi_8P0cNptCe5JGk9Ro0C'\n", "output = 'piano.wav'\n", "gdown.download(url, output, quiet=False)\n", "url = 'https://drive.google.com/uc?id=1W-8HwmGR5SiyDbUcGAZYYDKdCIst07__'\n", "output= 'torch_efficientnet_fold2_CNN.pth'\n", "gdown.download(url, output, quiet=False)\n", "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n", "SAMPLE_RATE = 44100\n", "AUDIO_LEN = 2.90\n", "model = torch.load(\"torch_efficientnet_fold2_CNN.pth\", map_location=torch.device('cpu'))\n", "LABELS = [\n", " \"Cello\", \"Clarinet\", \"Flute\", \"Acoustic Guitar\", \"Electric Guitar\", \"Organ\", \"Piano\", \"Saxophone\", \"Trumpet\", \"Violin\", \"Voice\"\n", "]\n", "example_list = [\n", " [\"piano.wav\"]\n", "]\n", "\n", "\n", "def predict(audio_path):\n", " start_time = timer()\n", " wavform, sample_rate = torchaudio.load(audio_path)\n", " wav = resample(wavform, sample_rate, SAMPLE_RATE)\n", " if len(wav) > int(AUDIO_LEN * SAMPLE_RATE):\n", " wav = wav[:int(AUDIO_LEN * SAMPLE_RATE)]\n", " else:\n", " print(f\"input length {len(wav)} too small!, need over {int(AUDIO_LEN * SAMPLE_RATE)}\")\n", " return\n", " img = audio_preprocess(wav, SAMPLE_RATE).unsqueeze(0)\n", " model.eval()\n", " with torch.inference_mode():\n", " pred_probs = torch.softmax(model(img), dim=1)\n", " pred_labels_and_probs = {LABELS[i]: float(pred_probs[0][i]) for i in range(len(LABELS))}\n", " pred_time = round(timer() - start_time, 5)\n", " return pred_labels_and_probs, pred_time\n", "\n", "demo = gr.Interface(fn=predict,\n", " inputs=gr.Audio(type=\"filepath\"),\n", " outputs=[gr.Label(num_top_classes=11, label=\"Predictions\"), \n", " gr.Number(label=\"Prediction time (s)\")],\n", " examples=example_list,\n", " cache_examples=False\n", " )\n", "\n", "demo.launch(debug=False)"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: musical_instrument_identification\n", "### This demo identifies musical instruments from an audio file. It uses Gradio's Audio and Label components.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch==1.12.0 torchvision==0.13.0 torchaudio==0.12.0 librosa==0.9.2 gdown"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/musical_instrument_identification/data_setups.py"]}, {"cell_type": "code", "execution_count": null, "id": 44380577570523278879349135829904343037, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import torch\n", "import torchaudio\n", "from timeit import default_timer as timer\n", "from data_setups import audio_preprocess, resample\n", "import gdown\n", "\n", "url = 'https://drive.google.com/uc?id=1X5CR18u0I-ZOi_8P0cNptCe5JGk9Ro0C'\n", "output = 'piano.wav'\n", "gdown.download(url, output, quiet=False)\n", "url = 'https://drive.google.com/uc?id=1W-8HwmGR5SiyDbUcGAZYYDKdCIst07__'\n", "output= 'torch_efficientnet_fold2_CNN.pth'\n", "gdown.download(url, output, quiet=False)\n", "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n", "SAMPLE_RATE = 44100\n", "AUDIO_LEN = 2.90\n", "model = torch.load(\"torch_efficientnet_fold2_CNN.pth\", map_location=torch.device('cpu'))\n", "LABELS = [\n", " \"Cello\", \"Clarinet\", \"Flute\", \"Acoustic Guitar\", \"Electric Guitar\", \"Organ\", \"Piano\", \"Saxophone\", \"Trumpet\", \"Violin\", \"Voice\"\n", "]\n", "example_list = [\n", " [\"piano.wav\"]\n", "]\n", "\n", "\n", "def predict(audio_path):\n", " start_time = timer()\n", " wavform, sample_rate = torchaudio.load(audio_path)\n", " wav = resample(wavform, sample_rate, SAMPLE_RATE)\n", " if len(wav) > int(AUDIO_LEN * SAMPLE_RATE):\n", " wav = wav[:int(AUDIO_LEN * SAMPLE_RATE)]\n", " else:\n", " print(f\"input length {len(wav)} too small!, need over {int(AUDIO_LEN * SAMPLE_RATE)}\")\n", " return\n", " img = audio_preprocess(wav, SAMPLE_RATE).unsqueeze(0)\n", " model.eval()\n", " with torch.inference_mode():\n", " pred_probs = torch.softmax(model(img), dim=1)\n", " pred_labels_and_probs = {LABELS[i]: float(pred_probs[0][i]) for i in range(len(LABELS))}\n", " pred_time = round(timer() - start_time, 5)\n", " return pred_labels_and_probs, pred_time\n", "\n", "demo = gr.Interface(fn=predict,\n", " inputs=gr.Audio(type=\"filepath\"),\n", " outputs=[gr.Label(num_top_classes=11, label=\"Predictions\"), \n", " gr.Number(label=\"Prediction time (s)\")],\n", " examples=example_list,\n", " cache_examples=False\n", " )\n", "\n", "demo.launch(debug=False)\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@ -1,5 +1,6 @@
 import gradio as gr
-import torch, torchaudio
+import torch
+import torchaudio
 from timeit import default_timer as timer
 from data_setups import audio_preprocess, resample
 import gdown
@ -47,4 +48,4 @@ demo = gr.Interface(fn=predict,
                     cache_examples=False
                     )
 
-demo.launch(debug=False)
\ No newline at end of file
+demo.launch(debug=False)
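The `import torch, torchaudio` split above is the one-import-per-line normalization that the new lint setup enforces (pycodestyle knows the combined form as E401, and ruff's import sorting rewrites it the same way — our reading of which rule fired). A minimal sketch using only the standard library:

```python
# Flagged: several modules bound in a single statement
# import os, sys

# Preferred: one import per line, which sorts and diffs cleanly
import os
import sys

print(os.sep, sys.platform)
```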
@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: progress"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio tqdm datasets"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import random\n", "import time\n", "import tqdm\n", "from datasets import load_dataset\n", "import shutil\n", "from uuid import uuid4\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " text = gr.Textbox()\n", " textb = gr.Textbox()\n", " with gr.Row():\n", " load_set_btn = gr.Button(\"Load Set\")\n", " load_nested_set_btn = gr.Button(\"Load Nested Set\")\n", " load_random_btn = gr.Button(\"Load Random\")\n", " clean_imgs_btn = gr.Button(\"Clean Images\")\n", " wait_btn = gr.Button(\"Wait\")\n", " do_all_btn = gr.Button(\"Do All\")\n", " track_tqdm_btn = gr.Button(\"Bind TQDM\")\n", " bind_internal_tqdm_btn = gr.Button(\"Bind Internal TQDM\")\n", "\n", " text2 = gr.Textbox()\n", "\n", " # track list\n", " def load_set(text, text2, progress=gr.Progress()):\n", " imgs = [None] * 24\n", " for img in progress.tqdm(imgs, desc=\"Loading from list\"):\n", " time.sleep(0.1)\n", " return \"done\"\n", " load_set_btn.click(load_set, [text, textb], text2)\n", "\n", " # track nested list\n", " def load_nested_set(text, text2, progress=gr.Progress()):\n", " imgs = [[None] * 8] * 3\n", " for img_set in progress.tqdm(imgs, desc=\"Nested list\"):\n", " time.sleep(2)\n", " for img in progress.tqdm(img_set, desc=\"inner list\"):\n", " time.sleep(0.1)\n", " return \"done\"\n", " load_nested_set_btn.click(load_nested_set, [text, textb], text2)\n", "\n", " # track iterable of unknown length\n", " def load_random(data, progress=gr.Progress()):\n", " def yielder():\n", " for i in range(0, random.randint(15, 20)):\n", " time.sleep(0.1)\n", " yield None\n", " for img in progress.tqdm(yielder()):\n", " pass\n", " return \"done\"\n", " load_random_btn.click(load_random, {text, textb}, text2)\n", " \n", " # manual progress\n", " def clean_imgs(text, progress=gr.Progress()):\n", " progress(0.2, desc=\"Collecting Images\")\n", " time.sleep(1)\n", " progress(0.5, desc=\"Cleaning Images\")\n", " time.sleep(1.5)\n", " progress(0.8, desc=\"Sending Images\")\n", " time.sleep(1.5)\n", " return \"done\"\n", " clean_imgs_btn.click(clean_imgs, text, text2)\n", "\n", " # no progress\n", " def wait(text):\n", " time.sleep(4)\n", " return \"done\"\n", " wait_btn.click(wait, text, text2)\n", "\n", " # multiple progressions\n", " def do_all(data, progress=gr.Progress()):\n", " load_set(data[text], data[textb], progress)\n", " load_random(data, progress)\n", " clean_imgs(data[text], progress)\n", " progress(None)\n", " wait(text)\n", " return \"done\"\n", " do_all_btn.click(do_all, {text, textb}, text2)\n", "\n", " def track_tqdm(data, progress=gr.Progress(track_tqdm=True)):\n", " for i in tqdm.tqdm(range(5), desc=\"outer\"):\n", " for j in tqdm.tqdm(range(4), desc=\"inner\"):\n", " time.sleep(1)\n", " return \"done\"\n", " track_tqdm_btn.click(track_tqdm, {text, textb}, text2)\n", "\n", " def bind_internal_tqdm(data, progress=gr.Progress(track_tqdm=True)):\n", " outdir = \"__tmp/\" + str(uuid4())\n", " dataset = load_dataset(\"beans\", split=\"train\", cache_dir=outdir)\n", " shutil.rmtree(outdir)\n", " return \"done\"\n", " 
bind_internal_tqdm_btn.click(bind_internal_tqdm, {text, textb}, text2)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.queue(concurrency_count=20).launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: progress"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio tqdm datasets"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import random\n", "import time\n", "import tqdm\n", "from datasets import load_dataset\n", "import shutil\n", "from uuid import uuid4\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " text = gr.Textbox()\n", " textb = gr.Textbox()\n", " with gr.Row():\n", " load_set_btn = gr.Button(\"Load Set\")\n", " load_nested_set_btn = gr.Button(\"Load Nested Set\")\n", " load_random_btn = gr.Button(\"Load Random\")\n", " clean_imgs_btn = gr.Button(\"Clean Images\")\n", " wait_btn = gr.Button(\"Wait\")\n", " do_all_btn = gr.Button(\"Do All\")\n", " track_tqdm_btn = gr.Button(\"Bind TQDM\")\n", " bind_internal_tqdm_btn = gr.Button(\"Bind Internal TQDM\")\n", "\n", " text2 = gr.Textbox()\n", "\n", " # track list\n", " def load_set(text, text2, progress=gr.Progress()):\n", " imgs = [None] * 24\n", " for img in progress.tqdm(imgs, desc=\"Loading from list\"):\n", " time.sleep(0.1)\n", " return \"done\"\n", " load_set_btn.click(load_set, [text, textb], text2)\n", "\n", " # track nested list\n", " def load_nested_set(text, text2, progress=gr.Progress()):\n", " imgs = [[None] * 8] * 3\n", " for img_set in progress.tqdm(imgs, desc=\"Nested list\"):\n", " time.sleep(2)\n", " for img in progress.tqdm(img_set, desc=\"inner list\"):\n", " time.sleep(0.1)\n", " return \"done\"\n", " load_nested_set_btn.click(load_nested_set, [text, textb], text2)\n", "\n", " # track iterable of unknown length\n", " def load_random(data, progress=gr.Progress()):\n", " def yielder():\n", " for i in range(0, random.randint(15, 20)):\n", " time.sleep(0.1)\n", " yield None\n", " for img in progress.tqdm(yielder()):\n", " pass\n", " return \"done\"\n", " load_random_btn.click(load_random, {text, textb}, text2)\n", " \n", " # manual progress\n", " def clean_imgs(text, progress=gr.Progress()):\n", " progress(0.2, desc=\"Collecting Images\")\n", " time.sleep(1)\n", " progress(0.5, desc=\"Cleaning Images\")\n", " time.sleep(1.5)\n", " progress(0.8, desc=\"Sending Images\")\n", " time.sleep(1.5)\n", " return \"done\"\n", " clean_imgs_btn.click(clean_imgs, text, text2)\n", "\n", " # no progress\n", " def wait(text):\n", " time.sleep(4)\n", " return \"done\"\n", " wait_btn.click(wait, text, text2)\n", "\n", " # multiple progressions\n", " def do_all(data, progress=gr.Progress()):\n", " load_set(data[text], data[textb], progress)\n", " load_random(data, progress)\n", " clean_imgs(data[text], progress)\n", " progress(None)\n", " wait(text)\n", " return \"done\"\n", " do_all_btn.click(do_all, {text, textb}, text2)\n", "\n", " def track_tqdm(data, progress=gr.Progress(track_tqdm=True)):\n", " for i in tqdm.tqdm(range(5), desc=\"outer\"):\n", " for j in tqdm.tqdm(range(4), desc=\"inner\"):\n", " time.sleep(1)\n", " return \"done\"\n", " track_tqdm_btn.click(track_tqdm, {text, textb}, text2)\n", "\n", " def bind_internal_tqdm(data, progress=gr.Progress(track_tqdm=True)):\n", " outdir = \"__tmp/\" + str(uuid4())\n", " load_dataset(\"beans\", split=\"train\", cache_dir=outdir)\n", " shutil.rmtree(outdir)\n", " return \"done\"\n", " 
bind_internal_tqdm_btn.click(bind_internal_tqdm, {text, textb}, text2)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.queue(concurrency_count=20).launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@ -87,7 +87,7 @@ with gr.Blocks() as demo:
 
     def bind_internal_tqdm(data, progress=gr.Progress(track_tqdm=True)):
         outdir = "__tmp/" + str(uuid4())
-        dataset = load_dataset("beans", split="train", cache_dir=outdir)
+        load_dataset("beans", split="train", cache_dir=outdir)
         shutil.rmtree(outdir)
         return "done"
     bind_internal_tqdm_btn.click(bind_internal_tqdm, {text, textb}, text2)
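Dropping the `dataset =` binding while keeping the call is the usual fix for pyflakes' F841 ("local variable is assigned to but never used"): the demo only needs the download side effect. A minimal sketch of the pattern, with hypothetical names:

```python
registry: list[str] = []

def register(name: str) -> list[str]:
    registry.append(name)
    return registry

def setup() -> str:
    # F841 would flag `result = register("worker")` because `result` is
    # never read; calling for the side effect alone is the clean form.
    register("worker")
    return "done"

assert setup() == "done" and registry == ["worker"]
```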
@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: progress_component"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio tqdm"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import time \n", "import tqdm\n", "\n", "css = \"footer {display: none !important;} .gradio-container {min-height: 0px !important;}\"\n", "\n", "def load_set(progress=gr.Progress()):\n", " imgs = [None] * 24\n", " for img in progress.tqdm(imgs, desc=\"Loading...\"):\n", " time.sleep(0.1)\n", " return \"Loaded\"\n", "\n", "\n", "with gr.Blocks(css=css) as demo:\n", " load = gr.Button(\"Load\")\n", " label = gr.Label(label=\"Loader\")\n", " load.click(load_set, outputs=label)\n", "\n", "demo.queue(concurrency_count=20).launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: progress_component"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio tqdm"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import time \n", "\n", "css = \"footer {display: none !important;} .gradio-container {min-height: 0px !important;}\"\n", "\n", "def load_set(progress=gr.Progress()):\n", " imgs = [None] * 24\n", " for img in progress.tqdm(imgs, desc=\"Loading...\"):\n", " time.sleep(0.1)\n", " return \"Loaded\"\n", "\n", "\n", "with gr.Blocks(css=css) as demo:\n", " load = gr.Button(\"Load\")\n", " label = gr.Label(label=\"Loader\")\n", " load.click(load_set, outputs=label)\n", "\n", "demo.queue(concurrency_count=20).launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@ -1,6 +1,5 @@
 import gradio as gr
 import time 
-import tqdm
 
 css = "footer {display: none !important;} .gradio-container {min-height: 0px !important;}"
 
@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: stable-diffusion\n", "### Note: This is a simplified version of the code needed to create the Stable Diffusion demo. See full code here: https://hf.co/spaces/stabilityai/stable-diffusion/tree/main\n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio diffusers transformers nvidia-ml-py3 ftfy torch"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import torch\n", "from torch import autocast\n", "from diffusers import StableDiffusionPipeline\n", "from datasets import load_dataset\n", "from PIL import Image \n", "import re\n", "import os\n", "\n", "auth_token = os.getenv(\"auth_token\")\n", "model_id = \"CompVis/stable-diffusion-v1-4\"\n", "device = \"cpu\"\n", "pipe = StableDiffusionPipeline.from_pretrained(model_id, use_auth_token=auth_token, revision=\"fp16\", torch_dtype=torch.float16)\n", "pipe = pipe.to(device)\n", "\n", "def infer(prompt, samples, steps, scale, seed): \n", " generator = torch.Generator(device=device).manual_seed(seed)\n", " images_list = pipe(\n", " [prompt] * samples,\n", " num_inference_steps=steps,\n", " guidance_scale=scale,\n", " generator=generator,\n", " )\n", " images = []\n", " safe_image = Image.open(r\"unsafe.png\")\n", " for i, image in enumerate(images_list[\"sample\"]):\n", " if(images_list[\"nsfw_content_detected\"][i]):\n", " images.append(safe_image)\n", " else:\n", " images.append(image)\n", " return images\n", " \n", "\n", "\n", "block = gr.Blocks()\n", "\n", "with block:\n", " with gr.Group():\n", " with gr.Box():\n", " with gr.Row().style(mobile_collapse=False, equal_height=True):\n", " text = gr.Textbox(\n", " label=\"Enter your prompt\",\n", " show_label=False,\n", " max_lines=1,\n", " placeholder=\"Enter your prompt\",\n", " ).style(\n", " border=(True, False, True, True),\n", " rounded=(True, False, False, True),\n", " container=False,\n", " )\n", " btn = gr.Button(\"Generate image\").style(\n", " margin=False,\n", " rounded=(False, True, True, False),\n", " )\n", " gallery = gr.Gallery(\n", " label=\"Generated images\", show_label=False, elem_id=\"gallery\"\n", " ).style(grid=[2], height=\"auto\")\n", "\n", " advanced_button = gr.Button(\"Advanced options\", elem_id=\"advanced-btn\")\n", "\n", " with gr.Row(elem_id=\"advanced-options\"):\n", " samples = gr.Slider(label=\"Images\", minimum=1, maximum=4, value=4, step=1)\n", " steps = gr.Slider(label=\"Steps\", minimum=1, maximum=50, value=45, step=1)\n", " scale = gr.Slider(\n", " label=\"Guidance Scale\", minimum=0, maximum=50, value=7.5, step=0.1\n", " )\n", " seed = gr.Slider(\n", " label=\"Seed\",\n", " minimum=0,\n", " maximum=2147483647,\n", " step=1,\n", " randomize=True,\n", " )\n", " text.submit(infer, inputs=[text, samples, steps, scale, seed], outputs=gallery)\n", " btn.click(infer, inputs=[text, samples, steps, scale, seed], outputs=gallery)\n", " advanced_button.click(\n", " None,\n", " [],\n", " text,\n", " )\n", " \n", "block.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: stable-diffusion\n", "### Note: This is a simplified version of the code needed to create the Stable Diffusion demo. See full code here: https://hf.co/spaces/stabilityai/stable-diffusion/tree/main\n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio diffusers transformers nvidia-ml-py3 ftfy torch"]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import torch\n", "from diffusers import StableDiffusionPipeline\n", "from PIL import Image \n", "import os\n", "\n", "auth_token = os.getenv(\"auth_token\")\n", "model_id = \"CompVis/stable-diffusion-v1-4\"\n", "device = \"cpu\"\n", "pipe = StableDiffusionPipeline.from_pretrained(model_id, use_auth_token=auth_token, revision=\"fp16\", torch_dtype=torch.float16)\n", "pipe = pipe.to(device)\n", "\n", "def infer(prompt, samples, steps, scale, seed): \n", " generator = torch.Generator(device=device).manual_seed(seed)\n", " images_list = pipe(\n", " [prompt] * samples,\n", " num_inference_steps=steps,\n", " guidance_scale=scale,\n", " generator=generator,\n", " )\n", " images = []\n", " safe_image = Image.open(r\"unsafe.png\")\n", " for i, image in enumerate(images_list[\"sample\"]):\n", " if(images_list[\"nsfw_content_detected\"][i]):\n", " images.append(safe_image)\n", " else:\n", " images.append(image)\n", " return images\n", " \n", "\n", "\n", "block = gr.Blocks()\n", "\n", "with block:\n", " with gr.Group():\n", " with gr.Box():\n", " with gr.Row().style(mobile_collapse=False, equal_height=True):\n", " text = gr.Textbox(\n", " label=\"Enter your prompt\",\n", " show_label=False,\n", " max_lines=1,\n", " placeholder=\"Enter your prompt\",\n", " ).style(\n", " border=(True, False, True, True),\n", " rounded=(True, False, False, True),\n", " container=False,\n", " )\n", " btn = gr.Button(\"Generate image\").style(\n", " margin=False,\n", " rounded=(False, True, True, False),\n", " )\n", " gallery = gr.Gallery(\n", " label=\"Generated images\", show_label=False, elem_id=\"gallery\"\n", " ).style(grid=[2], height=\"auto\")\n", "\n", " advanced_button = gr.Button(\"Advanced options\", elem_id=\"advanced-btn\")\n", "\n", " with gr.Row(elem_id=\"advanced-options\"):\n", " samples = gr.Slider(label=\"Images\", minimum=1, maximum=4, value=4, step=1)\n", " steps = gr.Slider(label=\"Steps\", minimum=1, maximum=50, value=45, step=1)\n", " scale = gr.Slider(\n", " label=\"Guidance Scale\", minimum=0, maximum=50, value=7.5, step=0.1\n", " )\n", " seed = gr.Slider(\n", " label=\"Seed\",\n", " minimum=0,\n", " maximum=2147483647,\n", " step=1,\n", " randomize=True,\n", " )\n", " text.submit(infer, inputs=[text, samples, steps, scale, seed], outputs=gallery)\n", " btn.click(infer, inputs=[text, samples, steps, scale, seed], outputs=gallery)\n", " advanced_button.click(\n", " None,\n", " [],\n", " text,\n", " )\n", " \n", "block.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@ -1,10 +1,7 @@
 import gradio as gr
 import torch
-from torch import autocast
 from diffusers import StableDiffusionPipeline
-from datasets import load_dataset
 from PIL import Image 
-import re
 import os
 
 auth_token = os.getenv("auth_token")
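The three deletions above (`autocast`, `load_dataset`, `re`) are unused-import cleanups, pyflakes rule F401, which `ruff --fix` can apply automatically. What the rule catches, in miniature:

```python
import os  # referenced below, so it survives
import re  # F401: imported but never used -- removed by the autofix

print(os.getcwd())
```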
@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: webcam"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import numpy as np\n", "\n", "import gradio as gr\n", "\n", "\n", "def snap(image, video):\n", " return [image, video]\n", "\n", "\n", "demo = gr.Interface(\n", " snap,\n", " [gr.Image(source=\"webcam\", tool=None), gr.Video(source=\"webcam\")],\n", " [\"image\", \"video\"],\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: webcam"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["\n", "import gradio as gr\n", "\n", "\n", "def snap(image, video):\n", " return [image, video]\n", "\n", "\n", "demo = gr.Interface(\n", " snap,\n", " [gr.Image(source=\"webcam\", tool=None), gr.Video(source=\"webcam\")],\n", " [\"image\", \"video\"],\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@ -1,4 +1,3 @@
-import numpy as np
 
 import gradio as gr
 
@ -16,8 +16,8 @@ from gradio.components import (
     Carousel,
     Chatbot,
     Checkbox,
-    Checkboxgroup,
     CheckboxGroup,
+    Checkboxgroup,
     Code,
     ColorPicker,
     DataFrame,
@ -27,8 +27,8 @@ from gradio.components import (
     File,
     Gallery,
     Highlight,
-    Highlightedtext,
     HighlightedText,
+    Highlightedtext,
     Image,
     Interpretation,
     Json,
@ -62,9 +62,8 @@ from gradio.flagging import (
     HuggingFaceDatasetSaver,
     SimpleCSVLogger,
 )
-from gradio.helpers import EventData, Progress
+from gradio.helpers import EventData, Progress, make_waveform, skip, update
 from gradio.helpers import create_examples as Examples
-from gradio.helpers import make_waveform, skip, update
 from gradio.interface import Interface, TabbedInterface, close_all
 from gradio.ipython_ext import load_ipython_extension
 from gradio.layouts import Accordion, Box, Column, Group, Row, Tab, TabItem, Tabs
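Note how the two plain `from gradio.helpers import ...` statements are merged into a single sorted statement while the aliased `create_examples as Examples` import keeps its own line; that is isort-style combining as ruff applies it (our reading of the fix). The resulting shape:

```python
# Before: two statements importing from the same module
#     from gradio.helpers import EventData, Progress
#     from gradio.helpers import make_waveform, skip, update
# After: one combined, alphabetized statement; aliased imports stay separate
from gradio.helpers import EventData, Progress, make_waveform, skip, update
from gradio.helpers import create_examples as Examples
```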
@ -887,7 +887,6 @@ class Slider(
             "interactive": interactive,
             "visible": visible,
             "value": value,
-            "interactive": interactive,
             "__type__": "update",
         }
 
@ -1019,7 +1018,6 @@ class Checkbox(
             "interactive": interactive,
             "visible": visible,
             "value": value,
-            "interactive": interactive,
             "__type__": "update",
         }
 
@ -1140,7 +1138,6 @@ class CheckboxGroup(
             "interactive": interactive,
             "visible": visible,
             "value": value,
-            "interactive": interactive,
             "__type__": "update",
         }
 
@ -1322,7 +1319,6 @@ class Radio(
             "interactive": interactive,
             "visible": visible,
             "value": value,
-            "interactive": interactive,
             "__type__": "update",
         }
 
@ -1511,7 +1507,6 @@ class Dropdown(
             "choices": choices,
             "label": label,
             "show_label": show_label,
             "interactive": interactive,
             "visible": visible,
             "value": value,
-            "interactive": interactive,
@ -1707,7 +1702,6 @@ class Image(
             "visible": visible,
             "value": value,
             "brush_radius": brush_radius,
-            "interactive": interactive,
             "__type__": "update",
         }
 
@ -2054,7 +2048,6 @@ class Video(
             "interactive": interactive,
             "visible": visible,
             "value": value,
-            "interactive": interactive,
             "__type__": "update",
         }
 
@ -2274,7 +2267,6 @@ class Audio(
             "interactive": interactive,
             "visible": visible,
             "value": value,
-            "interactive": interactive,
             "__type__": "update",
         }
 
@ -2591,7 +2583,6 @@ class File(
             "interactive": interactive,
             "visible": visible,
             "value": value,
-            "interactive": interactive,
             "__type__": "update",
         }
 
@ -2856,7 +2847,6 @@ class Dataframe(Changeable, Selectable, IOComponent, JSONSerializable):
             "interactive": interactive,
             "visible": visible,
             "value": value,
-            "interactive": interactive,
             "__type__": "update",
         }
 
@ -3073,7 +3063,6 @@ class Timeseries(Changeable, IOComponent, JSONSerializable):
             "interactive": interactive,
             "visible": visible,
             "value": value,
-            "interactive": interactive,
             "__type__": "update",
         }
 
@ -3331,7 +3320,6 @@ class UploadButton(Clickable, Uploadable, IOComponent, FileSerializable):
             "interactive": interactive,
             "visible": visible,
             "value": value,
-            "interactive": interactive,
             "__type__": "update",
         }
 
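Every hunk above removes the same latent bug: each `update()` payload listed `"interactive"` twice. Python accepts duplicate keys in a dict literal and silently keeps the last value, which is why this never crashed; pyflakes' repeated-key checks surface it. A minimal demonstration:

```python
config = {
    "interactive": True,
    "visible": True,
    "interactive": True,  # duplicate key: the earlier entry is discarded
}
# The literal still evaluates -- the duplicate was just dead weight.
assert config == {"interactive": True, "visible": True}
```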
@ -344,7 +344,7 @@ def from_model(model_name: str, api_key: str | None, alias: str | None, **kwargs
         "examples": example_data,
     }
 
-    if p is None or not (p in pipelines):
+    if p is None or p not in pipelines:
         raise ValueError("Unsupported pipeline type: {}".format(p))
 
     pipeline = pipelines[p]
@ -172,7 +172,7 @@ class Examples:
         input_has_examples = [False] * len(inputs)
         for example in examples:
             for idx, example_for_input in enumerate(example):
-                if not (example_for_input is None):
+                if example_for_input is not None:
                     try:
                         input_has_examples[idx] = True
                     except IndexError:
@ -122,7 +122,7 @@ class Queue:
                 await asyncio.sleep(self.sleep_when_free)
                 continue
 
-            if not (None in self.active_jobs):
+            if None not in self.active_jobs:
                 await asyncio.sleep(self.sleep_when_free)
                 continue
             # Using mutex to avoid editing a list in use
@ -160,7 +160,7 @@ class App(FastAPI):
         @app.get("/login_check")
         @app.get("/login_check/")
         def login_check(user: str = Depends(get_current_user)):
-            if app.auth is None or not (user is None):
+            if app.auth is None or user is not None:
                 return
             raise HTTPException(
                 status_code=status.HTTP_401_UNAUTHORIZED, detail="Not authenticated"
@ -219,7 +219,7 @@ class App(FastAPI):
             mimetypes.add_type("application/javascript", ".js")
             blocks = app.get_blocks()
 
-            if app.auth is None or not (user is None):
+            if app.auth is None or user is not None:
                 config = app.get_blocks().config
             else:
                 config = {
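The `not (x in y)` / `not (x is y)` rewrites in `external.py`, `helpers.py`, `queueing.py`, and `routes.py` correspond to pycodestyle's E713 (test for membership should be `not in`) and E714 (test for object identity should be `is not`). The forms are exactly equivalent; the dedicated operators just read better:

```python
jobs = [None, "job-1"]
user = "admin"

# E713: prefer `not in` over negating `in`
assert (not None in jobs) == (None not in jobs)

# E714: prefer `is not` over negating `is`
assert (not user is None) == (user is not None)
```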
@ -187,6 +187,7 @@ def launched_telemetry(blocks: gradio.Blocks, data: Dict[str, Any]) -> None:
             )
         except Exception:
             pass
 
     threading.Thread(target=launched_telemtry_thread, args=(data,)).start()
 
+
@ -60,3 +60,27 @@ include = [
     "/README.md",
     "/requirements.txt",
 ]
+
+[tool.ruff]
+target-version = "py37"
+extend-select = [
+    "I",
+]
+ignore = [
+    "E501", # from scripts/lint_backend.sh
+    "E722", # from scripts/lint_backend.sh
+    "E731", # from scripts/lint_backend.sh
+    "F403", # from scripts/lint_backend.sh
+    "F541", # from scripts/lint_backend.sh
+]
+
+[tool.ruff.per-file-ignores]
+"demo/*" = [
+    "E402", # Demos may have imports not at the top
+    "E741", # Demos may have ambiguous variable names
+    "F405", # Demos may use star imports
+    "I", # Don't care about import order
+]
+"gradio/__init__.py" = [
+    "F401", # "Imported but unused" (TODO: it would be better to be explicit and use __all__)
+]
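The `gradio/__init__.py` ignore is there because re-export modules trip F401 ("imported but unused") by design, and the TODO points at the conventional alternative: declare the public surface with `__all__`, which pyflakes treats as a use. A sketch with a hypothetical package `pkg`:

```python
# pkg/__init__.py (hypothetical re-export module)
from pkg.client import Client  # F401 fires here unless ignored...

__all__ = ["Client"]  # ...or unless the re-export is made explicit
```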
@ -16,15 +16,17 @@ to with the -o parameter:
 >> python scripts/benchmark_queue.py -n 1000 -o results.json
 '''
 
+import argparse
+import asyncio
+import json
+import random
+import time
+
+import pandas as pd
+import websockets
+
 import gradio as gr
 from gradio import media_data
-import asyncio
-import websockets
-import json
-import time
-import random
-import pandas as pd
-import argparse
 
 
 def identity_with_sleep(x):
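The reshuffle above shows the ordering that ruff's isort rules (the `"I"` selector added in `pyproject.toml`) enforce: standard library first, then third-party, then first-party, each block alphabetized and separated by a blank line. The convention in miniature:

```python
import argparse  # standard-library block
import asyncio

import pandas as pd  # third-party block
import websockets

import gradio as gr  # first-party block (known-first-party)
from gradio import media_data
```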
@ -1,6 +1,6 @@
-import urllib.request
-import json
+import json
 import sys
+import urllib.request
 from pathlib import Path
 
 root_directory = Path(__file__).parent.parent
@ -1,8 +1,8 @@
-import shutil
+import argparse
 import os
 import pathlib
+import shutil
 import textwrap
-import argparse
 
 
 def copy_all_demos(source_dir: str, dest_dir: str):
@ -1,4 +1,5 @@
 import argparse
+
 import requests
 
 WORKFLOW_RUN_ENDPOINT = "https://api.github.com/repos/{owner}/{repo}/actions/runs/{run_id}/artifacts"
@ -3,8 +3,7 @@
 cd "$(dirname ${0})/.."
 
 echo "Formatting the backend... Our style follows the Black code style."
-python -m black gradio test
-python -m isort --profile=black gradio test
-python -m flake8 --ignore=E731,E501,E722,W503,E126,E203,F403 gradio test --exclude gradio/__init__.py
+ruff gradio test
+black gradio test
 
 bash client/python/scripts/format.sh # Call the client library's formatting script
@ -1,5 +1,5 @@
-import pathlib
 import argparse
+import pathlib
 import textwrap
 
 current_dir = (pathlib.Path(__file__).parent / "..").resolve()
@ -1,7 +1,5 @@
 #!/bin/bash
 
 cd "$(dirname ${0})/.."
-
-python -m black --check gradio test client/python/gradio_client
-python -m isort --profile=black --check-only gradio test client/python/gradio_client
-python -m flake8 --ignore=E731,E501,E722,W503,E126,E203,F403,F541 gradio test client/python/gradio_client --exclude gradio/__init__.py,client/python/gradio_client/__init__.py
+ruff gradio test client
+black --check gradio test client
@ -51,8 +51,6 @@ filelock==3.7.1
     # via
     #   huggingface-hub
     #   transformers
-flake8==4.0.1
-    # via -r requirements.in
 h11==0.12.0
     # via httpcore
 httpcore==0.15.0
@ -75,7 +73,6 @@ imageio==2.19.5
 importlib-metadata==4.2.0
     # via
     #   click
-    #   flake8
     #   huggingface-hub
     #   jsonschema
     #   pluggy
@ -87,8 +84,6 @@ iniconfig==1.1.1
     # via pytest
 ipython==7.34.0
     # via -r requirements.in
-isort==5.10.1
-    # via -r requirements.in
 jedi==0.18.1
     # via ipython
 jinja2==3.1.2
@ -107,8 +102,6 @@ markupsafe==2.1.1
     # via jinja2
 matplotlib-inline==0.1.3
     # via ipython
-mccabe==0.6.1
-    # via flake8
 mypy-extensions==0.4.3
     # via black
 networkx==2.6.3
@ -162,14 +155,10 @@ ptyprocess==0.7.0
     # via pexpect
 py==1.11.0
     # via pytest
-pycodestyle==2.8.0
-    # via flake8
 pydantic==1.9.1
     # via
     #   -r requirements.in
     #   fastapi
-pyflakes==2.4.0
-    # via flake8
 pygments==2.12.0
     # via ipython
 pyparsing==3.0.9
@ -207,6 +196,8 @@ respx==0.19.2
     # via -r requirements.in
 rfc3986[idna2008]==1.5.0
     # via httpx
+ruff==0.0.260
+    # via -r requirements.in
 s3transfer==0.6.0
     # via boto3
 scikit-image==0.19.3
@ -1,23 +1,22 @@
 # Don't forget to run bash scripts/create_test_requirements.sh and scripts/create_test_requirements-37.sh from unix or wsl when you update this file.
-asyncio
-boto3
 IPython
+altair
+asyncio
+black
+boto3
 coverage
-torch
-transformers
+fastapi>=0.87.0
+httpx
+huggingface_hub
+pydantic
+pytest
+pytest-asyncio
+pytest-cov
+ruff>=0.0.260
+respx
 scikit-image
 shap
-pytest
-huggingface_hub
-pytest-cov
-pytest-asyncio
-black
-isort
-flake8
-httpx
-pydantic
-respx
-fastapi>=0.87.0
-altair
+torch
+tqdm
+transformers
 vega_datasets
-tqdm
@ -51,8 +51,6 @@ filelock==3.7.1
     # via
     #   huggingface-hub
     #   transformers
-flake8==4.0.1
-    # via -r requirements.in
 h11==0.12.0
     # via httpcore
 httpcore==0.15.0
@ -76,8 +74,6 @@ iniconfig==1.1.1
     # via pytest
 ipython==7.34.0
     # via -r requirements.in
-isort==5.10.1
-    # via -r requirements.in
 jedi==0.18.1
     # via ipython
 jinja2==3.1.2
@ -96,8 +92,6 @@ markupsafe==2.1.1
     # via jinja2
 matplotlib-inline==0.1.3
     # via ipython
-mccabe==0.6.1
-    # via flake8
 mypy-extensions==0.4.3
     # via black
 networkx==2.6.3
@ -151,14 +145,10 @@ ptyprocess==0.7.0
     # via pexpect
 py==1.11.0
     # via pytest
-pycodestyle==2.8.0
-    # via flake8
 pydantic==1.9.1
     # via
     #   -r requirements.in
     #   fastapi
-pyflakes==2.4.0
-    # via flake8
 pygments==2.12.0
     # via ipython
 pyparsing==3.0.9
@ -194,6 +184,8 @@ requests==2.28.1
     #   transformers
 respx==0.19.2
     # via -r requirements.in
+ruff==0.0.260
+    # via -r requirements.in
 rfc3986[idna2008]==1.5.0
     # via httpx
 s3transfer==0.6.0
@ -685,9 +685,7 @@ class TestCallFunction:
 class TestBatchProcessing:
     def test_raise_exception_if_batching_an_event_thats_not_queued(self):
         def trim(words, lens):
-            trimmed_words = []
-            for w, l in zip(words, lens):
-                trimmed_words.append(w[: int(l)])
+            trimmed_words = [word[: int(length)] for word, length in zip(words, lens)]
             return [trimmed_words]
 
         msg = "In order to use batching, the queue must be enabled."
@ -258,9 +258,7 @@ class TestProcessExamples:
     @pytest.mark.asyncio
     async def test_caching_with_batch(self):
        def trim_words(words, lens):
-            trimmed_words = []
-            for w, l in zip(words, lens):
-                trimmed_words.append(w[:l])
+            trimmed_words = [word[:length] for word, length in zip(words, lens)]
            return [trimmed_words]

        io = gr.Interface(
@ -278,9 +276,7 @@ class TestProcessExamples:
     @pytest.mark.asyncio
     async def test_caching_with_batch_multiple_outputs(self):
        def trim_words(words, lens):
-            trimmed_words = []
-            for w, l in zip(words, lens):
-                trimmed_words.append(w[:l])
+            trimmed_words = [word[:length] for word, length in zip(words, lens)]
            return trimmed_words, lens

        io = gr.Interface(
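These test rewrites address pycodestyle's E741, which flags ambiguous single-letter names like `l` (easily misread as `1` or `I`); renaming the variables lets the append loop collapse into a comprehension as a bonus:

```python
words, lens = ["hello", "world"], [3, 2]

# Before (E741): for w, l in zip(words, lens): trimmed.append(w[:l])
# After: descriptive names inside an equivalent comprehension
trimmed = [word[:length] for word, length in zip(words, lens)]
assert trimmed == ["hel", "wo"]
```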
@ -1,8 +1,7 @@
-import time
-import requests
-import warnings
 import os
 import sys
+import time
+
 from homepage.utils import get_latest_stable
 
 VERSION_TXT = os.path.abspath(os.path.join(os.getcwd(), "..", "gradio", "version.txt"))
@ -1,8 +1,8 @@
 import os
 import shutil
 
 import jinja2
-from src import index, guides, docs, demos, changelog
-import requests
+from src import docs
+from utils import get_latest_stable
 
 SRC_DIR = "src"
@ -1,9 +1,9 @@
+import argparse
 import os
 import shutil
 
 import jinja2
-from src import index, guides, docs, demos, changelog
-import argparse
-import requests
+from src import changelog, demos, docs, guides, index
+from utils import get_latest_stable
 
 SRC_DIR = "src"
@ -1,6 +1,7 @@
-from upload_demos import demos, upload_demo_to_space, AUTH_TOKEN, latest_gradio_stable
-from gradio.networking import url_ok
+import huggingface_hub
+from upload_demos import AUTH_TOKEN, demos, latest_gradio_stable, upload_demo_to_space
 
+from gradio.networking import url_ok
 
 for demo in demos:
     space_id = "gradio/" + demo
@ -1,8 +1,8 @@
 import os
-import markdown2
 import shutil
-import re
 
+import markdown2
+
 DIR = os.path.dirname(__file__)
 INNER_TEMPLATE_FILE = os.path.join(DIR, "inner_template.html")
 CHANGELOG_FILE = os.path.join(DIR, "..", "..", "..", "..", "CHANGELOG.md")
@ -1,5 +1,4 @@
 import os
-import json
 
 GRADIO_DEMO_DIR = os.path.abspath(os.path.join(os.getcwd(), "..", "..", "demo"))
 DIR = os.path.dirname(__file__)
@ -1,6 +1,8 @@
 import os
-from gradio.documentation import generate_documentation, document_cls
+
+from gradio.documentation import document_cls, generate_documentation
+from gradio.events import EventListener
 
 from ..guides import guides
 
 DIR = os.path.dirname(__file__)
@ -166,7 +168,6 @@ def build(output_dir, jinja_env, gradio_wheel_url, gradio_version):
 
 
 def build_pip_template(version, jinja_env):
-    docs_files = os.listdir("src/docs")
     template = jinja_env.get_template("docs/template.html")
     output = template.render(
         docs=docs, find_cls=find_cls, version="pip", gradio_version=version, canonical_suffix="", ordered_events=ordered_events
@ -1,7 +1,8 @@
 import os
-import markdown2
-import shutil
 import re
+import shutil
 
+import markdown2
+
 DIR = os.path.dirname(__file__)
 TEMPLATE_FILE = os.path.join(DIR, "template.html")
@ -1,5 +1,6 @@
-import os
 import json
+import os
 
 import requests
+
 DIR = os.path.dirname(__file__)
@ -1,13 +1,12 @@
+import argparse
+import os
 import pathlib
 import shutil
 import tempfile
 import textwrap
 from typing import Optional
 
 import huggingface_hub
-import os
-import json
-import argparse
 import requests
 from utils import get_latest_stable
 
 AUTH_TOKEN = os.getenv("AUTH_TOKEN")
@ -1,4 +1,5 @@
 import requests
+
 
 def get_latest_stable():
     return requests.get("https://pypi.org/pypi/gradio/json").json()["info"]["version"]