Mirror of https://github.com/gradio-app/gradio.git (synced 2025-04-06 12:30:29 +08:00)
Use covariant container types across the codebase and add typing to our demos (#8854)
* more typing
* add changeset
* tweaks
* more changes
* more fixes
* more changes
* more fixes
* more fixes
* delete
* add changeset
* notebooks
* restore
* restore
* format
* add changeset
* more typing fixes
* fixes
* change
* fixes
* fix
* format
* more fixes
* fixes
* fixes for python3.9
* demo
* fix
* fixes
* fix typo
* type
* formatting
* add changeset

---------

Co-authored-by: gradio-pr-bot <gradio-pr-bot@users.noreply.github.com>
Co-authored-by: aliabd <ali.si3luwa@gmail.com>
This commit is contained in:
parent 38c2ad425a
commit d1f044145a
6 .changeset/soft-squids-guess.md (Normal file)
@ -0,0 +1,6 @@
---
"gradio": patch
"website": patch
---

fix:Use covariant container types across the codebase and add typing to our demos
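For readers skimming this commit, "covariant container types" means annotating read-only parameters with abstract types such as `Sequence` and `Mapping` rather than the invariant `list` and `dict`, so callers may pass any compatible container. A minimal illustration of the difference (the function names are ours, not from the Gradio codebase):

from collections.abc import Mapping, Sequence

def total_invariant(values: list[float]) -> float:
    # list is mutable and therefore invariant: a checker rejects a
    # list[int] argument here, even though int is compatible with float.
    return sum(values)

def total(values: Sequence[float]) -> float:
    # Sequence is read-only and covariant, so list[int], tuple[int, ...],
    # or any other sequence of numbers type-checks here.
    return sum(values)

def render(config: Mapping[str, str]) -> str:
    # Mapping plays the same role for dict-like parameters.
    return ", ".join(f"{key}={value}" for key, value in config.items())

print(total([1, 2, 3]))           # OK: list[int] accepted as Sequence[float]
print(render({"theme": "base"}))  # OK: dict[str, str] accepted as Mapping[str, str]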
Binary file not shown. (Before: 32 KiB)
Binary file not shown. (Before: 103 KiB)
@ -1,7 +0,0 @@
-f https://download.pytorch.org/whl/torch_stable.html
numpy
matplotlib
wget
torch
torchvision
File diff suppressed because one or more lines are too long
@ -1,88 +0,0 @@
import os

import numpy as np
import torch
import torchvision
import wget


destination_folder = "output"
destination_for_weights = "weights"

if os.path.exists(destination_for_weights):
    print("The weights are at", destination_for_weights)
else:
    print("Creating folder at ", destination_for_weights, " to store weights")
    os.mkdir(destination_for_weights)

segmentationWeightsURL = 'https://github.com/douyang/EchoNetDynamic/releases/download/v1.0.0/deeplabv3_resnet50_random.pt'

if not os.path.exists(os.path.join(destination_for_weights, os.path.basename(segmentationWeightsURL))):
    print("Downloading Segmentation Weights, ", segmentationWeightsURL," to ",os.path.join(destination_for_weights, os.path.basename(segmentationWeightsURL)))
    filename = wget.download(segmentationWeightsURL, out = destination_for_weights)
else:
    print("Segmentation Weights already present")

torch.cuda.empty_cache()

def collate_fn(x):
    x, f = zip(*x)
    i = list(map(lambda t: t.shape[1], x))
    x = torch.as_tensor(np.swapaxes(np.concatenate(x, 1), 0, 1))
    return x, f, i

model = torchvision.models.segmentation.deeplabv3_resnet50(pretrained=False, aux_loss=False)
model.classifier[-1] = torch.nn.Conv2d(model.classifier[-1].in_channels, 1, kernel_size=model.classifier[-1].kernel_size)

print("loading weights from ", os.path.join(destination_for_weights, "deeplabv3_resnet50_random"))

if torch.cuda.is_available():
    print("cuda is available, original weights")
    device = torch.device("cuda")
    model = torch.nn.DataParallel(model)
    model.to(device)
    checkpoint = torch.load(os.path.join(destination_for_weights, os.path.basename(segmentationWeightsURL)))
    model.load_state_dict(checkpoint['state_dict'])
else:
    print("cuda is not available, cpu weights")
    device = torch.device("cpu")
    checkpoint = torch.load(os.path.join(destination_for_weights, os.path.basename(segmentationWeightsURL)), map_location = "cpu")
    state_dict_cpu = {k[7:]: v for (k, v) in checkpoint['state_dict'].items()}
    model.load_state_dict(state_dict_cpu)

model.eval()

def segment(input):
    inp = input
    x = inp.transpose([2, 0, 1])  # channels-first
    x = np.expand_dims(x, axis=0)  # adding a batch dimension

    mean = x.mean(axis=(0, 2, 3))
    std = x.std(axis=(0, 2, 3))
    x = x - mean.reshape(1, 3, 1, 1)
    x = x / std.reshape(1, 3, 1, 1)

    with torch.no_grad():
        x = torch.from_numpy(x).type('torch.FloatTensor').to(device)
        output = model(x)

    y = output['out'].numpy()
    y = y.squeeze()

    out = y > 0

    mask = inp.copy()
    mask[out] = np.array([0, 0, 255])

    return mask

import gradio as gr

i = gr.Image(label="Echocardiogram")
o = gr.Image(label="Segmentation Mask")

examples = [["img1.jpg"], ["img2.jpg"]]
title = None #"Left Ventricle Segmentation"
description = "This semantic segmentation model identifies the left ventricle in echocardiogram images."
# videos. Accurate evaluation of the motion and size of the left ventricle is crucial for the assessment of cardiac function and ejection fraction. In this interface, the user inputs apical-4-chamber images from echocardiography videos and the model will output a prediction of the localization of the left ventricle in blue. This model was trained on the publicly released EchoNet-Dynamic dataset of 10k echocardiogram videos with 20k expert annotations of the left ventricle and published as part of ‘Video-based AI for beat-to-beat assessment of cardiac function’ by Ouyang et al. in Nature, 2020."
thumbnail = "https://raw.githubusercontent.com/gradio-app/hub-echonet/master/thumbnail.png"
gr.Interface(segment, i, o, examples=examples, analytics_enabled=False, thumbnail=thumbnail, cache_examples=False).launch()
@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: agent_chatbot"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio git+https://github.com/huggingface/transformers.git#egg=transformers[agents]"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/agent_chatbot/utils.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from gradio import ChatMessage\n", "from transformers import load_tool, ReactCodeAgent, HfEngine\n", "from utils import stream_from_transformers_agent\n", "\n", "# Import tool from Hub\n", "image_generation_tool = load_tool(\"m-ric/text-to-image\")\n", "\n", "\n", "llm_engine = HfEngine(\"meta-llama/Meta-Llama-3-70B-Instruct\")\n", "# Initialize the agent with both tools\n", "agent = ReactCodeAgent(tools=[image_generation_tool], llm_engine=llm_engine)\n", "\n", "\n", "def interact_with_agent(prompt, messages):\n", " messages.append(ChatMessage(role=\"user\", content=prompt))\n", " yield messages\n", " for msg in stream_from_transformers_agent(agent, prompt):\n", " messages.append(msg)\n", " yield messages\n", " yield messages\n", "\n", "\n", "with gr.Blocks() as demo:\n", " stored_message = gr.State([])\n", " chatbot = gr.Chatbot(label=\"Agent\",\n", " type=\"messages\",\n", " avatar_images=(None, \"https://em-content.zobj.net/source/twitter/53/robot-face_1f916.png\"))\n", " text_input = gr.Textbox(lines=1, label=\"Chat Message\")\n", " text_input.submit(lambda s: (s, \"\"), [text_input], [stored_message, text_input]).then(interact_with_agent, [stored_message, chatbot], [chatbot])\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: agent_chatbot"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio git+https://github.com/huggingface/transformers.git#egg=transformers[agents]"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/agent_chatbot/utils.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from gradio import ChatMessage\n", "from transformers import load_tool, ReactCodeAgent, HfEngine # type: ignore\n", "from utils import stream_from_transformers_agent\n", "\n", "# Import tool from Hub\n", "image_generation_tool = load_tool(\"m-ric/text-to-image\")\n", "\n", "\n", "llm_engine = HfEngine(\"meta-llama/Meta-Llama-3-70B-Instruct\")\n", "# Initialize the agent with both tools\n", "agent = ReactCodeAgent(tools=[image_generation_tool], llm_engine=llm_engine)\n", "\n", "\n", "def interact_with_agent(prompt, messages):\n", " messages.append(ChatMessage(role=\"user\", content=prompt))\n", " yield messages\n", " for msg in stream_from_transformers_agent(agent, prompt):\n", " messages.append(msg)\n", " yield messages\n", " yield messages\n", "\n", "\n", "with gr.Blocks() as demo:\n", " stored_message = gr.State([])\n", " chatbot = gr.Chatbot(label=\"Agent\",\n", " type=\"messages\",\n", " avatar_images=(None, \"https://em-content.zobj.net/source/twitter/53/robot-face_1f916.png\"))\n", " text_input = gr.Textbox(lines=1, label=\"Chat Message\")\n", " text_input.submit(lambda s: (s, \"\"), [text_input], [stored_message, text_input]).then(interact_with_agent, [stored_message, chatbot], [chatbot])\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@ -1,6 +1,6 @@
import gradio as gr
from gradio import ChatMessage
from transformers import load_tool, ReactCodeAgent, HfEngine
from transformers import load_tool, ReactCodeAgent, HfEngine  # type: ignore
from utils import stream_from_transformers_agent

# Import tool from Hub
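The change above is the pattern repeated throughout this PR: a trailing `# type: ignore` silences the checker on a single line whose value comes from a loosely annotated third-party API (here, transformers), instead of loosening the demo's own annotations. For reference, mypy also accepts a narrower per-error-code form; a small illustration (the `json` example and its error code are ours, not from this diff):

import json

payload: object = json.loads('{"text": "hello"}')

# Blanket form, as used in these demos: ignores every error on the line.
text = payload["text"]  # type: ignore

# Narrower form mypy supports: ignore only the named error code, so other
# mistakes on the same line still get reported.
text_scoped = payload["text"]  # type: ignore[index]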
@ -1,3 +1,5 @@
from __future__ import annotations

from gradio import ChatMessage
from transformers.agents import ReactCodeAgent, agent_types
from typing import Generator
@ -37,7 +39,8 @@ def stream_from_transformers_agent(

    class Output:
        output: agent_types.AgentType | str = None

    step_log = None
    for step_log in agent.run(prompt, stream=True):
        if isinstance(step_log, dict):
            for message in pull_message(step_log):
@ -48,16 +51,16 @@ def stream_from_transformers_agent(
    Output.output = step_log
    if isinstance(Output.output, agent_types.AgentText):
        yield ChatMessage(
            role="assistant", content=f"**Final answer:**\n```\n{Output.output.to_string()}\n```")
            role="assistant", content=f"**Final answer:**\n```\n{Output.output.to_string()}\n```")  # type: ignore
    elif isinstance(Output.output, agent_types.AgentImage):
        yield ChatMessage(
            role="assistant",
            content={"path": Output.output.to_string(), "mime_type": "image/png"},
            content={"path": Output.output.to_string(), "mime_type": "image/png"},  # type: ignore
        )
    elif isinstance(Output.output, agent_types.AgentAudio):
        yield ChatMessage(
            role="assistant",
            content={"path": Output.output.to_string(), "mime_type": "audio/wav"},
            content={"path": Output.output.to_string(), "mime_type": "audio/wav"},  # type: ignore
        )
    else:
        return ChatMessage(role="assistant", content=Output.output)
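The `from __future__ import annotations` line added in the first hunk above is what makes the `agent_types.AgentType | str` annotation legal on Python 3.9 (the commit log mentions "fixes for python3.9"): PEP 604's `X | Y` union syntax only became evaluable at runtime in Python 3.10, while the future import (PEP 563) stores annotations as unevaluated strings. A minimal sketch of the failure mode, with a stand-in class instead of the transformers type:

from __future__ import annotations  # PEP 563: annotations are stored as strings

class AgentType:  # stand-in for transformers.agents.agent_types.AgentType
    pass

class Output:
    # Without the __future__ import, Python 3.9 would evaluate `AgentType | str`
    # at class-creation time and raise TypeError (types.UnionType only exists
    # from 3.10); with it, only static type checkers interpret the annotation.
    output: AgentType | str = ""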
@ -1,2 +0,0 @@
altair
vega_datasets
File diff suppressed because one or more lines are too long
@ -1,140 +0,0 @@
import altair as alt
import gradio as gr
import numpy as np
import pandas as pd
from vega_datasets import data


def make_plot(plot_type):
    if plot_type == "scatter_plot":
        cars = data.cars()
        return alt.Chart(cars).mark_point().encode(
            x='Horsepower',
            y='Miles_per_Gallon',
            color='Origin',
        )
    elif plot_type == "heatmap":
        # Compute x^2 + y^2 across a 2D grid
        x, y = np.meshgrid(range(-5, 5), range(-5, 5))
        z = x ** 2 + y ** 2

        # Convert this grid to columnar data expected by Altair
        source = pd.DataFrame({'x': x.ravel(),
                               'y': y.ravel(),
                               'z': z.ravel()})
        return alt.Chart(source).mark_rect().encode(
            x='x:O',
            y='y:O',
            color='z:Q'
        )
    elif plot_type == "us_map":
        states = alt.topo_feature(data.us_10m.url, 'states')
        source = data.income.url

        return alt.Chart(source).mark_geoshape().encode(
            shape='geo:G',
            color='pct:Q',
            tooltip=['name:N', 'pct:Q'],
            facet=alt.Facet('group:N', columns=2),
        ).transform_lookup(
            lookup='id',
            from_=alt.LookupData(data=states, key='id'),
            as_='geo'
        ).properties(
            width=300,
            height=175,
        ).project(
            type='albersUsa'
        )
    elif plot_type == "interactive_barplot":
        source = data.movies.url

        pts = alt.selection(type="single", encodings=['x'])

        rect = alt.Chart(data.movies.url).mark_rect().encode(
            alt.X('IMDB_Rating:Q', bin=True),
            alt.Y('Rotten_Tomatoes_Rating:Q', bin=True),
            alt.Color('count()',
                scale=alt.Scale(scheme='greenblue'),
                legend=alt.Legend(title='Total Records')
            )
        )

        circ = rect.mark_point().encode(
            alt.ColorValue('grey'),
            alt.Size('count()',
                legend=alt.Legend(title='Records in Selection')
            )
        ).transform_filter(
            pts
        )

        bar = alt.Chart(source).mark_bar().encode(
            x='Major_Genre:N',
            y='count()',
            color=alt.condition(pts, alt.ColorValue("steelblue"), alt.ColorValue("grey"))
        ).properties(
            width=550,
            height=200
        ).add_selection(pts)

        plot = alt.vconcat(
            rect + circ,
            bar
        ).resolve_legend(
            color="independent",
            size="independent"
        )
        return plot
    elif plot_type == "radial":
        source = pd.DataFrame({"values": [12, 23, 47, 6, 52, 19]})

        base = alt.Chart(source).encode(
            theta=alt.Theta("values:Q", stack=True),
            radius=alt.Radius("values", scale=alt.Scale(type="sqrt", zero=True, rangeMin=20)),
            color="values:N",
        )

        c1 = base.mark_arc(innerRadius=20, stroke="#fff")

        c2 = base.mark_text(radiusOffset=10).encode(text="values:Q")

        return c1 + c2
    elif plot_type == "multiline":
        source = data.stocks()

        highlight = alt.selection(type='single', on='mouseover',
                                  fields=['symbol'], nearest=True)

        base = alt.Chart(source).encode(
            x='date:T',
            y='price:Q',
            color='symbol:N'
        )

        points = base.mark_circle().encode(
            opacity=alt.value(0)
        ).add_selection(
            highlight
        ).properties(
            width=600
        )

        lines = base.mark_line().encode(
            size=alt.condition(~highlight, alt.value(1), alt.value(3))
        )

        return points + lines


with gr.Blocks() as demo:
    button = gr.Radio(label="Plot type",
                      choices=['scatter_plot', 'heatmap', 'us_map',
                               'interactive_barplot', "radial", "multiline"], value='scatter_plot')
    plot = gr.Plot(label="Plot")
    button.change(make_plot, inputs=button, outputs=[plot])
    demo.load(make_plot, inputs=[button], outputs=[plot])


if __name__ == "__main__":
    demo.launch()
@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: asr"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch torchaudio transformers"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from transformers import pipeline\n", "import numpy as np\n", "\n", "transcriber = pipeline(\"automatic-speech-recognition\", model=\"openai/whisper-base.en\")\n", "\n", "def transcribe(audio):\n", " sr, y = audio\n", " y = y.astype(np.float32)\n", " y /= np.max(np.abs(y))\n", "\n", " return transcriber({\"sampling_rate\": sr, \"raw\": y})[\"text\"]\n", "\n", "\n", "demo = gr.Interface(\n", " transcribe,\n", " gr.Audio(sources=[\"microphone\"]),\n", " \"text\",\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: asr"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch torchaudio transformers"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from transformers import pipeline\n", "import numpy as np\n", "\n", "transcriber = pipeline(\"automatic-speech-recognition\", model=\"openai/whisper-base.en\")\n", "\n", "def transcribe(audio):\n", " sr, y = audio\n", " y = y.astype(np.float32)\n", " y /= np.max(np.abs(y))\n", "\n", " return transcriber({\"sampling_rate\": sr, \"raw\": y})[\"text\"] # type: ignore\n", "\n", "\n", "demo = gr.Interface(\n", " transcribe,\n", " gr.Audio(sources=[\"microphone\"]),\n", " \"text\",\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@ -9,7 +9,7 @@ def transcribe(audio):
    y = y.astype(np.float32)
    y /= np.max(np.abs(y))

    return transcriber({"sampling_rate": sr, "raw": y})["text"]
    return transcriber({"sampling_rate": sr, "raw": y})["text"]  # type: ignore


demo = gr.Interface(
@ -1 +0,0 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: blocks_component_shortcut"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "\n", "def greet(str):\n", " return str\n", "\n", "\n", "with gr.Blocks() as demo:\n", " \"\"\"\n", " You can make use of str shortcuts you use in Interface within Blocks as well.\n", " \n", " Interface shortcut example:\n", " Interface(greet, \"textarea\", \"textarea\")\n", " \n", " You can use \n", " 1. gr.component()\n", " 2. gr.templates.Template()\n", " 3. gr.Template()\n", " All the templates are listed in gradio/templates.py\n", " \"\"\"\n", " with gr.Row():\n", " text1 = gr.component(\"textarea\")\n", " text2 = gr.TextArea()\n", " text3 = gr.templates.TextArea()\n", " text1.blur(greet, text1, text2)\n", " text2.blur(greet, text2, text3)\n", " text3.blur(greet, text3, text1)\n", " button = gr.component(\"button\")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@ -1,31 +0,0 @@
import gradio as gr


def greet(str):
    return str


with gr.Blocks() as demo:
    """
    You can make use of str shortcuts you use in Interface within Blocks as well.

    Interface shortcut example:
    Interface(greet, "textarea", "textarea")

    You can use
    1. gr.component()
    2. gr.templates.Template()
    3. gr.Template()
    All the templates are listed in gradio/templates.py
    """
    with gr.Row():
        text1 = gr.component("textarea")
        text2 = gr.TextArea()
        text3 = gr.templates.TextArea()
        text1.blur(greet, text1, text2)
        text2.blur(greet, text2, text3)
        text3.blur(greet, text3, text1)
        button = gr.component("button")

if __name__ == "__main__":
    demo.launch()
@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: blocks_flag"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import numpy as np\n", "import gradio as gr\n", "\n", "def sepia(input_img, strength):\n", " sepia_filter = strength * np.array(\n", " [[0.393, 0.769, 0.189], [0.349, 0.686, 0.168], [0.272, 0.534, 0.131]]\n", " ) + (1-strength) * np.identity(3)\n", " sepia_img = input_img.dot(sepia_filter.T)\n", " sepia_img /= sepia_img.max()\n", " return sepia_img\n", "\n", "callback = gr.CSVLogger()\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " img_input = gr.Image()\n", " strength = gr.Slider(0, 1, 0.5)\n", " img_output = gr.Image()\n", " with gr.Row():\n", " btn = gr.Button(\"Flag\")\n", " \n", " # This needs to be called at some point prior to the first call to callback.flag()\n", " callback.setup([img_input, strength, img_output], \"flagged_data_points\")\n", "\n", " img_input.change(sepia, [img_input, strength], img_output)\n", " strength.change(sepia, [img_input, strength], img_output)\n", " \n", " # We can choose which components to flag -- in this case, we'll flag all of them\n", " btn.click(lambda *args: callback.flag(args), [img_input, strength, img_output], None, preprocess=False)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: blocks_flag"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import numpy as np\n", "import gradio as gr\n", "\n", "def sepia(input_img, strength):\n", " sepia_filter = strength * np.array(\n", " [[0.393, 0.769, 0.189], [0.349, 0.686, 0.168], [0.272, 0.534, 0.131]]\n", " ) + (1-strength) * np.identity(3)\n", " sepia_img = input_img.dot(sepia_filter.T)\n", " sepia_img /= sepia_img.max()\n", " return sepia_img\n", "\n", "callback = gr.CSVLogger()\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " img_input = gr.Image()\n", " strength = gr.Slider(0, 1, 0.5)\n", " img_output = gr.Image()\n", " with gr.Row():\n", " btn = gr.Button(\"Flag\")\n", " \n", " # This needs to be called at some point prior to the first call to callback.flag()\n", " callback.setup([img_input, strength, img_output], \"flagged_data_points\")\n", "\n", " img_input.change(sepia, [img_input, strength], img_output)\n", " strength.change(sepia, [img_input, strength], img_output)\n", " \n", " # We can choose which components to flag -- in this case, we'll flag all of them\n", " btn.click(lambda *args: callback.flag(list(args)), [img_input, strength, img_output], None, preprocess=False)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@ -27,7 +27,7 @@ with gr.Blocks() as demo:
    strength.change(sepia, [img_input, strength], img_output)

    # We can choose which components to flag -- in this case, we'll flag all of them
    btn.click(lambda *args: callback.flag(args), [img_input, strength, img_output], None, preprocess=False)
    btn.click(lambda *args: callback.flag(list(args)), [img_input, strength, img_output], None, preprocess=False)

if __name__ == "__main__":
    demo.launch()
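The blocks_flag change above ties back to the container-typing theme of this PR: a `*args` parameter is received as a tuple, so if `CSVLogger.flag` declares a list parameter, the tuple has to be converted before the call type-checks. A minimal sketch of the mismatch, with a stand-in `flag` signature rather than the real gradio one:

def flag(flag_data: list) -> int:  # stand-in; gradio's CSVLogger.flag is richer
    return len(flag_data)

def handler(*args: object) -> int:
    # args is a tuple[object, ...]; a parameter annotated as list rejects it,
    # which is why the demo now wraps the call as callback.flag(list(args)).
    return flag(list(args))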
File diff suppressed because one or more lines are too long
@ -1,289 +0,0 @@
import gradio as gr
import time
from os.path import abspath, join, pardir

KS_FILES = abspath(join(__file__, pardir, pardir, "kitchen_sink", "files"))

base_theme = gr.themes.Base()
default_theme = gr.themes.Default()
monochrome_theme = gr.themes.Monochrome()
soft_theme = gr.themes.Soft()
glass_theme = gr.themes.Glass()

with gr.Blocks(theme=base_theme) as demo:
    gr.Markdown(
        """
        # Blocks Kitchen Sink
        This is a demo of most Gradio features. Test all themes and toggle dark mode
        ## Elements
        - Use of Rows, Columns, Tabs, and Accordion
        - Use of Form elements: Textbox, Dropdown, Checkbox, Radio, Slider
        ## Other
        Other stuff
        - Buttons of variants: "primary", "secondary", "stop"
        - Embedded interface
        - Custom progress bar
        """
    )
    toggle_dark = gr.Button("Toggle Dark", scale=0)
    toggle_dark.click(
        None,
        js="""
        () => {
            document.body.classList.toggle('dark');
        }
        """,
    )
    theme_selector = gr.Radio(
        ["Base", "Default", "Monochrome", "Soft", "Glass"],
        value="Base",
        label="Theme",
    )
    theme_selector.change(
        None,
        theme_selector,
        None,
        js=f"""
        (theme) => {{
            if (!document.querySelector('.theme-css')) {{
                var theme_elem = document.createElement('style');
                theme_elem.classList.add('theme-css');
                document.head.appendChild(theme_elem);

                var link_elem = document.createElement('link');
                link_elem.classList.add('link-css');
                link_elem.rel = 'stylesheet';
                document.head.appendChild(link_elem);
            }} else {{
                var theme_elem = document.querySelector('.theme-css');
                var link_elem = document.querySelector('.link-css');
            }}
            if (theme == "Base") {{
                var theme_css = `{base_theme._get_theme_css()}`;
                var link_css = `{base_theme._stylesheets[0]}`;
            }} else if (theme == "Default") {{
                var theme_css = `{default_theme._get_theme_css()}`;
                var link_css = `{default_theme._stylesheets[0]}`;
            }} else if (theme == "Monochrome") {{
                var theme_css = `{monochrome_theme._get_theme_css()}`;
                var link_css = `{monochrome_theme._stylesheets[0]}`;
            }} else if (theme == "Soft") {{
                var theme_css = `{soft_theme._get_theme_css()}`;
                var link_css = `{soft_theme._stylesheets[0]}`;
            }} else if (theme == "Glass") {{
                var theme_css = `{glass_theme._get_theme_css()}`;
                var link_css = `{glass_theme._stylesheets[0]}`;
            }}
            theme_elem.innerHTML = theme_css;
            link_elem.href = link_css;
        }}
        """,
    )

    name = gr.Textbox(
        label="Name (select)",
        info="Full name, including middle name. No special characters.",
        placeholder="John Doe",
        value="John Doe",
        interactive=True,
    )

    with gr.Row():
        slider1 = gr.Slider(label="Slider 1")
        slider2 = gr.Slider(label="Slider 2")
    checkboxes = gr.CheckboxGroup(["A", "B", "C"], label="Checkbox Group (select)")

    with gr.Row():
        with gr.Column(variant="panel", scale=1):
            gr.Markdown("## Panel 1")
            radio = gr.Radio(
                ["A", "B", "C"],
                label="Radio (select)",
                info="Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.",
            )
            drop = gr.Dropdown(["Option 1", "Option 2", "Option 3"], show_label=False)
            drop_2 = gr.Dropdown(
                ["Option A", "Option B", "Option C"],
                multiselect=True,
                value=["Option A"],
                label="Dropdown (select)",
                interactive=True,
            )
            check = gr.Checkbox(label="Go")
        with gr.Column(variant="panel", scale=2):
            img = gr.Image(
                "https://picsum.photos/536/354",
                label="Image",
                height=320,
            )
            with gr.Row():
                go_btn = gr.Button("Go", label="Primary Button", variant="primary")
                clear_btn = gr.Button(
                    "Clear", label="Secondary Button", variant="secondary"
                )

                def go(*args):
                    time.sleep(3)
                    return "https://i.ibb.co/6BgKdSj/groot.jpg"

                go_btn.click(go, [radio, drop, drop_2, check, name], img, api_name="go")

                def clear():
                    time.sleep(0.2)
                    return None

                clear_btn.click(clear, None, img)

            with gr.Row():
                btn1 = gr.Button("Button 1", size="sm")
                btn2 = gr.UploadButton(size="sm")
                stop_btn = gr.Button(
                    "Stop", label="Stop Button", variant="stop", size="sm"
                )

    gr.Examples(
        examples=[join(KS_FILES, "lion.jpg"), join(KS_FILES, "tower.jpg")],
        inputs=img,
    )

    gr.Examples(
        examples=[
            ["A", "Option 1", ["Option B"], True, join(KS_FILES, "lion.jpg")],
            [
                "B",
                "Option 2",
                ["Option B", "Option C"],
                False,
                join(KS_FILES, "tower.jpg"),
            ],
        ],
        inputs=[radio, drop, drop_2, check, img],
        label="Examples (select)",
    )

    gr.Markdown("## Media Files")

    with gr.Tabs() as tabs:
        with gr.Tab("Audio"):
            with gr.Row():
                gr.Audio()
                gr.Audio(sources=["microphone"])
                gr.Audio(join(KS_FILES, "cantina.wav"))
        with gr.Tab("Other"):
            # gr.Image(source="webcam")
            gr.HTML(
                "<div style='width: 100px; height: 100px; background-color: blue;'></div>"
            )
    with gr.Row():
        dataframe = gr.Dataframe(
            value=[[1, 2, 3], [4, 5, 6], [7, 8, 9]], label="Dataframe (select)"
        )
        gr.JSON(
            value={"a": 1, "b": 2, "c": {"test": "a", "test2": [1, 2, 3]}}, label="JSON"
        )
        label = gr.Label(
            value={"cat": 0.7, "dog": 0.2, "fish": 0.1}, label="Label (select)"
        )
        file = gr.File(label="File (select)")
    with gr.Row():
        gr.ColorPicker()
        gr.Video(join(KS_FILES, "world.mp4"))
        gallery = gr.Gallery(
            [
                (join(KS_FILES, "lion.jpg"), "lion"),
                (join(KS_FILES, "logo.png"), "logo"),
                (join(KS_FILES, "tower.jpg"), "tower"),
            ],
            label="Gallery (select)",
        )

    with gr.Row():
        with gr.Column(scale=2):
            highlight = gr.HighlightedText(
                [["The", "art"], ["dog", "noun"], ["is", None], ["fat", "adj"]],
                label="Highlighted Text (select)",
            )
            chatbot = gr.Chatbot([["Hello", "Hi"]], label="Chatbot (select)")
            chat_btn = gr.Button("Add messages")

            def chat(history):
                time.sleep(2)
                yield [["How are you?", "I am good."]]
                time

            chat_btn.click(
                lambda history: history
                + [["How are you?", "I am good."]]
                + (time.sleep(2) or []),
                chatbot,
                chatbot,
            )
        with gr.Column(scale=1):
            with gr.Accordion("Select Info"):
                gr.Markdown(
                    "Click on any part of any component with '(select)' in the label and see the SelectData data here."
                )
                select_index = gr.Textbox(label="Index")
                select_value = gr.Textbox(label="Value")
                select_selected = gr.Textbox(label="Selected")

    selectables = [
        name,
        checkboxes,
        radio,
        drop_2,
        dataframe,
        label,
        file,
        highlight,
        chatbot,
        gallery,
        tabs,
    ]

    def select_data(evt: gr.SelectData):
        return [
            evt.index,
            evt.value,
            evt.selected,
        ]

    for selectable in selectables:
        selectable.select(
            select_data,
            None,
            [select_index, select_value, select_selected],
        )

    gr.Markdown("## Dataset Examples")

    component_example_set = [
        (gr.Audio(render=False), join(KS_FILES, "cantina.wav")),
        (gr.Checkbox(render=False), True),
        (gr.CheckboxGroup(render=False, choices=["A", "B"]), ["A", "B"]),
        (gr.ColorPicker(render=False), "#FF0000"),
        (gr.Dataframe(render=False), [[1, 2, 3], [4, 5, 6]]),
        (gr.Dropdown(render=False), "A"),
        (gr.File(render=False), join(KS_FILES, "lion.jpg")),
        (gr.HTML(render=False), "<div>Test</div>"),
        (gr.Image(render=False), join(KS_FILES, "lion.jpg")),
        (gr.Markdown(render=False), "# Test"),
        (gr.Number(render=False), 1),
        (gr.Radio(render=False), "A"),
        (gr.Slider(render=False), 1),
        (gr.Textbox(render=False), "A"),
        (gr.Video(render=False), join(KS_FILES, "world.mp4")),
    ]
    gr.Dataset(
        components=[c for c, _ in component_example_set],
        samples=[[e for _, e in component_example_set]],
    )

    with gr.Tabs():
        for c, e in component_example_set:
            with gr.Tab(c.__class__.__name__):
                gr.Dataset(components=[c], samples=[[e]] * 3)


if __name__ == "__main__":
    demo.launch(allowed_paths=[KS_FILES])
@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: blocks_speech_text_sentiment"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch transformers"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["from transformers import pipeline\n", "\n", "import gradio as gr\n", "\n", "asr = pipeline(\"automatic-speech-recognition\", \"facebook/wav2vec2-base-960h\")\n", "classifier = pipeline(\"text-classification\")\n", "\n", "\n", "def speech_to_text(speech):\n", " text = asr(speech)[\"text\"]\n", " return text\n", "\n", "\n", "def text_to_sentiment(text):\n", " return classifier(text)[0][\"label\"]\n", "\n", "\n", "demo = gr.Blocks()\n", "\n", "with demo:\n", " audio_file = gr.Audio(type=\"filepath\")\n", " text = gr.Textbox()\n", " label = gr.Label()\n", "\n", " b1 = gr.Button(\"Recognize Speech\")\n", " b2 = gr.Button(\"Classify Sentiment\")\n", "\n", " b1.click(speech_to_text, inputs=audio_file, outputs=text)\n", " b2.click(text_to_sentiment, inputs=text, outputs=label)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: blocks_speech_text_sentiment"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch transformers"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["from transformers import pipeline\n", "\n", "import gradio as gr\n", "\n", "asr = pipeline(\"automatic-speech-recognition\", \"facebook/wav2vec2-base-960h\")\n", "classifier = pipeline(\"text-classification\")\n", "\n", "\n", "def speech_to_text(speech):\n", " text = asr(speech)[\"text\"] # type: ignore\n", " return text\n", "\n", "\n", "def text_to_sentiment(text):\n", " return classifier(text)[0][\"label\"] # type: ignore\n", "\n", "\n", "demo = gr.Blocks()\n", "\n", "with demo:\n", " audio_file = gr.Audio(type=\"filepath\")\n", " text = gr.Textbox()\n", " label = gr.Label()\n", "\n", " b1 = gr.Button(\"Recognize Speech\")\n", " b2 = gr.Button(\"Classify Sentiment\")\n", "\n", " b1.click(speech_to_text, inputs=audio_file, outputs=text)\n", " b2.click(text_to_sentiment, inputs=text, outputs=label)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@ -7,12 +7,12 @@ classifier = pipeline("text-classification")


def speech_to_text(speech):
    text = asr(speech)["text"]
    text = asr(speech)["text"]  # type: ignore
    return text


def text_to_sentiment(text):
    return classifier(text)[0]["label"]
    return classifier(text)[0]["label"]  # type: ignore


demo = gr.Blocks()
@ -1 +0,0 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: blocks_style"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks(title=\"Styling Examples\") as demo:\n", " with gr.Column(variant=\"box\"):\n", " txt = gr.Textbox(label=\"Small Textbox\", lines=1)\n", " num = gr.Number(label=\"Number\", show_label=False)\n", " slider = gr.Slider(label=\"Slider\", show_label=False)\n", " check = gr.Checkbox(label=\"Checkbox\", show_label=False)\n", " check_g = gr.CheckboxGroup(\n", " label=\"Checkbox Group\",\n", " choices=[\"One\", \"Two\", \"Three\"],\n", " show_label=False,\n", " )\n", " radio = gr.Radio(\n", " label=\"Radio\",\n", " choices=[\"One\", \"Two\", \"Three\"],\n", " show_label=False,\n", " )\n", " drop = gr.Dropdown(\n", " label=\"Dropdown\", choices=[\"One\", \"Two\", \"Three\"], show_label=False\n", " )\n", " image = gr.Image(show_label=False)\n", " video = gr.Video(show_label=False)\n", " audio = gr.Audio(show_label=False)\n", " file = gr.File(show_label=False)\n", " df = gr.Dataframe(show_label=False)\n", " label = gr.Label(container=False)\n", " highlight = gr.HighlightedText(\n", " [(\"hello\", None), (\"goodbye\", \"-\")],\n", " color_map={\"+\": \"green\", \"-\": \"red\"},\n", " container=False,\n", " )\n", " json = gr.JSON(container=False)\n", " html = gr.HTML(show_label=False)\n", " gallery = gr.Gallery(\n", " columns=(3, 3, 1),\n", " height=\"auto\",\n", " container=False,\n", " )\n", " chat = gr.Chatbot([(\"hi\", \"good bye\")])\n", "\n", " model = gr.Model3D()\n", "\n", " md = gr.Markdown(show_label=False)\n", "\n", " highlight = gr.HighlightedText()\n", "\n", " btn = gr.Button(\"Run\")\n", "\n", " gr.Dataset(components=[txt, num])\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@ -1,54 +0,0 @@
import gradio as gr

with gr.Blocks(title="Styling Examples") as demo:
    with gr.Column(variant="box"):
        txt = gr.Textbox(label="Small Textbox", lines=1)
        num = gr.Number(label="Number", show_label=False)
        slider = gr.Slider(label="Slider", show_label=False)
        check = gr.Checkbox(label="Checkbox", show_label=False)
        check_g = gr.CheckboxGroup(
            label="Checkbox Group",
            choices=["One", "Two", "Three"],
            show_label=False,
        )
        radio = gr.Radio(
            label="Radio",
            choices=["One", "Two", "Three"],
            show_label=False,
        )
        drop = gr.Dropdown(
            label="Dropdown", choices=["One", "Two", "Three"], show_label=False
        )
        image = gr.Image(show_label=False)
        video = gr.Video(show_label=False)
        audio = gr.Audio(show_label=False)
        file = gr.File(show_label=False)
        df = gr.Dataframe(show_label=False)
        label = gr.Label(container=False)
        highlight = gr.HighlightedText(
            [("hello", None), ("goodbye", "-")],
            color_map={"+": "green", "-": "red"},
            container=False,
        )
        json = gr.JSON(container=False)
        html = gr.HTML(show_label=False)
        gallery = gr.Gallery(
            columns=(3, 3, 1),
            height="auto",
            container=False,
        )
        chat = gr.Chatbot([("hi", "good bye")])

        model = gr.Model3D()

        md = gr.Markdown(show_label=False)

        highlight = gr.HighlightedText()

        btn = gr.Button("Run")

        gr.Dataset(components=[txt, num])


if __name__ == "__main__":
    demo.launch()
@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: bokeh_plot"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio bokeh>=3.0 xyzservices"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import xyzservices.providers as xyz\n", "from bokeh.models import ColumnDataSource, Whisker\n", "from bokeh.plotting import figure\n", "from bokeh.sampledata.autompg2 import autompg2 as df\n", "from bokeh.sampledata.penguins import data\n", "from bokeh.transform import factor_cmap, jitter, factor_mark\n", "\n", "\n", "def get_plot(plot_type):\n", " if plot_type == \"map\":\n", " plot = figure(\n", " x_range=(-2000000, 6000000),\n", " y_range=(-1000000, 7000000),\n", " x_axis_type=\"mercator\",\n", " y_axis_type=\"mercator\",\n", " )\n", " plot.add_tile(xyz.OpenStreetMap.Mapnik)\n", " return plot\n", " elif plot_type == \"whisker\":\n", " classes = list(sorted(df[\"class\"].unique()))\n", "\n", " p = figure(\n", " height=400,\n", " x_range=classes,\n", " background_fill_color=\"#efefef\",\n", " title=\"Car class vs HWY mpg with quintile ranges\",\n", " )\n", " p.xgrid.grid_line_color = None\n", "\n", " g = df.groupby(\"class\")\n", " upper = g.hwy.quantile(0.80)\n", " lower = g.hwy.quantile(0.20)\n", " source = ColumnDataSource(data=dict(base=classes, upper=upper, lower=lower))\n", "\n", " error = Whisker(\n", " base=\"base\",\n", " upper=\"upper\",\n", " lower=\"lower\",\n", " source=source,\n", " level=\"annotation\",\n", " line_width=2,\n", " )\n", " error.upper_head.size = 20\n", " error.lower_head.size = 20\n", " p.add_layout(error)\n", "\n", " p.circle(\n", " jitter(\"class\", 0.3, range=p.x_range),\n", " \"hwy\",\n", " source=df,\n", " alpha=0.5,\n", " size=13,\n", " line_color=\"white\",\n", " color=factor_cmap(\"class\", \"Light6\", classes),\n", " )\n", " return p\n", " elif plot_type == \"scatter\":\n", "\n", " SPECIES = sorted(data.species.unique())\n", " MARKERS = [\"hex\", \"circle_x\", \"triangle\"]\n", "\n", " p = figure(title=\"Penguin size\", background_fill_color=\"#fafafa\")\n", " p.xaxis.axis_label = \"Flipper Length (mm)\"\n", " p.yaxis.axis_label = \"Body Mass (g)\"\n", "\n", " p.scatter(\n", " \"flipper_length_mm\",\n", " \"body_mass_g\",\n", " source=data,\n", " legend_group=\"species\",\n", " fill_alpha=0.4,\n", " size=12,\n", " marker=factor_mark(\"species\", MARKERS, SPECIES),\n", " color=factor_cmap(\"species\", \"Category10_3\", SPECIES),\n", " )\n", "\n", " p.legend.location = \"top_left\"\n", " p.legend.title = \"Species\"\n", " return p\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " plot_type = gr.Radio(value=\"scatter\", choices=[\"scatter\", \"whisker\", \"map\"])\n", " plot = gr.Plot()\n", " plot_type.change(get_plot, inputs=[plot_type], outputs=[plot])\n", " demo.load(get_plot, inputs=[plot_type], outputs=[plot])\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: bokeh_plot"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio bokeh>=3.0 xyzservices"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import xyzservices.providers as xyz\n", "from bokeh.models import ColumnDataSource, Whisker\n", "from bokeh.plotting import figure\n", "from bokeh.sampledata.autompg2 import autompg2 as df\n", "from bokeh.sampledata.penguins import data\n", "from bokeh.transform import factor_cmap, jitter, factor_mark\n", "\n", "\n", "def get_plot(plot_type):\n", " if plot_type == \"map\":\n", " plot = figure(\n", " x_range=(-2000000, 6000000),\n", " y_range=(-1000000, 7000000),\n", " x_axis_type=\"mercator\",\n", " y_axis_type=\"mercator\",\n", " )\n", " plot.add_tile(xyz.OpenStreetMap.Mapnik) # type: ignore\n", " return plot\n", " elif plot_type == \"whisker\":\n", " classes = list(sorted(df[\"class\"].unique()))\n", "\n", " p = figure(\n", " height=400,\n", " x_range=classes,\n", " background_fill_color=\"#efefef\",\n", " title=\"Car class vs HWY mpg with quintile ranges\",\n", " )\n", " p.xgrid.grid_line_color = None\n", "\n", " g = df.groupby(\"class\")\n", " upper = g.hwy.quantile(0.80)\n", " lower = g.hwy.quantile(0.20)\n", " source = ColumnDataSource(data=dict(base=classes, upper=upper, lower=lower))\n", "\n", " error = Whisker(\n", " base=\"base\",\n", " upper=\"upper\",\n", " lower=\"lower\",\n", " source=source,\n", " level=\"annotation\",\n", " line_width=2,\n", " )\n", " error.upper_head.size = 20\n", " error.lower_head.size = 20\n", " p.add_layout(error)\n", "\n", " p.circle(\n", " jitter(\"class\", 0.3, range=p.x_range),\n", " \"hwy\",\n", " source=df,\n", " alpha=0.5,\n", " size=13,\n", " line_color=\"white\",\n", " color=factor_cmap(\"class\", \"Light6\", classes),\n", " )\n", " return p\n", " elif plot_type == \"scatter\":\n", "\n", " SPECIES = sorted(data.species.unique())\n", " MARKERS = [\"hex\", \"circle_x\", \"triangle\"]\n", "\n", " p = figure(title=\"Penguin size\", background_fill_color=\"#fafafa\")\n", " p.xaxis.axis_label = \"Flipper Length (mm)\"\n", " p.yaxis.axis_label = \"Body Mass (g)\"\n", "\n", " p.scatter(\n", " \"flipper_length_mm\",\n", " \"body_mass_g\",\n", " source=data,\n", " legend_group=\"species\",\n", " fill_alpha=0.4,\n", " size=12,\n", " marker=factor_mark(\"species\", MARKERS, SPECIES),\n", " color=factor_cmap(\"species\", \"Category10_3\", SPECIES),\n", " )\n", "\n", " p.legend.location = \"top_left\"\n", " p.legend.title = \"Species\"\n", " return p\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " plot_type = gr.Radio(value=\"scatter\", choices=[\"scatter\", \"whisker\", \"map\"])\n", " plot = gr.Plot()\n", " plot_type.change(get_plot, inputs=[plot_type], outputs=[plot])\n", " demo.load(get_plot, inputs=[plot_type], outputs=[plot])\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@ -15,7 +15,7 @@ def get_plot(plot_type):
            x_axis_type="mercator",
            y_axis_type="mercator",
        )
        plot.add_tile(xyz.OpenStreetMap.Mapnik)
        plot.add_tile(xyz.OpenStreetMap.Mapnik)  # type: ignore
        return plot
    elif plot_type == "whisker":
        classes = list(sorted(df["class"].unique()))
File diff suppressed because one or more lines are too long
@ -114,7 +114,7 @@ components = [
        )
    ),
    gr.Dataframe(
        value=lambda: pd.DataFrame({"random_number_rows": range(5)}, columns=["one", "two", "three"])
        value=lambda: pd.DataFrame({"random_number_rows": range(5)}, columns=["one", "two", "three"])  # type: ignore
    ),
    gr.ColorPicker(value=lambda: random.choice(["#000000", "#ff0000", "#0000FF"])),
    gr.Label(value=lambda: random.choice(["Pedestrian", "Car", "Cyclist"])),
@ -148,7 +148,7 @@ def evaluate_values(*args):
    are_false = []
    for a in args:
        if isinstance(a, (pd.DataFrame, np.ndarray)):
            are_false.append(not a.any().any())
            are_false.append(not a.any().any())  # type: ignore
        elif isinstance(a, str) and a.startswith("#"):
            are_false.append(a == "#000000")
        else:
@ -1 +0,0 @@
This demo built with Blocks generates 9 plots based on the input.
@ -1,2 +0,0 @@
matplotlib>=3.5.2
scikit-learn>=1.0.1
File diff suppressed because one or more lines are too long
@ -1,282 +0,0 @@
|
||||
import gradio as gr
|
||||
import math
|
||||
from functools import partial
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
from sklearn.cluster import (
|
||||
AgglomerativeClustering, Birch, DBSCAN, KMeans, MeanShift, OPTICS, SpectralClustering, estimate_bandwidth
|
||||
)
|
||||
from sklearn.datasets import make_blobs, make_circles, make_moons
|
||||
from sklearn.mixture import GaussianMixture
|
||||
from sklearn.neighbors import kneighbors_graph
|
||||
from sklearn.preprocessing import StandardScaler
|
||||
|
||||
plt.style.use('seaborn-v0_8')
|
||||
SEED = 0
|
||||
MAX_CLUSTERS = 10
|
||||
N_SAMPLES = 1000
|
||||
N_COLS = 3
|
||||
FIGSIZE = 7, 7 # does not affect size in webpage
|
||||
COLORS = [
|
||||
'blue', 'orange', 'green', 'red', 'purple', 'brown', 'pink', 'gray', 'olive', 'cyan'
|
||||
]
|
||||
if len(COLORS) <= MAX_CLUSTERS:
|
||||
raise ValueError("Not enough different colors for all clusters")
|
||||
np.random.seed(SEED)
|
||||
|
||||
|
||||
def normalize(X):
|
||||
return StandardScaler().fit_transform(X)
|
||||
|
||||
def get_regular(n_clusters):
|
||||
# spiral pattern
|
||||
centers = [
|
||||
[0, 0],
|
||||
[1, 0],
|
||||
[1, 1],
|
||||
[0, 1],
|
||||
[-1, 1],
|
||||
[-1, 0],
|
||||
[-1, -1],
|
||||
[0, -1],
|
||||
[1, -1],
|
||||
[2, -1],
|
||||
][:n_clusters]
|
||||
assert len(centers) == n_clusters
|
||||
X, labels = make_blobs(n_samples=N_SAMPLES, centers=centers, cluster_std=0.25, random_state=SEED)
|
||||
return normalize(X), labels
|
||||
|
||||
|
||||
def get_circles(n_clusters):
|
||||
X, labels = make_circles(n_samples=N_SAMPLES, factor=0.5, noise=0.05, random_state=SEED)
|
||||
return normalize(X), labels
|
||||
|
||||
|
||||
def get_moons(n_clusters):
|
||||
X, labels = make_moons(n_samples=N_SAMPLES, noise=0.05, random_state=SEED)
|
||||
return normalize(X), labels
|
||||
|
||||
|
||||
def get_noise(n_clusters):
|
||||
np.random.seed(SEED)
|
||||
X, labels = np.random.rand(N_SAMPLES, 2), np.random.randint(0, n_clusters, size=(N_SAMPLES,))
|
||||
return normalize(X), labels
|
||||
|
||||
|
||||
def get_anisotropic(n_clusters):
|
||||
X, labels = make_blobs(n_samples=N_SAMPLES, centers=n_clusters, random_state=170)
|
||||
transformation = [[0.6, -0.6], [-0.4, 0.8]]
|
||||
X = np.dot(X, transformation)
|
||||
return X, labels
|
||||
|
||||
|
||||
def get_varied(n_clusters):
|
||||
cluster_std = [1.0, 2.5, 0.5, 1.0, 2.5, 0.5, 1.0, 2.5, 0.5, 1.0][:n_clusters]
|
||||
assert len(cluster_std) == n_clusters
|
||||
X, labels = make_blobs(
|
||||
n_samples=N_SAMPLES, centers=n_clusters, cluster_std=cluster_std, random_state=SEED
|
||||
)
|
||||
return normalize(X), labels
|
||||
|
||||
|
||||
def get_spiral(n_clusters):
|
||||
# from https://scikit-learn.org/stable/auto_examples/cluster/plot_agglomerative_clustering.html
|
||||
np.random.seed(SEED)
|
||||
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, N_SAMPLES))
|
||||
x = t * np.cos(t)
|
||||
y = t * np.sin(t)
|
||||
X = np.concatenate((x, y))
|
||||
X += 0.7 * np.random.randn(2, N_SAMPLES)
|
||||
X = np.ascontiguousarray(X.T)
|
||||
|
||||
labels = np.zeros(N_SAMPLES, dtype=int)
|
||||
return normalize(X), labels
|
||||
|
||||
|
||||
DATA_MAPPING = {
|
||||
'regular': get_regular,
|
||||
'circles': get_circles,
|
||||
'moons': get_moons,
|
||||
'spiral': get_spiral,
|
||||
'noise': get_noise,
|
||||
'anisotropic': get_anisotropic,
|
||||
'varied': get_varied,
|
||||
}
|
||||
|
||||
|
||||
def get_groundtruth_model(X, labels, n_clusters, **kwargs):
|
||||
# dummy model to show true label distribution
|
||||
class Dummy:
|
||||
def __init__(self, y):
|
||||
self.labels_ = labels
|
||||
|
||||
return Dummy(labels)
|
||||
|
||||
|
||||
def get_kmeans(X, labels, n_clusters, **kwargs):
|
||||
model = KMeans(init="k-means++", n_clusters=n_clusters, n_init=10, random_state=SEED)
|
||||
model.set_params(**kwargs)
|
||||
return model.fit(X)
|
||||
|
||||
|
||||
def get_dbscan(X, labels, n_clusters, **kwargs):
|
||||
model = DBSCAN(eps=0.3)
|
||||
model.set_params(**kwargs)
|
||||
return model.fit(X)
|
||||
|
||||
|
||||
def get_agglomerative(X, labels, n_clusters, **kwargs):
|
||||
connectivity = kneighbors_graph(
|
||||
X, n_neighbors=n_clusters, include_self=False
|
||||
)
|
||||
# make connectivity symmetric
|
||||
connectivity = 0.5 * (connectivity + connectivity.T)
|
||||
model = AgglomerativeClustering(
|
||||
n_clusters=n_clusters, linkage="ward", connectivity=connectivity
|
||||
)
|
||||
model.set_params(**kwargs)
|
||||
return model.fit(X)
|
||||
|
||||
|
||||
def get_meanshift(X, labels, n_clusters, **kwargs):
|
||||
bandwidth = estimate_bandwidth(X, quantile=0.25)
|
||||
model = MeanShift(bandwidth=bandwidth, bin_seeding=True)
|
||||
model.set_params(**kwargs)
|
||||
return model.fit(X)
|
||||
|
||||
|
||||
def get_spectral(X, labels, n_clusters, **kwargs):
|
||||
model = SpectralClustering(
|
||||
n_clusters=n_clusters,
|
||||
eigen_solver="arpack",
|
||||
affinity="nearest_neighbors",
|
||||
)
|
||||
model.set_params(**kwargs)
|
||||
return model.fit(X)
|
||||
|
||||
|
||||
def get_optics(X, labels, n_clusters, **kwargs):
|
||||
model = OPTICS(
|
||||
min_samples=7,
|
||||
xi=0.05,
|
||||
min_cluster_size=0.1,
|
||||
)
|
||||
model.set_params(**kwargs)
|
||||
return model.fit(X)
|
||||
|
||||
|
||||
def get_birch(X, labels, n_clusters, **kwargs):
|
||||
model = Birch(n_clusters=n_clusters)
|
||||
model.set_params(**kwargs)
|
||||
return model.fit(X)
|
||||
|
||||
|
||||
def get_gaussianmixture(X, labels, n_clusters, **kwargs):
|
||||
model = GaussianMixture(
|
||||
n_components=n_clusters, covariance_type="full", random_state=SEED,
|
||||
)
|
||||
model.set_params(**kwargs)
|
||||
return model.fit(X)
|
||||
|
||||
|
||||
MODEL_MAPPING = {
|
||||
'True labels': get_groundtruth_model,
|
||||
'KMeans': get_kmeans,
|
||||
'DBSCAN': get_dbscan,
|
||||
'MeanShift': get_meanshift,
|
||||
'SpectralClustering': get_spectral,
|
||||
'OPTICS': get_optics,
|
||||
'Birch': get_birch,
|
||||
'GaussianMixture': get_gaussianmixture,
|
||||
'AgglomerativeClustering': get_agglomerative,
|
||||
}
|
||||
|
||||
|
||||
def plot_clusters(ax, X, labels):
|
||||
set_clusters = set(labels)
|
||||
set_clusters.discard(-1) # -1 signifiies outliers, which we plot separately
|
||||
for label, color in zip(sorted(set_clusters), COLORS):
|
||||
idx = labels == label
|
||||
if not sum(idx):
|
||||
continue
|
||||
ax.scatter(X[idx, 0], X[idx, 1], color=color)
|
||||
|
||||
# show outliers (if any)
|
||||
idx = labels == -1
|
||||
if sum(idx):
|
||||
ax.scatter(X[idx, 0], X[idx, 1], c='k', marker='x')
|
||||
|
||||
ax.grid(None)
|
||||
ax.set_xticks([])
|
||||
ax.set_yticks([])
|
||||
return ax
|
||||
|
||||
|
||||
def cluster(dataset: str, n_clusters: int, clustering_algorithm: str):
|
||||
if isinstance(n_clusters, dict):
|
||||
n_clusters = n_clusters['value']
|
||||
else:
|
||||
n_clusters = int(n_clusters)
|
||||
|
||||
X, labels = DATA_MAPPING[dataset](n_clusters)
|
||||
model = MODEL_MAPPING[clustering_algorithm](X, labels, n_clusters=n_clusters)
|
||||
if hasattr(model, "labels_"):
|
||||
y_pred = model.labels_.astype(int)
|
||||
else:
|
||||
y_pred = model.predict(X)
|
||||
|
||||
fig, ax = plt.subplots(figsize=FIGSIZE)
|
||||
|
||||
plot_clusters(ax, X, y_pred)
|
||||
ax.set_title(clustering_algorithm, fontsize=16)
|
||||
|
||||
return fig
|
||||
|
||||
|
||||
title = "Clustering with Scikit-learn"
|
||||
description = (
|
||||
"This example shows how different clustering algorithms work. Simply pick "
|
||||
"the dataset and the number of clusters to see how the clustering algorithms work. "
|
||||
"Colored circles are (predicted) labels and black x are outliers."
)


def iter_grid(n_rows, n_cols):
    # create a grid using gradio Block
    for _ in range(n_rows):
        with gr.Row():
            for _ in range(n_cols):
                with gr.Column():
                    yield

with gr.Blocks(title=title) as demo:
    gr.HTML(f"<b>{title}</b>")
    gr.Markdown(description)

    input_models = list(MODEL_MAPPING)
    input_data = gr.Radio(
        list(DATA_MAPPING),
        value="regular",
        label="dataset"
    )
    input_n_clusters = gr.Slider(
        minimum=1,
        maximum=MAX_CLUSTERS,
        value=4,
        step=1,
        label='Number of clusters'
    )
    n_rows = int(math.ceil(len(input_models) / N_COLS))
    counter = 0
    for _ in iter_grid(n_rows, N_COLS):
        if counter >= len(input_models):
            break

        input_model = input_models[counter]
        plot = gr.Plot(label=input_model)
        fn = partial(cluster, clustering_algorithm=input_model)
        input_data.change(fn=fn, inputs=[input_data, input_n_clusters], outputs=plot)
        input_n_clusters.change(fn=fn, inputs=[input_data, input_n_clusters], outputs=plot)
        counter += 1

demo.launch()
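Every factory in MODEL_MAPPING shares the signature (X, labels, n_clusters, **kwargs) and returns a fitted model, which is what lets cluster() dispatch by name and partial() pin one algorithm per plot. A minimal sketch of extending the table under that contract (AffinityPropagation is an illustrative addition, not one of the algorithms above):

from sklearn.cluster import AffinityPropagation

def get_affinity_propagation(X, labels, n_clusters, **kwargs):
    # Same factory shape as the get_* helpers above: ignore the ground-truth
    # labels, build the estimator, apply overrides, and return it fitted.
    model = AffinityPropagation(random_state=0)
    model.set_params(**kwargs)
    return model.fit(X)

MODEL_MAPPING['AffinityPropagation'] = get_affinity_propagation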
@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: color_generator"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio opencv-python numpy"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import cv2\n", "import numpy as np\n", "import random\n", "\n", "\n", "# Convert decimal color to hexadecimal color\n", "def RGB_to_Hex(rgb):\n", " color = \"#\"\n", " for i in rgb:\n", " num = int(i)\n", " color += str(hex(num))[-2:].replace(\"x\", \"0\").upper()\n", " return color\n", "\n", "\n", "# Randomly generate light or dark colors\n", "def random_color(is_light=True):\n", " return (\n", " random.randint(0, 127) + int(is_light) * 128,\n", " random.randint(0, 127) + int(is_light) * 128,\n", " random.randint(0, 127) + int(is_light) * 128,\n", " )\n", "\n", "\n", "def switch_color(color_style):\n", " if color_style == \"light\":\n", " is_light = True\n", " elif color_style == \"dark\":\n", " is_light = False\n", " back_color_ = random_color(is_light) # Randomly generate colors\n", " back_color = RGB_to_Hex(back_color_) # Convert to hexadecimal\n", "\n", " # Draw color pictures.\n", " w, h = 50, 50\n", " img = np.zeros((h, w, 3), np.uint8)\n", " cv2.rectangle(img, (0, 0), (w, h), back_color_, thickness=-1)\n", "\n", " return back_color, back_color, img\n", "\n", "\n", "inputs = [gr.Radio([\"light\", \"dark\"], value=\"light\")]\n", "\n", "outputs = [\n", " gr.ColorPicker(label=\"color\"),\n", " gr.Textbox(label=\"hexadecimal color\"),\n", " gr.Image(type=\"numpy\", label=\"color picture\"),\n", "]\n", "\n", "title = \"Color Generator\"\n", "description = (\n", " \"Click the Submit button, and a dark or light color will be randomly generated.\"\n", ")\n", "\n", "demo = gr.Interface(\n", " fn=switch_color,\n", " inputs=inputs,\n", " outputs=outputs,\n", " title=title,\n", " description=description,\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: color_generator"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio opencv-python numpy"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import cv2\n", "import numpy as np\n", "import random\n", "\n", "\n", "# Convert decimal color to hexadecimal color\n", "def RGB_to_Hex(rgb):\n", " color = \"#\"\n", " for i in rgb:\n", " num = int(i)\n", " color += str(hex(num))[-2:].replace(\"x\", \"0\").upper()\n", " return color\n", "\n", "\n", "# Randomly generate light or dark colors\n", "def random_color(is_light=True):\n", " return (\n", " random.randint(0, 127) + int(is_light) * 128,\n", " random.randint(0, 127) + int(is_light) * 128,\n", " random.randint(0, 127) + int(is_light) * 128,\n", " )\n", "\n", "\n", "def switch_color(color_style):\n", " is_light = color_style == \"light\"\n", " back_color_ = random_color(is_light) # Randomly generate colors\n", " back_color = RGB_to_Hex(back_color_) # Convert to hexadecimal\n", "\n", " # Draw color pictures.\n", " w, h = 50, 50\n", " img = np.zeros((h, w, 3), np.uint8)\n", " cv2.rectangle(img, (0, 0), (w, h), back_color_, thickness=-1)\n", "\n", " return back_color, back_color, img\n", "\n", "\n", "inputs = [gr.Radio([\"light\", \"dark\"], value=\"light\")]\n", "\n", "outputs = [\n", " gr.ColorPicker(label=\"color\"),\n", " gr.Textbox(label=\"hexadecimal color\"),\n", " gr.Image(type=\"numpy\", label=\"color picture\"),\n", "]\n", "\n", "title = \"Color Generator\"\n", "description = (\n", " \"Click the Submit button, and a dark or light color will be randomly generated.\"\n", ")\n", "\n", "demo = gr.Interface(\n", " fn=switch_color,\n", " inputs=inputs,\n", " outputs=outputs,\n", " title=title,\n", " description=description,\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@ -23,10 +23,7 @@ def random_color(is_light=True):


def switch_color(color_style):
    if color_style == "light":
        is_light = True
    elif color_style == "dark":
        is_light = False
    is_light = color_style == "light"
    back_color_ = random_color(is_light) # Randomly generate colors
    back_color = RGB_to_Hex(back_color_) # Convert to hexadecimal

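RGB_to_Hex in this demo pads each channel by slicing hex() output, so hex(7) -> '0x7' -> 'x7' -> '07'. A format spec gives the same result more directly; a standalone sketch, not code from this commit:

def rgb_to_hex(rgb):
    # Render each channel as two zero-padded, uppercase hex digits.
    return "#" + "".join(f"{int(c):02X}" for c in rgb)

assert rgb_to_hex((255, 128, 0)) == "#FF8000"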
@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: dashboard\n", "### This demo shows how you can build an interactive dashboard with gradio. Click on a python library on the left hand side and then on the right hand side click on the metric you'd like to see plot over time. Data is pulled from HuggingFace Hub datasets.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio plotly"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/dashboard/helpers.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import pandas as pd\n", "import plotly.express as px\n", "from helpers import *\n", "\n", "\n", "LIBRARIES = [\"accelerate\", \"datasets\", \"diffusers\", \"evaluate\", \"gradio\", \"hub_docs\",\n", " \"huggingface_hub\", \"optimum\", \"pytorch_image_models\", \"tokenizers\", \"transformers\"]\n", "\n", "\n", "def create_pip_plot(libraries, pip_choices):\n", " if \"Pip\" not in pip_choices:\n", " return gr.Plot(visible=False)\n", " output = retrieve_pip_installs(libraries, \"Cumulated\" in pip_choices)\n", " df = pd.DataFrame(output).melt(id_vars=\"day\")\n", " plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n", " title=\"Pip installs\")\n", " plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n", " return gr.Plot(value=plot, visible=True)\n", "\n", "\n", "def create_star_plot(libraries, star_choices):\n", " if \"Stars\" not in star_choices:\n", " return gr.Plot(visible=False)\n", " output = retrieve_stars(libraries, \"Week over Week\" in star_choices)\n", " df = pd.DataFrame(output).melt(id_vars=\"day\")\n", " plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n", " title=\"Number of stargazers\")\n", " plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n", " return gr.Plot(value=plot, visible=True)\n", "\n", "\n", "def create_issue_plot(libraries, issue_choices):\n", " if \"Issue\" not in issue_choices:\n", " return gr.Plot(visible=False)\n", " output = retrieve_issues(libraries,\n", " exclude_org_members=\"Exclude org members\" in issue_choices,\n", " week_over_week=\"Week over Week\" in issue_choices)\n", " df = pd.DataFrame(output).melt(id_vars=\"day\")\n", " plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n", " title=\"Cumulated number of issues, PRs, and comments\",\n", " )\n", " plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n", " return gr.Plot(value=plot, visible=True)\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " gr.Markdown(\"## Select libraries to display\")\n", " libraries = gr.CheckboxGroup(choices=LIBRARIES, show_label=False)\n", " with gr.Column():\n", " gr.Markdown(\"## Select graphs to display\")\n", " pip = gr.CheckboxGroup(choices=[\"Pip\", \"Cumulated\"], show_label=False)\n", " stars = gr.CheckboxGroup(choices=[\"Stars\", \"Week over Week\"], show_label=False)\n", " issues = gr.CheckboxGroup(choices=[\"Issue\", \"Exclude org members\", \"week over week\"], show_label=False)\n", " with gr.Row():\n", " fetch = gr.Button(value=\"Fetch\")\n", " with gr.Row():\n", " with gr.Column():\n", " pip_plot = gr.Plot(visible=False)\n", " star_plot = gr.Plot(visible=False)\n", " issue_plot = gr.Plot(visible=False)\n", "\n", " fetch.click(create_pip_plot, inputs=[libraries, pip], outputs=pip_plot)\n", " fetch.click(create_star_plot, inputs=[libraries, stars], outputs=star_plot)\n", " fetch.click(create_issue_plot, inputs=[libraries, issues], outputs=issue_plot)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: dashboard\n", "### This demo shows how you can build an interactive dashboard with gradio. Click on a python library on the left hand side and then on the right hand side click on the metric you'd like to see plot over time. Data is pulled from HuggingFace Hub datasets.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio plotly"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/dashboard/helpers.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import pandas as pd\n", "import plotly.express as px\n", "from helpers import retrieve_pip_installs, retrieve_stars, retrieve_issues\n", "\n", "\n", "LIBRARIES = [\"accelerate\", \"datasets\", \"diffusers\", \"evaluate\", \"gradio\", \"hub_docs\",\n", " \"huggingface_hub\", \"optimum\", \"pytorch_image_models\", \"tokenizers\", \"transformers\"]\n", "\n", "\n", "def create_pip_plot(libraries, pip_choices):\n", " if \"Pip\" not in pip_choices:\n", " return gr.Plot(visible=False)\n", " output = retrieve_pip_installs(libraries, \"Cumulated\" in pip_choices)\n", " df = pd.DataFrame(output).melt(id_vars=\"day\")\n", " plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n", " title=\"Pip installs\")\n", " plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n", " return gr.Plot(value=plot, visible=True)\n", "\n", "\n", "def create_star_plot(libraries, star_choices):\n", " if \"Stars\" not in star_choices:\n", " return gr.Plot(visible=False)\n", " output = retrieve_stars(libraries, \"Week over Week\" in star_choices)\n", " df = pd.DataFrame(output).melt(id_vars=\"day\")\n", " plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n", " title=\"Number of stargazers\")\n", " plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n", " return gr.Plot(value=plot, visible=True)\n", "\n", "\n", "def create_issue_plot(libraries, issue_choices):\n", " if \"Issue\" not in issue_choices:\n", " return gr.Plot(visible=False)\n", " output = retrieve_issues(libraries,\n", " exclude_org_members=\"Exclude org members\" in issue_choices,\n", " week_over_week=\"Week over Week\" in issue_choices)\n", " df = pd.DataFrame(output).melt(id_vars=\"day\")\n", " plot = px.line(df, x=\"day\", y=\"value\", color=\"variable\",\n", " title=\"Cumulated number of issues, PRs, and comments\",\n", " )\n", " plot.update_layout(legend=dict(x=0.5, y=0.99), title_x=0.5, legend_title_text=\"\")\n", " return gr.Plot(value=plot, visible=True)\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " gr.Markdown(\"## Select libraries to display\")\n", " libraries = gr.CheckboxGroup(choices=LIBRARIES, show_label=False)\n", " with gr.Column():\n", " gr.Markdown(\"## Select graphs to display\")\n", " pip = gr.CheckboxGroup(choices=[\"Pip\", \"Cumulated\"], show_label=False)\n", " stars = gr.CheckboxGroup(choices=[\"Stars\", \"Week over Week\"], show_label=False)\n", " issues = gr.CheckboxGroup(choices=[\"Issue\", \"Exclude org members\", \"week over week\"], show_label=False)\n", " with gr.Row():\n", " fetch = gr.Button(value=\"Fetch\")\n", " with gr.Row():\n", " with gr.Column():\n", " pip_plot = gr.Plot(visible=False)\n", " star_plot = gr.Plot(visible=False)\n", " issue_plot = gr.Plot(visible=False)\n", "\n", " fetch.click(create_pip_plot, inputs=[libraries, pip], outputs=pip_plot)\n", " fetch.click(create_star_plot, inputs=[libraries, stars], outputs=star_plot)\n", " fetch.click(create_issue_plot, inputs=[libraries, issues], outputs=issue_plot)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@ -1,7 +1,7 @@
import gradio as gr
import pandas as pd
import plotly.express as px
from helpers import *
from helpers import retrieve_pip_installs, retrieve_stars, retrieve_issues


LIBRARIES = ["accelerate", "datasets", "diffusers", "evaluate", "gradio", "hub_docs",
File diff suppressed because one or more lines are too long
@ -17,11 +17,11 @@ def process_image(image_path):
        Image.Resampling.LANCZOS)

    # prepare image for the model
    encoding = feature_extractor(image, return_tensors="pt")
    encoding = feature_extractor(image, return_tensors="pt") # type: ignore

    # forward pass
    with torch.no_grad():
        outputs = model(**encoding)
        outputs = model(**encoding) # type: ignore
        predicted_depth = outputs.predicted_depth

    # interpolate to original size
@ -105,8 +105,8 @@ iface = gr.Interface(fn=process_image,
                     inputs=[gr.Image(
                         type="filepath", label="Input Image")],
                     outputs=[gr.Image(label="predicted depth", type="pil"),
                              gr.Model3D(label="3d mesh reconstruction", clear_color=[
                                  1.0, 1.0, 1.0, 1.0]),
                              gr.Model3D(label="3d mesh reconstruction", clear_color=(
                                  1.0, 1.0, 1.0, 1.0)),
                              gr.File(label="3d gLTF")],
                     title=title,
                     description=description,
@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: diffusers_with_batching"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch transformers diffusers"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import torch\n", "from diffusers import DiffusionPipeline\n", "import gradio as gr\n", "\n", "generator = DiffusionPipeline.from_pretrained(\"CompVis/ldm-text2im-large-256\")\n", "# move to GPU if available\n", "if torch.cuda.is_available():\n", " generator = generator.to(\"cuda\")\n", "\n", "def generate(prompts):\n", " images = generator(list(prompts)).images\n", " return [images]\n", "\n", "demo = gr.Interface(generate, \n", " \"textbox\", \n", " \"image\", \n", " batch=True, \n", " max_batch_size=4 # Set the batch size based on your CPU/GPU memory\n", ").queue()\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: diffusers_with_batching"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch transformers diffusers"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import torch\n", "from diffusers import DiffusionPipeline # type: ignore\n", "import gradio as gr\n", "\n", "generator = DiffusionPipeline.from_pretrained(\"CompVis/ldm-text2im-large-256\")\n", "# move to GPU if available\n", "if torch.cuda.is_available():\n", " generator = generator.to(\"cuda\")\n", "\n", "def generate(prompts):\n", " images = generator(list(prompts)).images # type: ignore\n", " return [images]\n", "\n", "demo = gr.Interface(generate, \n", " \"textbox\", \n", " \"image\", \n", " batch=True, \n", " max_batch_size=4 # Set the batch size based on your CPU/GPU memory\n", ").queue()\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@ -1,5 +1,5 @@
import torch
from diffusers import DiffusionPipeline
from diffusers import DiffusionPipeline # type: ignore
import gradio as gr

generator = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256")
@ -8,7 +8,7 @@ if torch.cuda.is_available():
    generator = generator.to("cuda")

def generate(prompts):
    images = generator(list(prompts)).images
    images = generator(list(prompts)).images # type: ignore
    return [images]

demo = gr.Interface(generate,
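The handler above runs with batch=True, where Gradio passes the function a list of samples per input component and expects one list per output component; that is why generate converts prompts with list(...) and returns [images]. A model-free sketch of the same contract (illustrative only, not code from this commit):

import gradio as gr

def reverse_batch(words):
    # words arrives as a list of strings; return one list per output component.
    return [[w[::-1] for w in words]]

demo = gr.Interface(reverse_batch, "textbox", "textbox", batch=True, max_batch_size=4)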
@ -1 +0,0 @@
tensorflow
@ -1 +0,0 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: digit_classifier"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio tensorflow"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["from urllib.request import urlretrieve\n", "\n", "import tensorflow as tf\n", "\n", "import gradio as gr\n", "\n", "urlretrieve(\n", " \"https://gr-models.s3-us-west-2.amazonaws.com/mnist-model.h5\", \"mnist-model.h5\"\n", ")\n", "model = tf.keras.models.load_model(\"mnist-model.h5\")\n", "\n", "\n", "def recognize_digit(image):\n", " image = image.reshape(1, -1)\n", " prediction = model.predict(image).tolist()[0]\n", " return {str(i): prediction[i] for i in range(10)}\n", "\n", "\n", "im = gr.Image(shape=(28, 28), image_mode=\"L\", invert_colors=False, source=\"canvas\")\n", "\n", "demo = gr.Interface(\n", " recognize_digit,\n", " im,\n", " gr.Label(num_top_classes=3),\n", " live=True,\n", " capture_session=True,\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@ -1,30 +0,0 @@
from urllib.request import urlretrieve

import tensorflow as tf

import gradio as gr

urlretrieve(
    "https://gr-models.s3-us-west-2.amazonaws.com/mnist-model.h5", "mnist-model.h5"
)
model = tf.keras.models.load_model("mnist-model.h5")


def recognize_digit(image):
    image = image.reshape(1, -1)
    prediction = model.predict(image).tolist()[0]
    return {str(i): prediction[i] for i in range(10)}


im = gr.Image(shape=(28, 28), image_mode="L", invert_colors=False, source="canvas")

demo = gr.Interface(
    recognize_digit,
    im,
    gr.Label(num_top_classes=3),
    live=True,
    capture_session=True,
)

if __name__ == "__main__":
    demo.launch()
Binary file not shown.
Before Width: | Height: | Size: 40 KiB
@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: english_translator"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio transformers torch"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "from transformers import pipeline\n", "\n", "pipe = pipeline(\"translation\", model=\"t5-base\")\n", "\n", "\n", "def translate(text):\n", " return pipe(text)[0][\"translation_text\"]\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " english = gr.Textbox(label=\"English text\")\n", " translate_btn = gr.Button(value=\"Translate\")\n", " with gr.Column():\n", " german = gr.Textbox(label=\"German Text\")\n", "\n", " translate_btn.click(translate, inputs=english, outputs=german, api_name=\"translate-to-german\")\n", " examples = gr.Examples(examples=[\"I went to the supermarket yesterday.\", \"Helen is a good swimmer.\"],\n", " inputs=[english])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: english_translator"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio transformers torch"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "from transformers import pipeline\n", "\n", "pipe = pipeline(\"translation\", model=\"t5-base\")\n", "\n", "\n", "def translate(text):\n", " return pipe(text)[0][\"translation_text\"] # type: ignore\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " english = gr.Textbox(label=\"English text\")\n", " translate_btn = gr.Button(value=\"Translate\")\n", " with gr.Column():\n", " german = gr.Textbox(label=\"German Text\")\n", "\n", " translate_btn.click(translate, inputs=english, outputs=german, api_name=\"translate-to-german\")\n", " examples = gr.Examples(examples=[\"I went to the supermarket yesterday.\", \"Helen is a good swimmer.\"],\n", " inputs=[english])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@ -6,7 +6,7 @@ pipe = pipeline("translation", model="t5-base")


def translate(text):
    return pipe(text)[0]["translation_text"]
    return pipe(text)[0]["translation_text"] # type: ignore


with gr.Blocks() as demo:
@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: file_explorer"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from pathlib import Path\n", "\n", "current_file_path = Path(__file__).resolve()\n", "relative_path = \"path/to/file\"\n", "absolute_path = (current_file_path.parent / \"..\" / \"..\" / \"gradio\").resolve()\n", "\n", "\n", "def get_file_content(file):\n", " return (file,)\n", "\n", "\n", "with gr.Blocks() as demo:\n", " gr.Markdown('### `FileExplorer` to `FileExplorer` -- `file_count=\"multiple\"`')\n", " submit_btn = gr.Button(\"Select\")\n", " with gr.Row():\n", " file = gr.FileExplorer(\n", " glob=\"**/components/*.py\",\n", " # value=[\"themes/utils\"],\n", " root=absolute_path,\n", " ignore_glob=\"**/__init__.py\",\n", " )\n", "\n", " file2 = gr.FileExplorer(\n", " glob=\"**/components/**/*.py\",\n", " root=absolute_path,\n", " ignore_glob=\"**/__init__.py\",\n", " )\n", " submit_btn.click(lambda x: x, file, file2)\n", "\n", " gr.Markdown(\"---\")\n", " gr.Markdown('### `FileExplorer` to `Code` -- `file_count=\"single\"`')\n", " with gr.Group():\n", " with gr.Row():\n", " file_3 = gr.FileExplorer(\n", " scale=1,\n", " glob=\"**/components/**/*.py\",\n", " value=[\"themes/utils\"],\n", " file_count=\"single\",\n", " root=absolute_path,\n", " ignore_glob=\"**/__init__.py\",\n", " elem_id=\"file\",\n", " )\n", "\n", " code = gr.Code(lines=30, scale=2, language=\"python\")\n", "\n", " file_3.change(get_file_content, file_3, code)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: file_explorer"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from pathlib import Path\n", "\n", "current_file_path = Path(__file__).resolve()\n", "relative_path = \"path/to/file\"\n", "absolute_path = (current_file_path.parent / \"..\" / \"..\" / \"gradio\").resolve()\n", "\n", "\n", "def get_file_content(file):\n", " return (file,)\n", "\n", "\n", "with gr.Blocks() as demo:\n", " gr.Markdown('### `FileExplorer` to `FileExplorer` -- `file_count=\"multiple\"`')\n", " submit_btn = gr.Button(\"Select\")\n", " with gr.Row():\n", " file = gr.FileExplorer(\n", " glob=\"**/components/*.py\",\n", " # value=[\"themes/utils\"],\n", " root_dir=absolute_path,\n", " ignore_glob=\"**/__init__.py\",\n", " )\n", "\n", " file2 = gr.FileExplorer(\n", " glob=\"**/components/**/*.py\",\n", " root_dir=absolute_path,\n", " ignore_glob=\"**/__init__.py\",\n", " )\n", " submit_btn.click(lambda x: x, file, file2)\n", "\n", " gr.Markdown(\"---\")\n", " gr.Markdown('### `FileExplorer` to `Code` -- `file_count=\"single\"`')\n", " with gr.Group():\n", " with gr.Row():\n", " file_3 = gr.FileExplorer(\n", " scale=1,\n", " glob=\"**/components/**/*.py\",\n", " value=[\"themes/utils\"],\n", " file_count=\"single\",\n", " root_dir=absolute_path,\n", " ignore_glob=\"**/__init__.py\",\n", " elem_id=\"file\",\n", " )\n", "\n", " code = gr.Code(lines=30, scale=2, language=\"python\")\n", "\n", " file_3.change(get_file_content, file_3, code)\n", "\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@ -17,13 +17,13 @@ with gr.Blocks() as demo:
        file = gr.FileExplorer(
            glob="**/components/*.py",
            # value=["themes/utils"],
            root=absolute_path,
            root_dir=absolute_path,
            ignore_glob="**/__init__.py",
        )

        file2 = gr.FileExplorer(
            glob="**/components/**/*.py",
            root=absolute_path,
            root_dir=absolute_path,
            ignore_glob="**/__init__.py",
        )
    submit_btn.click(lambda x: x, file, file2)
@ -37,7 +37,7 @@ with gr.Blocks() as demo:
                glob="**/components/**/*.py",
                value=["themes/utils"],
                file_count="single",
                root=absolute_path,
                root_dir=absolute_path,
                ignore_glob="**/__init__.py",
                elem_id="file",
            )
@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: generate_english_german"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio transformers torch"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "from transformers import pipeline\n", "\n", "english_translator = gr.load(name=\"spaces/gradio/english_translator\")\n", "english_generator = pipeline(\"text-generation\", model=\"distilgpt2\")\n", "\n", "\n", "def generate_text(text):\n", " english_text = english_generator(text)[0][\"generated_text\"]\n", " german_text = english_translator(english_text)\n", " return english_text, german_text\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " seed = gr.Text(label=\"Input Phrase\")\n", " with gr.Column():\n", " english = gr.Text(label=\"Generated English Text\")\n", " german = gr.Text(label=\"Generated German Text\")\n", " btn = gr.Button(\"Generate\")\n", " btn.click(generate_text, inputs=[seed], outputs=[english, german])\n", " gr.Examples([\"My name is Clara and I am\"], inputs=[seed])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: generate_english_german"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio transformers torch"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "from transformers import pipeline\n", "\n", "english_translator = gr.load(name=\"spaces/gradio/english_translator\")\n", "english_generator = pipeline(\"text-generation\", model=\"distilgpt2\")\n", "\n", "\n", "def generate_text(text):\n", " english_text = english_generator(text)[0][\"generated_text\"] # type: ignore\n", " german_text = english_translator(english_text)\n", " return english_text, german_text\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " with gr.Column():\n", " seed = gr.Text(label=\"Input Phrase\")\n", " with gr.Column():\n", " english = gr.Text(label=\"Generated English Text\")\n", " german = gr.Text(label=\"Generated German Text\")\n", " btn = gr.Button(\"Generate\")\n", " btn.click(generate_text, inputs=[seed], outputs=[english, german])\n", " gr.Examples([\"My name is Clara and I am\"], inputs=[seed])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@ -7,7 +7,7 @@ english_generator = pipeline("text-generation", model="distilgpt2")


def generate_text(text):
    english_text = english_generator(text)[0]["generated_text"]
    english_text = english_generator(text)[0]["generated_text"] # type: ignore
    german_text = english_translator(english_text)
    return english_text, german_text

@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: generate_tone"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import numpy as np\n", "import gradio as gr\n", "\n", "notes = [\"C\", \"C#\", \"D\", \"D#\", \"E\", \"F\", \"F#\", \"G\", \"G#\", \"A\", \"A#\", \"B\"]\n", "\n", "def generate_tone(note, octave, duration):\n", " sr = 48000\n", " a4_freq, tones_from_a4 = 440, 12 * (octave - 4) + (note - 9)\n", " frequency = a4_freq * 2 ** (tones_from_a4 / 12)\n", " duration = int(duration)\n", " audio = np.linspace(0, duration, duration * sr)\n", " audio = (20000 * np.sin(audio * (2 * np.pi * frequency))).astype(np.int16)\n", " return sr, audio\n", "\n", "demo = gr.Interface(\n", " generate_tone,\n", " [\n", " gr.Dropdown(notes, type=\"index\"),\n", " gr.Slider(4, 6, step=1),\n", " gr.Textbox(value=1, label=\"Duration in seconds\"),\n", " ],\n", " \"audio\",\n", ")\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: generate_tone"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import numpy as np\n", "import gradio as gr\n", "\n", "notes = [\"C\", \"C#\", \"D\", \"D#\", \"E\", \"F\", \"F#\", \"G\", \"G#\", \"A\", \"A#\", \"B\"]\n", "\n", "def generate_tone(note, octave, duration):\n", " sr = 48000\n", " a4_freq, tones_from_a4 = 440, 12 * (octave - 4) + (note - 9)\n", " frequency = a4_freq * 2 ** (tones_from_a4 / 12)\n", " duration = int(duration)\n", " audio = np.linspace(0, duration, duration * sr)\n", " audio = (20000 * np.sin(audio * (2 * np.pi * frequency))).astype(np.int16)\n", " return sr, audio\n", "\n", "demo = gr.Interface(\n", " generate_tone,\n", " [\n", " gr.Dropdown(notes, type=\"index\"),\n", " gr.Slider(4, 6, step=1),\n", " gr.Textbox(value=\"1\", label=\"Duration in seconds\"),\n", " ],\n", " \"audio\",\n", ")\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@ -17,7 +17,7 @@ demo = gr.Interface(
    [
        gr.Dropdown(notes, type="index"),
        gr.Slider(4, 6, step=1),
        gr.Textbox(value=1, label="Duration in seconds"),
        gr.Textbox(value="1", label="Duration in seconds"),
    ],
    "audio",
)
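The quoted-value fix above matters because generate_tone later calls int(duration) on the textbox value. The pitch itself comes from equal temperament: each semitone scales frequency by 2**(1/12) around A4 = 440 Hz, with A at index 9 of the demo's notes list. A quick standalone check of that formula (sketch only):

def note_frequency(note_index, octave, a4_freq=440.0):
    # note_index follows the demo's notes list, so C=0 ... A=9 ... B=11.
    tones_from_a4 = 12 * (octave - 4) + (note_index - 9)
    return a4_freq * 2 ** (tones_from_a4 / 12)

print(round(note_frequency(0, 4), 2))  # middle C: ~261.63 Hz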
@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: gif_maker"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio opencv-python"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import cv2\n", "import gradio as gr\n", "\n", "def gif_maker(img_files):\n", " img_array = []\n", " for filename in img_files:\n", " img = cv2.imread(filename.name)\n", " height, width, _ = img.shape\n", " size = (width,height)\n", " img_array.append(img)\n", " output_file = \"test.mp4\"\n", " out = cv2.VideoWriter(output_file,cv2.VideoWriter_fourcc(*'h264'), 15, size) \n", " for i in range(len(img_array)):\n", " out.write(img_array[i])\n", " out.release()\n", " return output_file\n", "\n", "demo = gr.Interface(gif_maker, inputs=gr.File(file_count=\"multiple\"), outputs=gr.Video())\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: gif_maker"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio opencv-python"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import cv2\n", "import gradio as gr\n", "\n", "def gif_maker(img_files):\n", " img_array = []\n", " size = (1, 1)\n", " for filename in img_files:\n", " img = cv2.imread(filename.name)\n", " height, width, _ = img.shape\n", " size = (width,height)\n", " img_array.append(img)\n", " output_file = \"test.mp4\"\n", " out = cv2.VideoWriter(output_file,cv2.VideoWriter_fourcc(*'h264'), 15, size) \n", " for i in range(len(img_array)):\n", " out.write(img_array[i])\n", " out.release()\n", " return output_file\n", "\n", "demo = gr.Interface(gif_maker, inputs=gr.File(file_count=\"multiple\"), outputs=gr.Video())\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@ -3,6 +3,7 @@ import gradio as gr

def gif_maker(img_files):
    img_array = []
    size = (1, 1)
    for filename in img_files:
        img = cv2.imread(filename.name)
        height, width, _ = img.shape
@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: image_classifier"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy tensorflow"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/imagenet_labels.json https://github.com/gradio-app/gradio/raw/main/demo/image_classifier/files/imagenet_labels.json\n", "os.mkdir('images')\n", "!wget -q -O images/cheetah1.jpg https://github.com/gradio-app/gradio/raw/main/demo/image_classifier/images/cheetah1.jpg\n", "!wget -q -O images/lion.jpg https://github.com/gradio-app/gradio/raw/main/demo/image_classifier/images/lion.jpg"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import os\n", "import requests\n", "import tensorflow as tf\n", "\n", "import gradio as gr\n", "\n", "inception_net = tf.keras.applications.MobileNetV2() # load the model\n", "\n", "# Download human-readable labels for ImageNet.\n", "response = requests.get(\"https://git.io/JJkYN\")\n", "labels = response.text.split(\"\\n\")\n", "\n", "\n", "def classify_image(inp):\n", " inp = inp.reshape((-1, 224, 224, 3))\n", " inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n", " prediction = inception_net.predict(inp).flatten()\n", " return {labels[i]: float(prediction[i]) for i in range(1000)}\n", "\n", "\n", "image = gr.Image(shape=(224, 224))\n", "label = gr.Label(num_top_classes=3)\n", "\n", "demo = gr.Interface(\n", " fn=classify_image,\n", " inputs=image,\n", " outputs=label,\n", " examples=[\n", " os.path.join(os.path.abspath(''), \"images/cheetah1.jpg\"),\n", " os.path.join(os.path.abspath(''), \"images/lion.jpg\")\n", " ]\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n", "\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: image_classifier"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy tensorflow"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/imagenet_labels.json https://github.com/gradio-app/gradio/raw/main/demo/image_classifier/files/imagenet_labels.json\n", "os.mkdir('images')\n", "!wget -q -O images/cheetah1.jpg https://github.com/gradio-app/gradio/raw/main/demo/image_classifier/images/cheetah1.jpg\n", "!wget -q -O images/lion.jpg https://github.com/gradio-app/gradio/raw/main/demo/image_classifier/images/lion.jpg"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import os\n", "import requests\n", "import tensorflow as tf\n", "\n", "import gradio as gr\n", "\n", "inception_net = tf.keras.applications.MobileNetV2() # load the model\n", "\n", "# Download human-readable labels for ImageNet.\n", "response = requests.get(\"https://git.io/JJkYN\")\n", "labels = response.text.split(\"\\n\")\n", "\n", "\n", "def classify_image(inp):\n", " inp = inp.reshape((-1, 224, 224, 3))\n", " inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)\n", " prediction = inception_net.predict(inp).flatten()\n", " return {labels[i]: float(prediction[i]) for i in range(1000)}\n", "\n", "\n", "image = gr.Image()\n", "label = gr.Label(num_top_classes=3)\n", "\n", "demo = gr.Interface(\n", " fn=classify_image,\n", " inputs=image,\n", " outputs=label,\n", " examples=[\n", " os.path.join(os.path.abspath(''), \"images/cheetah1.jpg\"),\n", " os.path.join(os.path.abspath(''), \"images/lion.jpg\")\n", " ]\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n", "\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@ -18,7 +18,7 @@ def classify_image(inp):
    return {labels[i]: float(prediction[i]) for i in range(1000)}


image = gr.Image(shape=(224, 224))
image = gr.Image()
label = gr.Label(num_top_classes=3)

demo = gr.Interface(
@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: interface_state"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "def store_message(message: str, history: list[str]):\n", " output = {\n", " \"Current messages\": message,\n", " \"Previous messages\": history[::-1]\n", " }\n", " history.append(message)\n", " return output, history\n", "\n", "demo = gr.Interface(fn=store_message, \n", " inputs=[\"textbox\", gr.State(value=[])], \n", " outputs=[\"json\", gr.State()])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: interface_state"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "def store_message(message: str, history: list[str]): # type: ignore\n", " output = {\n", " \"Current messages\": message,\n", " \"Previous messages\": history[::-1]\n", " }\n", " history.append(message)\n", " return output, history\n", "\n", "demo = gr.Interface(fn=store_message,\n", " inputs=[\"textbox\", gr.State(value=[])], \n", " outputs=[\"json\", gr.State()])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@ -1,6 +1,6 @@
import gradio as gr

def store_message(message: str, history: list[str]):
def store_message(message: str, history: list[str]): # type: ignore
    output = {
        "Current messages": message,
        "Previous messages": history[::-1]
@ -8,7 +8,7 @@ def store_message(message: str, history: list[str]):
    history.append(message)
    return output, history

demo = gr.Interface(fn=store_message,
demo = gr.Interface(fn=store_message,
                    inputs=["textbox", gr.State(value=[])],
                    outputs=["json", gr.State()])

File diff suppressed because one or more lines are too long
@ -87,20 +87,20 @@ def update_table(
    size_query: list,
    query: str,
):
    filtered_df = filter_models(hidden_df, type_query, size_query, precision_query)
    filtered_df = filter_models(hidden_df, type_query, size_query, precision_query) # type: ignore
    filtered_df = filter_queries(query, filtered_df)
    df = select_columns(filtered_df, columns)
    return df


def search_table(df: pd.DataFrame, query: str) -> pd.DataFrame:
    return df[(df["model_name_for_query"].str.contains(query, case=False))]
    return df[(df["model_name_for_query"].str.contains(query, case=False))] # type: ignore


def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:
    # We use COLS to maintain sorting
    filtered_df = df[[c for c in COLS if c in df.columns and c in columns]]
    return filtered_df
    return filtered_df # type: ignore


def filter_queries(query: str, filtered_df: pd.DataFrame) -> pd.DataFrame:
@ -115,7 +115,7 @@ def filter_queries(query: str, filtered_df: pd.DataFrame) -> pd.DataFrame:
                final_df.append(temp_filtered_df)
    if len(final_df) > 0:
        filtered_df = pd.concat(final_df)
        filtered_df = filtered_df.drop_duplicates(
        filtered_df = filtered_df.drop_duplicates( # type: ignore
            subset=["Model", "Precision", "Model sha"]
        )

@ -136,10 +136,10 @@ def filter_models(
    filtered_df = filtered_df.loc[df["Precision"].isin(precision_query + ["None"])]

    numeric_interval = pd.IntervalIndex(
        sorted([NUMERIC_INTERVALS[s] for s in size_query])
        sorted([NUMERIC_INTERVALS[s] for s in size_query]) # type: ignore
    )
    params_column = pd.to_numeric(df["#Params (B)"], errors="coerce")
    mask = params_column.apply(lambda x: any(numeric_interval.contains(x)))
    mask = params_column.apply(lambda x: any(numeric_interval.contains(x))) # type: ignore
    filtered_df = filtered_df.loc[mask]

    return filtered_df
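The size filter above keeps rows whose parameter count falls inside any selected NUMERIC_INTERVALS bucket; the added # type: ignore comments quiet the pandas stubs without changing that logic. A small self-contained check of the masking idea (illustrative values, not the leaderboard's real buckets):

import pandas as pd

intervals = pd.IntervalIndex.from_tuples([(0, 3), (7, 13)])
params = pd.Series([1.5, 5.0, 8.0])
mask = params.apply(lambda x: any(intervals.contains(x)))
# mask is True, False, True: only 1.5 and 8.0 fall inside a selected interval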
@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: model3D"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/Bunny.obj https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/Bunny.obj\n", "!wget -q -O files/Duck.glb https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/Duck.glb\n", "!wget -q -O files/Fox.gltf https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/Fox.gltf\n", "!wget -q -O files/face.obj https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/face.obj\n", "!wget -q -O files/sofia.stl https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/sofia.stl\n", "!wget -q -O files/source.txt https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/source.txt"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "\n", "\n", "def load_mesh(mesh_file_name):\n", " return mesh_file_name\n", "\n", "demo = gr.Interface(\n", " fn=load_mesh,\n", " inputs=gr.Model3D(),\n", " outputs=gr.Model3D(\n", " clear_color=[0.0, 0.0, 0.0, 0.0], label=\"3D Model\", display_mode=\"wireframe\"),\n", " examples=[\n", " [os.path.join(os.path.abspath(''), \"files/Bunny.obj\")],\n", " [os.path.join(os.path.abspath(''), \"files/Duck.glb\")],\n", " [os.path.join(os.path.abspath(''), \"files/Fox.gltf\")],\n", " [os.path.join(os.path.abspath(''), \"files/face.obj\")],\n", " [os.path.join(os.path.abspath(''), \"files/sofia.stl\")],\n", " [\"https://huggingface.co/datasets/dylanebert/3dgs/resolve/main/bonsai/bonsai-7k-mini.splat\"],\n", " [\"https://huggingface.co/datasets/dylanebert/3dgs/resolve/main/luigi/luigi.ply\"],\n", " ],\n", " cache_examples=True\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: model3D"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/Bunny.obj https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/Bunny.obj\n", "!wget -q -O files/Duck.glb https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/Duck.glb\n", "!wget -q -O files/Fox.gltf https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/Fox.gltf\n", "!wget -q -O files/face.obj https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/face.obj\n", "!wget -q -O files/sofia.stl https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/sofia.stl\n", "!wget -q -O files/source.txt https://github.com/gradio-app/gradio/raw/main/demo/model3D/files/source.txt"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "\n", "\n", "def load_mesh(mesh_file_name):\n", " return mesh_file_name\n", "\n", "demo = gr.Interface(\n", " fn=load_mesh,\n", " inputs=gr.Model3D(),\n", " outputs=gr.Model3D(\n", " clear_color=(0.0, 0.0, 0.0, 0.0), label=\"3D Model\", display_mode=\"wireframe\"),\n", " examples=[\n", " [os.path.join(os.path.abspath(''), \"files/Bunny.obj\")],\n", " [os.path.join(os.path.abspath(''), \"files/Duck.glb\")],\n", " [os.path.join(os.path.abspath(''), \"files/Fox.gltf\")],\n", " [os.path.join(os.path.abspath(''), \"files/face.obj\")],\n", " [os.path.join(os.path.abspath(''), \"files/sofia.stl\")],\n", " [\"https://huggingface.co/datasets/dylanebert/3dgs/resolve/main/bonsai/bonsai-7k-mini.splat\"],\n", " [\"https://huggingface.co/datasets/dylanebert/3dgs/resolve/main/luigi/luigi.ply\"],\n", " ],\n", " cache_examples=True\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@ -9,7 +9,7 @@ demo = gr.Interface(
    fn=load_mesh,
    inputs=gr.Model3D(),
    outputs=gr.Model3D(
        clear_color=[0.0, 0.0, 0.0, 0.0], label="3D Model", display_mode="wireframe"),
        clear_color=(0.0, 0.0, 0.0, 0.0), label="3D Model", display_mode="wireframe"),
    examples=[
        [os.path.join(os.path.dirname(__file__), "files/Bunny.obj")],
        [os.path.join(os.path.dirname(__file__), "files/Duck.glb")],
@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: progress_simple"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import time\n", "\n", "def slowly_reverse(word, progress=gr.Progress()):\n", " progress(0, desc=\"Starting\")\n", " time.sleep(1)\n", " progress(0.05)\n", " new_string = \"\"\n", " for letter in progress.tqdm(word, desc=\"Reversing\"):\n", " time.sleep(0.25)\n", " new_string = letter + new_string\n", " return new_string\n", "\n", "demo = gr.Interface(slowly_reverse, gr.Text(), gr.Text())\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: progress_simple"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import time\n", "\n", "def slowly_reverse(word, progress=gr.Progress()):\n", " progress(0, desc=\"Starting\")\n", " time.sleep(1)\n", " progress(0.05)\n", " new_string = \"\"\n", " for letter in progress.tqdm(word, desc=\"Reversing\"):\n", " time.sleep(0.25)\n", " new_string = letter + new_string # type: ignore\n", " return new_string\n", "\n", "demo = gr.Interface(slowly_reverse, gr.Text(), gr.Text())\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@ -8,7 +8,7 @@ def slowly_reverse(word, progress=gr.Progress()):
    new_string = ""
    for letter in progress.tqdm(word, desc="Reversing"):
        time.sleep(0.25)
        new_string = letter + new_string
        new_string = letter + new_string # type: ignore
    return new_string

demo = gr.Interface(slowly_reverse, gr.Text(), gr.Text())
@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: question-answering"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch transformers"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline\n", "\n", "model_name = \"deepset/roberta-base-squad2\"\n", "\n", "nlp = pipeline(\"question-answering\", model=model_name, tokenizer=model_name)\n", "\n", "context = \"The Amazon rainforest, also known in English as Amazonia or the Amazon Jungle, is a moist broadleaf forest that covers most of the Amazon basin of South America. This basin encompasses 7,000,000 square kilometres (2,700,000 sq mi), of which 5,500,000 square kilometres (2,100,000 sq mi) are covered by the rainforest. This region includes territory belonging to nine nations. The majority of the forest is contained within Brazil, with 60% of the rainforest, followed by Peru with 13%, Colombia with 10%, and with minor amounts in Venezuela, Ecuador, Bolivia, Guyana, Suriname and French Guiana. The Amazon represents over half of the planet's remaining rainforests, and comprises the largest and most biodiverse tract of tropical rainforest in the world, with an estimated 390 billion individual trees divided into 16,000 species.\"\n", "question = \"Which continent is the Amazon rainforest in?\"\n", "\n", "\n", "def predict(context, question):\n", " res = nlp({\"question\": question, \"context\": context})\n", " return res[\"answer\"], res[\"score\"]\n", "\n", "\n", "gr.Interface(\n", " predict,\n", " inputs=[\n", " gr.Textbox(lines=7, value=context, label=\"Context Paragraph\"),\n", " gr.Textbox(lines=2, value=question, label=\"Question\"),\n", " ],\n", " outputs=[gr.Textbox(label=\"Answer\"), gr.Textbox(label=\"Score\")],\n", ").launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: question-answering"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch transformers"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline\n", "\n", "model_name = \"deepset/roberta-base-squad2\"\n", "\n", "nlp = pipeline(\"question-answering\", model=model_name, tokenizer=model_name)\n", "\n", "context = \"The Amazon rainforest, also known in English as Amazonia or the Amazon Jungle, is a moist broadleaf forest that covers most of the Amazon basin of South America. This basin encompasses 7,000,000 square kilometres (2,700,000 sq mi), of which 5,500,000 square kilometres (2,100,000 sq mi) are covered by the rainforest. This region includes territory belonging to nine nations. The majority of the forest is contained within Brazil, with 60% of the rainforest, followed by Peru with 13%, Colombia with 10%, and with minor amounts in Venezuela, Ecuador, Bolivia, Guyana, Suriname and French Guiana. The Amazon represents over half of the planet's remaining rainforests, and comprises the largest and most biodiverse tract of tropical rainforest in the world, with an estimated 390 billion individual trees divided into 16,000 species.\"\n", "question = \"Which continent is the Amazon rainforest in?\"\n", "\n", "\n", "def predict(context, question):\n", " res = nlp({\"question\": question, \"context\": context})\n", " return res[\"answer\"], res[\"score\"] # type: ignore\n", "\n", "\n", "gr.Interface(\n", " predict,\n", " inputs=[\n", " gr.Textbox(lines=7, value=context, label=\"Context Paragraph\"),\n", " gr.Textbox(lines=2, value=question, label=\"Question\"),\n", " ],\n", " outputs=[gr.Textbox(label=\"Answer\"), gr.Textbox(label=\"Score\")],\n", ").launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@@ -12,7 +12,7 @@ question = "Which continent is the Amazon rainforest in?"
 
 def predict(context, question):
     res = nlp({"question": question, "context": context})
-    return res["answer"], res["score"]
+    return res["answer"], res["score"]  # type: ignore
 
 
 gr.Interface(
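The `# type: ignore` above works around `transformers.pipeline(...)` being annotated to return a broad union of result shapes, so indexing into the result fails static checking even though a question-answering pipeline returns a dict at runtime. One type-clean alternative, sketched here under that runtime assumption (the dict shape is our assumption, not something the pipeline's annotations guarantee), is a single cast:

from typing import Any, Dict, cast
from transformers import pipeline

nlp = pipeline("question-answering", model="deepset/roberta-base-squad2")
# Cast once at the call site instead of adding per-line ignores.
res = cast(Dict[str, Any], nlp(question="Who wrote it?", context="Ada Lovelace wrote the first program."))
print(res["answer"], res["score"])  # indexing now type-checks without ignores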
File diff suppressed because one or more lines are too long
@@ -97,7 +97,7 @@ interface = gr.Interface(
     inputs=inputs,
     outputs=output,
     layout="horizontal",
-    allow_flagging=False,
+    allow_flagging="never",
     live=False,
     examples=examples,
     cache_examples=False
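For context on the `allow_flagging` change: `gr.Interface` annotates this parameter with the string literals "never", "auto", and "manual" rather than a boolean, so `False` fails the `Literal` check; "never" is the equivalent, type-correct setting. A minimal sketch:

import gradio as gr

# allow_flagging expects a literal string, not a boolean
demo = gr.Interface(fn=lambda x: x, inputs="text", outputs="text", allow_flagging="never")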
@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stable-diffusion\n", "### Note: This is a simplified version of the code needed to create the Stable Diffusion demo. See full code here: https://hf.co/spaces/stabilityai/stable-diffusion/tree/main\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio diffusers transformers nvidia-ml-py3 ftfy torch"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import torch\n", "from diffusers import StableDiffusionPipeline\n", "from PIL import Image\n", "import os\n", "\n", "auth_token = os.getenv(\"auth_token\")\n", "model_id = \"CompVis/stable-diffusion-v1-4\"\n", "device = \"cpu\"\n", "pipe = StableDiffusionPipeline.from_pretrained(\n", " model_id, use_auth_token=auth_token, revision=\"fp16\", torch_dtype=torch.float16\n", ")\n", "pipe = pipe.to(device)\n", "\n", "\n", "def infer(prompt, samples, steps, scale, seed):\n", " generator = torch.Generator(device=device).manual_seed(seed)\n", " images_list = pipe(\n", " [prompt] * samples,\n", " num_inference_steps=steps,\n", " guidance_scale=scale,\n", " generator=generator,\n", " )\n", " images = []\n", " safe_image = Image.open(r\"unsafe.png\")\n", " for i, image in enumerate(images_list[\"sample\"]):\n", " if images_list[\"nsfw_content_detected\"][i]:\n", " images.append(safe_image)\n", " else:\n", " images.append(image)\n", " return images\n", "\n", "\n", "block = gr.Blocks()\n", "\n", "with block:\n", " with gr.Group():\n", " with gr.Row():\n", " text = gr.Textbox(\n", " label=\"Enter your prompt\",\n", " max_lines=1,\n", " placeholder=\"Enter your prompt\",\n", " container=False,\n", " )\n", " btn = gr.Button(\"Generate image\")\n", " gallery = gr.Gallery(\n", " label=\"Generated images\",\n", " show_label=False,\n", " elem_id=\"gallery\",\n", " columns=[2],\n", " height=\"auto\",\n", " )\n", "\n", " advanced_button = gr.Button(\"Advanced options\", elem_id=\"advanced-btn\")\n", "\n", " with gr.Row(elem_id=\"advanced-options\"):\n", " samples = gr.Slider(label=\"Images\", minimum=1, maximum=4, value=4, step=1)\n", " steps = gr.Slider(label=\"Steps\", minimum=1, maximum=50, value=45, step=1)\n", " scale = gr.Slider(\n", " label=\"Guidance Scale\", minimum=0, maximum=50, value=7.5, step=0.1\n", " )\n", " seed = gr.Slider(\n", " label=\"Seed\",\n", " minimum=0,\n", " maximum=2147483647,\n", " step=1,\n", " randomize=True,\n", " )\n", " gr.on([text.submit, btn.click], infer, inputs=[text, samples, steps, scale, seed], outputs=gallery)\n", " advanced_button.click(\n", " None,\n", " [],\n", " text,\n", " )\n", "\n", "block.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stable-diffusion\n", "### Note: This is a simplified version of the code needed to create the Stable Diffusion demo. See full code here: https://hf.co/spaces/stabilityai/stable-diffusion/tree/main\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio diffusers transformers nvidia-ml-py3 ftfy torch"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import torch\n", "from diffusers import StableDiffusionPipeline # type: ignore\n", "from PIL import Image\n", "import os\n", "\n", "auth_token = os.getenv(\"auth_token\")\n", "model_id = \"CompVis/stable-diffusion-v1-4\"\n", "device = \"cpu\"\n", "pipe = StableDiffusionPipeline.from_pretrained(\n", " model_id, use_auth_token=auth_token, revision=\"fp16\", torch_dtype=torch.float16\n", ")\n", "pipe = pipe.to(device)\n", "\n", "\n", "def infer(prompt, samples, steps, scale, seed):\n", " generator = torch.Generator(device=device).manual_seed(seed)\n", " images_list = pipe( # type: ignore\n", " [prompt] * samples,\n", " num_inference_steps=steps,\n", " guidance_scale=scale,\n", " generator=generator,\n", " )\n", " images = []\n", " safe_image = Image.open(r\"unsafe.png\")\n", " for i, image in enumerate(images_list[\"sample\"]): # type: ignore\n", " if images_list[\"nsfw_content_detected\"][i]: # type: ignore\n", " images.append(safe_image)\n", " else:\n", " images.append(image)\n", " return images\n", "\n", "\n", "block = gr.Blocks()\n", "\n", "with block:\n", " with gr.Group():\n", " with gr.Row():\n", " text = gr.Textbox(\n", " label=\"Enter your prompt\",\n", " max_lines=1,\n", " placeholder=\"Enter your prompt\",\n", " container=False,\n", " )\n", " btn = gr.Button(\"Generate image\")\n", " gallery = gr.Gallery(\n", " label=\"Generated images\",\n", " show_label=False,\n", " elem_id=\"gallery\",\n", " columns=[2],\n", " )\n", "\n", " advanced_button = gr.Button(\"Advanced options\", elem_id=\"advanced-btn\")\n", "\n", " with gr.Row(elem_id=\"advanced-options\"):\n", " samples = gr.Slider(label=\"Images\", minimum=1, maximum=4, value=4, step=1)\n", " steps = gr.Slider(label=\"Steps\", minimum=1, maximum=50, value=45, step=1)\n", " scale = gr.Slider(\n", " label=\"Guidance Scale\", minimum=0, maximum=50, value=7.5, step=0.1\n", " )\n", " seed = gr.Slider(\n", " label=\"Seed\",\n", " minimum=0,\n", " maximum=2147483647,\n", " step=1,\n", " randomize=True,\n", " )\n", " gr.on([text.submit, btn.click], infer, inputs=[text, samples, steps, scale, seed], outputs=gallery)\n", " advanced_button.click(\n", " None,\n", " [],\n", " text,\n", " )\n", "\n", "block.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@@ -1,6 +1,6 @@
 import gradio as gr
 import torch
-from diffusers import StableDiffusionPipeline
+from diffusers import StableDiffusionPipeline  # type: ignore
 from PIL import Image
 import os
 
@@ -15,7 +15,7 @@ pipe = pipe.to(device)
 
 def infer(prompt, samples, steps, scale, seed):
     generator = torch.Generator(device=device).manual_seed(seed)
-    images_list = pipe(
+    images_list = pipe(  # type: ignore
         [prompt] * samples,
         num_inference_steps=steps,
         guidance_scale=scale,
@@ -23,8 +23,8 @@ def infer(prompt, samples, steps, scale, seed):
     )
     images = []
     safe_image = Image.open(r"unsafe.png")
-    for i, image in enumerate(images_list["sample"]):
-        if images_list["nsfw_content_detected"][i]:
+    for i, image in enumerate(images_list["sample"]):  # type: ignore
+        if images_list["nsfw_content_detected"][i]:  # type: ignore
             images.append(safe_image)
         else:
             images.append(image)
@@ -48,7 +48,6 @@ with block:
             show_label=False,
             elem_id="gallery",
             columns=[2],
-            height="auto",
         )
 
     advanced_button = gr.Button("Advanced options", elem_id="advanced-btn")
@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: state_cleanup"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["from __future__ import annotations\n", "import gradio as gr\n", "import numpy as np\n", "from PIL import Image\n", "from pathlib import Path\n", "import secrets\n", "import shutil\n", "\n", "current_dir = Path(__file__).parent\n", "\n", "\n", "def generate_random_img(history: list[Image.Image], request: gr.Request):\n", " \"\"\"Generate a random red, green, blue, orange, yellor or purple image.\"\"\"\n", " colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 165, 0), (255, 255, 0), (128, 0, 128)]\n", " color = colors[np.random.randint(0, len(colors))]\n", " img = Image.new('RGB', (100, 100), color)\n", "\n", " user_dir: Path = current_dir / request.session_hash\n", " user_dir.mkdir(exist_ok=True)\n", " path = user_dir / f\"{secrets.token_urlsafe(8)}.webp\"\n", "\n", " img.save(path)\n", " history.append(img)\n", "\n", " return img, history, history\n", "\n", "def delete_directory(req: gr.Request):\n", " if not req.username:\n", " return\n", " user_dir: Path = current_dir / req.username\n", " shutil.rmtree(str(user_dir))\n", "\n", "with gr.Blocks(delete_cache=(60, 3600)) as demo:\n", " gr.Markdown(\"\"\"# State Cleanup Demo\n", " \ud83d\uddbc\ufe0f Images are saved in a user-specific directory and deleted when the users closes the page via demo.unload.\n", " \"\"\")\n", " with gr.Row():\n", " with gr.Column(scale=1):\n", " with gr.Row():\n", " img = gr.Image(label=\"Generated Image\", height=300, width=300)\n", " with gr.Row():\n", " gen = gr.Button(value=\"Generate\")\n", " with gr.Row():\n", " history = gr.Gallery(label=\"Previous Generations\", height=500, columns=10)\n", " state = gr.State(value=[], delete_callback=lambda v: print(\"STATE DELETED\"))\n", "\n", " demo.load(generate_random_img, [state], [img, state, history])\n", " gen.click(generate_random_img, [state], [img, state, history])\n", " demo.unload(delete_directory)\n", "\n", "\n", "demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: state_cleanup"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["from __future__ import annotations\n", "import gradio as gr\n", "import numpy as np\n", "from PIL import Image\n", "from pathlib import Path\n", "import secrets\n", "import shutil\n", "\n", "current_dir = Path(__file__).parent\n", "\n", "\n", "def generate_random_img(history: list[Image.Image], request: gr.Request):\n", " \"\"\"Generate a random red, green, blue, orange, yellor or purple image.\"\"\"\n", " colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 165, 0), (255, 255, 0), (128, 0, 128)]\n", " color = colors[np.random.randint(0, len(colors))]\n", " img = Image.new('RGB', (100, 100), color)\n", "\n", " user_dir: Path = current_dir / str(request.session_hash)\n", " user_dir.mkdir(exist_ok=True)\n", " path = user_dir / f\"{secrets.token_urlsafe(8)}.webp\"\n", "\n", " img.save(path)\n", " history.append(img)\n", "\n", " return img, history, history\n", "\n", "def delete_directory(req: gr.Request):\n", " if not req.username:\n", " return\n", " user_dir: Path = current_dir / req.username\n", " shutil.rmtree(str(user_dir))\n", "\n", "with gr.Blocks(delete_cache=(60, 3600)) as demo:\n", " gr.Markdown(\"\"\"# State Cleanup Demo\n", " \ud83d\uddbc\ufe0f Images are saved in a user-specific directory and deleted when the users closes the page via demo.unload.\n", " \"\"\")\n", " with gr.Row():\n", " with gr.Column(scale=1):\n", " with gr.Row():\n", " img = gr.Image(label=\"Generated Image\", height=300, width=300)\n", " with gr.Row():\n", " gen = gr.Button(value=\"Generate\")\n", " with gr.Row():\n", " history = gr.Gallery(label=\"Previous Generations\", height=500, columns=10)\n", " state = gr.State(value=[], delete_callback=lambda v: print(\"STATE DELETED\"))\n", "\n", " demo.load(generate_random_img, [state], [img, state, history])\n", " gen.click(generate_random_img, [state], [img, state, history])\n", " demo.unload(delete_directory)\n", "\n", "\n", "demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@@ -15,7 +15,7 @@ def generate_random_img(history: list[Image.Image], request: gr.Request):
     color = colors[np.random.randint(0, len(colors))]
     img = Image.new('RGB', (100, 100), color)
 
-    user_dir: Path = current_dir / request.session_hash
+    user_dir: Path = current_dir / str(request.session_hash)
     user_dir.mkdir(exist_ok=True)
     path = user_dir / f"{secrets.token_urlsafe(8)}.webp"
 
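`gr.Request.session_hash` is typed `str | None`, so `Path / request.session_hash` fails static checking; wrapping it in `str()` satisfies the checker but would silently create a literal "None" directory if the hash were ever absent. A stricter sketch (not part of the diff; the function name is illustrative) narrows the Optional explicitly:

from __future__ import annotations
from pathlib import Path

def user_dir_for(base: Path, session_hash: str | None) -> Path:
    # Narrowing the Optional keeps both the type checker and the
    # filesystem honest instead of stringifying a possible None.
    if session_hash is None:
        raise RuntimeError("request has no session hash")
    return base / session_hash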
@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stream_asr"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch torchaudio transformers"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from transformers import pipeline\n", "import numpy as np\n", "\n", "transcriber = pipeline(\"automatic-speech-recognition\", model=\"openai/whisper-base.en\")\n", "\n", "def transcribe(stream, new_chunk):\n", " sr, y = new_chunk\n", " y = y.astype(np.float32)\n", " y /= np.max(np.abs(y))\n", "\n", " if stream is not None:\n", " stream = np.concatenate([stream, y])\n", " else:\n", " stream = y\n", " return stream, transcriber({\"sampling_rate\": sr, \"raw\": stream})[\"text\"]\n", "\n", "\n", "demo = gr.Interface(\n", " transcribe,\n", " [\"state\", gr.Audio(sources=[\"microphone\"], streaming=True)],\n", " [\"state\", \"text\"],\n", " live=True,\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stream_asr"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch torchaudio transformers"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from transformers import pipeline\n", "import numpy as np\n", "\n", "transcriber = pipeline(\"automatic-speech-recognition\", model=\"openai/whisper-base.en\")\n", "\n", "def transcribe(stream, new_chunk):\n", " sr, y = new_chunk\n", " y = y.astype(np.float32)\n", " y /= np.max(np.abs(y))\n", "\n", " if stream is not None:\n", " stream = np.concatenate([stream, y])\n", " else:\n", " stream = y\n", " return stream, transcriber({\"sampling_rate\": sr, \"raw\": stream})[\"text\"] # type: ignore\n", "\n", "\n", "demo = gr.Interface(\n", " transcribe,\n", " [\"state\", gr.Audio(sources=[\"microphone\"], streaming=True)],\n", " [\"state\", \"text\"],\n", " live=True,\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@@ -13,7 +13,7 @@ def transcribe(stream, new_chunk):
         stream = np.concatenate([stream, y])
     else:
         stream = y
-    return stream, transcriber({"sampling_rate": sr, "raw": stream})["text"]
+    return stream, transcriber({"sampling_rate": sr, "raw": stream})["text"]  # type: ignore
 
 
 demo = gr.Interface(
@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stream_audio_out"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('audio')\n", "!wget -q -O audio/cantina.wav https://github.com/gradio-app/gradio/raw/main/demo/stream_audio_out/audio/cantina.wav"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from pydub import AudioSegment\n", "from time import sleep\n", "\n", "with gr.Blocks() as demo:\n", " input_audio = gr.Audio(label=\"Input Audio\", type=\"filepath\", format=\"mp3\")\n", " with gr.Row():\n", " with gr.Column():\n", " stream_as_file_btn = gr.Button(\"Stream as File\")\n", " format = gr.Radio([\"wav\", \"mp3\"], value=\"wav\", label=\"Format\")\n", " stream_as_file_output = gr.Audio(streaming=True)\n", "\n", " def stream_file(audio_file, format):\n", " audio = AudioSegment.from_file(audio_file)\n", " i = 0\n", " chunk_size = 1000\n", " while chunk_size * i < len(audio):\n", " chunk = audio[chunk_size * i : chunk_size * (i + 1)]\n", " i += 1\n", " if chunk:\n", " file = f\"/tmp/{i}.{format}\"\n", " chunk.export(file, format=format)\n", " yield file\n", " sleep(0.5)\n", "\n", " stream_as_file_btn.click(\n", " stream_file, [input_audio, format], stream_as_file_output\n", " )\n", "\n", " gr.Examples(\n", " [[\"audio/cantina.wav\", \"wav\"], [\"audio/cantina.wav\", \"mp3\"]],\n", " [input_audio, format],\n", " fn=stream_file,\n", " outputs=stream_as_file_output,\n", " )\n", "\n", " with gr.Column():\n", " stream_as_bytes_btn = gr.Button(\"Stream as Bytes\")\n", " stream_as_bytes_output = gr.Audio(format=\"bytes\", streaming=True)\n", "\n", " def stream_bytes(audio_file):\n", " chunk_size = 20_000\n", " with open(audio_file, \"rb\") as f:\n", " while True:\n", " chunk = f.read(chunk_size)\n", " if chunk:\n", " yield chunk\n", " sleep(1)\n", " else:\n", " break\n", " stream_as_bytes_btn.click(stream_bytes, input_audio, stream_as_bytes_output)\n", "\n", "if __name__ == \"__main__\":\n", " demo.queue().launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stream_audio_out"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('audio')\n", "!wget -q -O audio/cantina.wav https://github.com/gradio-app/gradio/raw/main/demo/stream_audio_out/audio/cantina.wav"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from pydub import AudioSegment\n", "from time import sleep\n", "\n", "with gr.Blocks() as demo:\n", " input_audio = gr.Audio(label=\"Input Audio\", type=\"filepath\", format=\"mp3\")\n", " with gr.Row():\n", " with gr.Column():\n", " stream_as_file_btn = gr.Button(\"Stream as File\")\n", " format = gr.Radio([\"wav\", \"mp3\"], value=\"wav\", label=\"Format\")\n", " stream_as_file_output = gr.Audio(streaming=True)\n", "\n", " def stream_file(audio_file, format):\n", " audio = AudioSegment.from_file(audio_file)\n", " i = 0\n", " chunk_size = 1000\n", " while chunk_size * i < len(audio):\n", " chunk = audio[chunk_size * i : chunk_size * (i + 1)]\n", " i += 1\n", " if chunk:\n", " file = f\"/tmp/{i}.{format}\"\n", " chunk.export(file, format=format)\n", " yield file\n", " sleep(0.5)\n", "\n", " stream_as_file_btn.click(\n", " stream_file, [input_audio, format], stream_as_file_output\n", " )\n", "\n", " gr.Examples(\n", " [[\"audio/cantina.wav\", \"wav\"], [\"audio/cantina.wav\", \"mp3\"]],\n", " [input_audio, format],\n", " fn=stream_file,\n", " outputs=stream_as_file_output,\n", " )\n", "\n", " with gr.Column():\n", " stream_as_bytes_btn = gr.Button(\"Stream as Bytes\")\n", " stream_as_bytes_output = gr.Audio(streaming=True)\n", "\n", " def stream_bytes(audio_file):\n", " chunk_size = 20_000\n", " with open(audio_file, \"rb\") as f:\n", " while True:\n", " chunk = f.read(chunk_size)\n", " if chunk:\n", " yield chunk\n", " sleep(1)\n", " else:\n", " break\n", " stream_as_bytes_btn.click(stream_bytes, input_audio, stream_as_bytes_output)\n", "\n", "if __name__ == \"__main__\":\n", " demo.queue().launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@@ -36,7 +36,7 @@ with gr.Blocks() as demo:
 
         with gr.Column():
             stream_as_bytes_btn = gr.Button("Stream as Bytes")
-            stream_as_bytes_output = gr.Audio(format="bytes", streaming=True)
+            stream_as_bytes_output = gr.Audio(streaming=True)
 
             def stream_bytes(audio_file):
                 chunk_size = 20_000
@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: streaming_wav2vec"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch transformers "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["from transformers import pipeline\n", "import gradio as gr\n", "import time\n", "\n", "p = pipeline(\"automatic-speech-recognition\")\n", "\n", "def transcribe(audio, state=\"\"):\n", " time.sleep(2)\n", " text = p(audio)[\"text\"]\n", " state += text + \" \"\n", " return state, state\n", "\n", "demo = gr.Interface(\n", " fn=transcribe, \n", " inputs=[\n", " gr.Audio(sources=[\"microphone\"], type=\"filepath\", streaming=True), \n", " \"state\"\n", " ],\n", " outputs=[\n", " \"textbox\",\n", " \"state\"\n", " ],\n", " live=True\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: streaming_wav2vec"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch transformers "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["from transformers import pipeline\n", "import gradio as gr\n", "import time\n", "\n", "p = pipeline(\"automatic-speech-recognition\")\n", "\n", "def transcribe(audio, state=\"\"):\n", " time.sleep(2)\n", " text = p(audio)[\"text\"] # type: ignore\n", " state += text + \" \"\n", " return state, state\n", "\n", "demo = gr.Interface(\n", " fn=transcribe, \n", " inputs=[\n", " gr.Audio(sources=[\"microphone\"], type=\"filepath\", streaming=True), \n", " \"state\"\n", " ],\n", " outputs=[\n", " \"textbox\",\n", " \"state\"\n", " ],\n", " live=True\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@@ -6,7 +6,7 @@ p = pipeline("automatic-speech-recognition")
 
 def transcribe(audio, state=""):
     time.sleep(2)
-    text = p(audio)["text"]
+    text = p(audio)["text"]  # type: ignore
     state += text + " "
     return state, state
 
@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: text_generation\n", "### This text generation demo takes in input text and returns generated text. It uses the Transformers library to set up the model and has two examples.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio git+https://github.com/huggingface/transformers gradio torch"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from transformers import pipeline\n", "\n", "generator = pipeline('text-generation', model='gpt2')\n", "\n", "def generate(text):\n", " result = generator(text, max_length=30, num_return_sequences=1)\n", " return result[0][\"generated_text\"]\n", "\n", "examples = [\n", " [\"The Moon's orbit around Earth has\"],\n", " [\"The smooth Borealis basin in the Northern Hemisphere covers 40%\"],\n", "]\n", "\n", "demo = gr.Interface(\n", " fn=generate,\n", " inputs=gr.Textbox(lines=5, label=\"Input Text\"),\n", " outputs=gr.Textbox(label=\"Generated Text\"),\n", " examples=examples\n", ")\n", "\n", "demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: text_generation\n", "### This text generation demo takes in input text and returns generated text. It uses the Transformers library to set up the model and has two examples.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio git+https://github.com/huggingface/transformers gradio torch"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from transformers import pipeline\n", "\n", "generator = pipeline('text-generation', model='gpt2')\n", "\n", "def generate(text):\n", " result = generator(text, max_length=30, num_return_sequences=1)\n", " return result[0][\"generated_text\"] # type: ignore\n", "\n", "examples = [\n", " [\"The Moon's orbit around Earth has\"],\n", " [\"The smooth Borealis basin in the Northern Hemisphere covers 40%\"],\n", "]\n", "\n", "demo = gr.Interface(\n", " fn=generate,\n", " inputs=gr.Textbox(lines=5, label=\"Input Text\"),\n", " outputs=gr.Textbox(label=\"Generated Text\"),\n", " examples=examples\n", ")\n", "\n", "demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@@ -5,7 +5,7 @@ generator = pipeline('text-generation', model='gpt2')
 
 def generate(text):
     result = generator(text, max_length=30, num_return_sequences=1)
-    return result[0]["generated_text"]
+    return result[0]["generated_text"]  # type: ignore
 
 examples = [
     ["The Moon's orbit around Earth has"],
@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: tictactoe"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks() as demo:\n", " turn = gr.Textbox(\"X\", interactive=False, label=\"Turn\")\n", " board = gr.Dataframe(value=[[\"\", \"\", \"\"]] * 3, interactive=False, type=\"array\")\n", "\n", " def place(board, turn, evt: gr.SelectData):\n", " if evt.value:\n", " return board, turn\n", " board[evt.index[0]][evt.index[1]] = turn\n", " turn = \"O\" if turn == \"X\" else \"X\"\n", " return board, turn\n", "\n", " board.select(place, [board, turn], [board, turn], show_progress=\"hidden\")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: tictactoe"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "with gr.Blocks() as demo:\n", " turn = gr.Textbox(\"X\", interactive=False, label=\"Turn\")\n", " board = gr.Dataframe(value=[[\"\", \"\", \"\"]] * 3, interactive=False, type=\"array\")\n", "\n", " def place(board: list[list[int]], turn, evt: gr.SelectData): # type: ignore\n", " if evt.value:\n", " return board, turn\n", " board[evt.index[0]][evt.index[1]] = turn\n", " turn = \"O\" if turn == \"X\" else \"X\"\n", " return board, turn\n", "\n", " board.select(place, [board, turn], [board, turn], show_progress=\"hidden\")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@@ -4,7 +4,7 @@ with gr.Blocks() as demo:
     turn = gr.Textbox("X", interactive=False, label="Turn")
     board = gr.Dataframe(value=[["", "", ""]] * 3, interactive=False, type="array")
 
-    def place(board, turn, evt: gr.SelectData):
+    def place(board: list[list[int]], turn, evt: gr.SelectData):  # type: ignore
         if evt.value:
             return board, turn
         board[evt.index[0]][evt.index[1]] = turn
@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: translation\n", "### This translation demo takes in the text, source and target languages, and returns the translation. It uses the Transformers library to set up the model and has a title, description, and example.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio git+https://github.com/huggingface/transformers gradio torch"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline\n", "import torch\n", "\n", "# this model was loaded from https://hf.co/models\n", "model = AutoModelForSeq2SeqLM.from_pretrained(\"facebook/nllb-200-distilled-600M\")\n", "tokenizer = AutoTokenizer.from_pretrained(\"facebook/nllb-200-distilled-600M\")\n", "device = 0 if torch.cuda.is_available() else -1\n", "LANGS = [\"ace_Arab\", \"eng_Latn\", \"fra_Latn\", \"spa_Latn\"]\n", "\n", "def translate(text, src_lang, tgt_lang):\n", " \"\"\"\n", " Translate the text from source lang to target lang\n", " \"\"\"\n", " translation_pipeline = pipeline(\"translation\", model=model, tokenizer=tokenizer, src_lang=src_lang, tgt_lang=tgt_lang, max_length=400, device=device)\n", " result = translation_pipeline(text)\n", " return result[0]['translation_text']\n", "\n", "demo = gr.Interface(\n", " fn=translate,\n", " inputs=[\n", " gr.components.Textbox(label=\"Text\"),\n", " gr.components.Dropdown(label=\"Source Language\", choices=LANGS),\n", " gr.components.Dropdown(label=\"Target Language\", choices=LANGS),\n", " ],\n", " outputs=[\"text\"],\n", " examples=[[\"Building a translation demo with Gradio is so easy!\", \"eng_Latn\", \"spa_Latn\"]],\n", " cache_examples=False,\n", " title=\"Translation Demo\",\n", " description=\"This demo is a simplified version of the original [NLLB-Translator](https://huggingface.co/spaces/Narrativaai/NLLB-Translator) space\"\n", ")\n", "\n", "demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: translation\n", "### This translation demo takes in the text, source and target languages, and returns the translation. It uses the Transformers library to set up the model and has a title, description, and example.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio git+https://github.com/huggingface/transformers gradio torch"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline\n", "import torch\n", "\n", "# this model was loaded from https://hf.co/models\n", "model = AutoModelForSeq2SeqLM.from_pretrained(\"facebook/nllb-200-distilled-600M\")\n", "tokenizer = AutoTokenizer.from_pretrained(\"facebook/nllb-200-distilled-600M\")\n", "device = 0 if torch.cuda.is_available() else -1\n", "LANGS = [\"ace_Arab\", \"eng_Latn\", \"fra_Latn\", \"spa_Latn\"]\n", "\n", "def translate(text, src_lang, tgt_lang):\n", " \"\"\"\n", " Translate the text from source lang to target lang\n", " \"\"\"\n", " translation_pipeline = pipeline(\"translation\", model=model, tokenizer=tokenizer, src_lang=src_lang, tgt_lang=tgt_lang, max_length=400, device=device)\n", " result = translation_pipeline(text)\n", " return result[0]['translation_text'] #type: ignore\n", "\n", "demo = gr.Interface(\n", " fn=translate,\n", " inputs=[\n", " gr.components.Textbox(label=\"Text\"),\n", " gr.components.Dropdown(label=\"Source Language\", choices=LANGS),\n", " gr.components.Dropdown(label=\"Target Language\", choices=LANGS),\n", " ],\n", " outputs=[\"text\"],\n", " examples=[[\"Building a translation demo with Gradio is so easy!\", \"eng_Latn\", \"spa_Latn\"]],\n", " cache_examples=False,\n", " title=\"Translation Demo\",\n", " description=\"This demo is a simplified version of the original [NLLB-Translator](https://huggingface.co/spaces/Narrativaai/NLLB-Translator) space\"\n", ")\n", "\n", "demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@@ -14,7 +14,7 @@ def translate(text, src_lang, tgt_lang):
     """
     translation_pipeline = pipeline("translation", model=model, tokenizer=tokenizer, src_lang=src_lang, tgt_lang=tgt_lang, max_length=400, device=device)
     result = translation_pipeline(text)
-    return result[0]['translation_text']
+    return result[0]['translation_text'] #type: ignore
 
 demo = gr.Interface(
     fn=translate,
@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: unified_demo_text_generation"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch transformers"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from transformers import pipeline\n", "\n", "generator = pipeline('text-generation', model = 'gpt2')\n", "\n", "def generate_text(text_prompt):\n", " response = generator(text_prompt, max_length = 30, num_return_sequences=5)\n", " return response[0]['generated_text']\n", "\n", "textbox = gr.Textbox()\n", "\n", "demo = gr.Interface(generate_text, textbox, textbox)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: unified_demo_text_generation"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch transformers"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from transformers import pipeline\n", "\n", "generator = pipeline('text-generation', model = 'gpt2')\n", "\n", "def generate_text(text_prompt):\n", " response = generator(text_prompt, max_length = 30, num_return_sequences=5)\n", " return response[0]['generated_text'] #type: ignore\n", "\n", "textbox = gr.Textbox()\n", "\n", "demo = gr.Interface(generate_text, textbox, textbox)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@@ -5,7 +5,7 @@ generator = pipeline('text-generation', model = 'gpt2')
 
 def generate_text(text_prompt):
     response = generator(text_prompt, max_length = 30, num_return_sequences=5)
-    return response[0]['generated_text']
+    return response[0]['generated_text'] #type: ignore
 
 textbox = gr.Textbox()
 
@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: waveform"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import random\n", "\n", "\n", "COLORS = [\n", " [\"#ff0000\", \"#00ff00\"],\n", " [\"#00ff00\", \"#0000ff\"],\n", " [\"#0000ff\", \"#ff0000\"],\n", "] \n", "\n", "def audio_waveform(audio, image):\n", " return (\n", " audio,\n", " gr.make_waveform(audio),\n", " gr.make_waveform(audio, animate=True),\n", " gr.make_waveform(audio, bg_image=image, bars_color=random.choice(COLORS)),\n", " )\n", "\n", "\n", "gr.Interface(\n", " audio_waveform,\n", " inputs=[gr.Audio(), gr.Image(type=\"filepath\")],\n", " outputs=[\n", " gr.Audio(),\n", " gr.Video(),\n", " gr.Video(),\n", " gr.Video(),\n", " ],\n", ").launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: waveform"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import random\n", "\n", "\n", "COLORS = [\n", " [\"#ff0000\", \"#00ff00\"],\n", " [\"#00ff00\", \"#0000ff\"],\n", " [\"#0000ff\", \"#ff0000\"],\n", "] \n", "\n", "def audio_waveform(audio, image):\n", " return (\n", " audio,\n", " gr.make_waveform(audio),\n", " gr.make_waveform(audio, animate=True),\n", " gr.make_waveform(audio, bg_image=image, bars_color=str(random.choice(COLORS))),\n", " )\n", "\n", "\n", "gr.Interface(\n", " audio_waveform,\n", " inputs=[gr.Audio(), gr.Image(type=\"filepath\")],\n", " outputs=[\n", " gr.Audio(),\n", " gr.Video(),\n", " gr.Video(),\n", " gr.Video(),\n", " ],\n", ").launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@@ -13,7 +13,7 @@ def audio_waveform(audio, image):
         audio,
         gr.make_waveform(audio),
         gr.make_waveform(audio, animate=True),
-        gr.make_waveform(audio, bg_image=image, bars_color=random.choice(COLORS)),
+        gr.make_waveform(audio, bg_image=image, bars_color=str(random.choice(COLORS))),
     )
 
 
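`gr.make_waveform` appears to annotate `bars_color` as either a single color string or a `(start, end)` tuple, so the two-element lists in `COLORS` fail checking, and `str(...)` silences the checker at the cost of passing a stringified list at runtime. A sketch of a type-clean alternative, assuming that tuple overload, stores the palette as tuples instead:

import random

# (start_color, end_color) tuples match the assumed bars_color annotation
COLORS = [
    ("#ff0000", "#00ff00"),
    ("#00ff00", "#0000ff"),
    ("#0000ff", "#ff0000"),
]

bars_color = random.choice(COLORS)  # a tuple, so no str() wrapper is needed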
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 import warnings
-from typing import TYPE_CHECKING, Any, Callable
+from typing import TYPE_CHECKING, Any, Callable, Sequence
 
 from gradio.components.base import Component, FormComponent
 from gradio.events import Events
@@ -25,7 +25,7 @@ class SimpleDropdown(FormComponent):
         label: str | None = None,
         info: str | None = None,
         every: Timer | float | None = None,
-        inputs: Component | list[Component] | set[Component] | None = None,
+        inputs: Component | Sequence[Component] | set[Component] | None = None,
         show_label: bool | None = None,
         scale: int | None = None,
         min_width: int = 160,
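These signature changes are the core of the commit: `list` is invariant in its element type, so a `list[Textbox]` is not assignable to a parameter typed `list[Component]`, while the read-only `Sequence` is covariant and accepts lists of any `Component` subclass. A minimal self-contained illustration (the class names are placeholders, not Gradio's):

from __future__ import annotations
from typing import Sequence

class Component: ...
class Textbox(Component): ...

def takes_list(inputs: list[Component]) -> None: ...
def takes_sequence(inputs: Sequence[Component]) -> None: ...

boxes: list[Textbox] = [Textbox()]
takes_list(boxes)      # rejected by type checkers: list is invariant
takes_sequence(boxes)  # accepted: Sequence is covariant in its element type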
@@ -3,7 +3,7 @@
 from __future__ import annotations
 
 from pathlib import Path
-from typing import TYPE_CHECKING, Any
+from typing import TYPE_CHECKING, Any, Sequence
 
 from gradio_client import handle_file
 from gradio_client.documentation import document
@@ -36,7 +36,7 @@ class SimpleImage(Component):
         *,
         label: str | None = None,
         every: Timer | float | None = None,
-        inputs: Component | list[Component] | set[Component] | None = None,
+        inputs: Component | Sequence[Component] | set[Component] | None = None,
         show_label: bool | None = None,
         show_download_button: bool = True,
         container: bool = True,
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from typing import TYPE_CHECKING, Any, Callable
+from typing import TYPE_CHECKING, Any, Callable, Sequence
 
 from gradio.components.base import Component, FormComponent
 from gradio.events import Events
@@ -27,7 +27,7 @@ class SimpleTextbox(FormComponent):
         placeholder: str | None = None,
         label: str | None = None,
         every: Timer | float | None = None,
-        inputs: Component | list[Component] | set[Component] | None = None,
+        inputs: Component | Sequence[Component] | set[Component] | None = None,
         show_label: bool | None = None,
         scale: int | None = None,
         min_width: int = 160,
@@ -17,7 +17,16 @@ import webbrowser
 from collections import defaultdict
 from pathlib import Path
 from types import ModuleType
-from typing import TYPE_CHECKING, Any, AsyncIterator, Callable, Literal, Sequence, cast
+from typing import (
+    TYPE_CHECKING,
+    AbstractSet,
+    Any,
+    AsyncIterator,
+    Callable,
+    Literal,
+    Sequence,
+    cast,
+)
 from urllib.parse import urlparse, urlunparse
 
 import anyio
@@ -473,8 +482,8 @@ class BlockFunction:
     def __init__(
         self,
         fn: Callable | None,
-        inputs: list[Component],
-        outputs: list[Block] | list[Component],
+        inputs: Sequence[Component | BlockContext],
+        outputs: Sequence[Component | BlockContext],
         preprocess: bool,
         postprocess: bool,
         inputs_as_dict: bool,
@@ -652,8 +661,16 @@ class BlocksConfig:
         self,
         targets: Sequence[EventListenerMethod],
         fn: Callable | None,
-        inputs: Component | list[Component] | set[Component] | None,
-        outputs: Block | list[Block] | list[Component] | None,
+        inputs: Component
+        | BlockContext
+        | Sequence[Component | BlockContext]
+        | AbstractSet[Component | BlockContext]
+        | None,
+        outputs: Component
+        | BlockContext
+        | Sequence[Component | BlockContext]
+        | AbstractSet[Component | BlockContext]
+        | None,
         preprocess: bool = True,
         postprocess: bool = True,
         scroll_to_output: bool = False,
@@ -711,21 +728,21 @@ class BlocksConfig:
             )
             for target in targets
         ]
-        if isinstance(inputs, set):
+        if isinstance(inputs, AbstractSet):
             inputs_as_dict = True
             inputs = sorted(inputs, key=lambda x: x._id)
         else:
             inputs_as_dict = False
             if inputs is None:
                 inputs = []
-            elif not isinstance(inputs, list):
+            elif not isinstance(inputs, Sequence):
                 inputs = [inputs]
 
-        if isinstance(outputs, set):
+        if isinstance(outputs, AbstractSet):
             outputs = sorted(outputs, key=lambda x: x._id)
         elif outputs is None:
             outputs = []
-        elif not isinstance(outputs, list):
+        elif not isinstance(outputs, Sequence):
             outputs = [outputs]
 
         if fn is not None and not cancels:
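Accepting `Sequence`/`AbstractSet` in the annotations forces the matching runtime checks above onto the abstract classes too, since a caller may now pass a tuple or a frozenset. A simplified sketch of the normalization pattern (the function name and sort key are illustrative; Gradio sorts components by their `_id`):

from collections.abc import Sequence, Set as AbstractSet
from typing import Any

def normalize_inputs(inputs: Any) -> tuple[list[Any], bool]:
    # Returns (inputs as a list, whether they arrived as a set).
    if isinstance(inputs, AbstractSet):
        # Sets have no order; impose one deterministically.
        return sorted(inputs, key=id), True
    if inputs is None:
        return [], False
    if not isinstance(inputs, Sequence):
        return [inputs], False  # a single bare component
    return list(inputs), False

With the abstract checks, normalize_inputs((a, b)) and normalize_inputs(frozenset({a})) behave like their list and set counterparts.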
@@ -17,9 +17,9 @@ INTERFACE_TEMPLATE = '''
 
     {% for event in events %}
     def {{ event }}(self,
-        fn: Callable | None,
-        inputs: Component | Sequence[Component] | set[Component] | None = None,
-        outputs: Component | Sequence[Component] | None = None,
+        fn: Callable | None = None,
+        inputs: Block | Sequence[Block] | set[Block] | None = None,
+        outputs: Block | Sequence[Block] | None = None,
         api_name: str | None | Literal[False] = None,
         scroll_to_output: bool = False,
         show_progress: Literal["full", "minimal", "hidden"] = "full",
@@ -2,7 +2,7 @@
 
 from __future__ import annotations
 
-from typing import TYPE_CHECKING, Any, List
+from typing import TYPE_CHECKING, Any, List, Sequence
 
 import gradio_client.utils as client_utils
 import numpy as np
@@ -61,7 +61,7 @@ class AnnotatedImage(Component):
         color_map: dict[str, str] | None = None,
         label: str | None = None,
         every: Timer | float | None = None,
-        inputs: Component | list[Component] | set[Component] | None = None,
+        inputs: Component | Sequence[Component] | set[Component] | None = None,
         show_label: bool | None = None,
         container: bool = True,
         scale: int | None = None,
@@ -4,7 +4,7 @@ from __future__ import annotations
 
 import dataclasses
 from pathlib import Path
-from typing import TYPE_CHECKING, Any, Callable, Literal
+from typing import TYPE_CHECKING, Any, Callable, Literal, Sequence
 
 import httpx
 import numpy as np
@@ -83,7 +83,7 @@ class Audio(
         type: Literal["numpy", "filepath"] = "numpy",
         label: str | None = None,
         every: Timer | float | None = None,
-        inputs: Component | list[Component] | set[Component] | None = None,
+        inputs: Component | Sequence[Component] | set[Component] | None = None,
         show_label: bool | None = None,
         container: bool = True,
         scale: int | None = None,
@@ -3,7 +3,7 @@
 from __future__ import annotations
 
 import warnings
-from typing import TYPE_CHECKING, Any, Callable, Literal
+from typing import TYPE_CHECKING, Any, Callable, Literal, Sequence
 
 from gradio_client.documentation import document
 
@@ -67,7 +67,7 @@ class BarPlot(Plot):
         scale: int | None = None,
         min_width: int = 160,
         every: Timer | float | None = None,
-        inputs: Component | list[Component] | set[Component] | None = None,
+        inputs: Component | Sequence[Component] | set[Component] | None = None,
         visible: bool = True,
         elem_id: str | None = None,
         elem_classes: list[str] | str | None = None,
@@ -12,7 +12,7 @@ import warnings
 from abc import ABC, abstractmethod
 from enum import Enum
 from pathlib import Path
-from typing import TYPE_CHECKING, Any, Callable, Type
+from typing import TYPE_CHECKING, Any, Callable, Sequence, Type
 
 import gradio_client.utils as client_utils
 
@@ -148,7 +148,7 @@ class Component(ComponentBase, Block):
         key: int | str | None = None,
         load_fn: Callable | None = None,
         every: Timer | float | None = None,
-        inputs: Component | list[Component] | set[Component] | None = None,
+        inputs: Component | Sequence[Component] | set[Component] | None = None,
     ):
         self.server_fns = [
             getattr(self, value)
@@ -204,7 +204,7 @@ class Component(ComponentBase, Block):
             | tuple[
                 Callable,
                 list[tuple[Block, str]],
-                Component | list[Component] | set[Component] | None,
+                Component | Sequence[Component] | set[Component] | None,
             ]
         ) = None
         load_fn, initial_value = self.get_load_fn_and_initial_value(value, inputs)
@@ -255,7 +255,7 @@ class Component(ComponentBase, Block):
         self,
         callable: Callable,
         every: Timer | float | None,
-        inputs: Component | list[Component] | set[Component] | None = None,
+        inputs: Component | Sequence[Component] | set[Component] | None = None,
     ):
         """Add an event that runs `callable`, optionally at interval specified by `every`."""
         if isinstance(inputs, Component):
@@ -2,7 +2,7 @@
 
 from __future__ import annotations
 
-from typing import TYPE_CHECKING, Any, Callable, Literal
+from typing import TYPE_CHECKING, Any, Callable, Literal, Sequence
 
 from gradio_client.documentation import document
 
@@ -26,7 +26,7 @@ class Button(Component):
         value: str | Callable = "Run",
         *,
         every: Timer | float | None = None,
-        inputs: Component | list[Component] | set[Component] | None = None,
+        inputs: Component | Sequence[Component] | set[Component] | None = None,
         variant: Literal["primary", "secondary", "stop"] = "secondary",
         size: Literal["sm", "lg"] | None = None,
         icon: str | None = None,
@@ -13,6 +13,7 @@ from typing import (
     List,
     Literal,
     Optional,
+    Sequence,
     Tuple,
     Type,
     TypedDict,
@@ -136,8 +137,10 @@ class Chatbot(Component):
     def __init__(
         self,
         value: (
-            list[
-                list[str | GradioComponent | tuple[str] | tuple[str | Path, str] | None]
+            Sequence[
+                Sequence[
+                    str | GradioComponent | tuple[str] | tuple[str | Path, str] | None
+                ]
             ]
             | Callable
             | None
@@ -146,7 +149,7 @@ class Chatbot(Component):
         type: Literal["messages", "tuples"] = "tuples",
         label: str | None = None,
         every: Timer | float | None = None,
-        inputs: Component | list[Component] | set[Component] | None = None,
+        inputs: Component | Sequence[Component] | set[Component] | None = None,
         show_label: bool | None = None,
         container: bool = True,
         scale: int | None = None,
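The nested `Sequence[Sequence[...]]` in the Chatbot `value` annotation matters for the same reason as the flat cases: invariance applies at every level of nesting, so a plain `list[list[str]]` chat history would be rejected against `list[list[str | ... | None]]`, while covariance composes through `Sequence`. A small sketch with simplified element types:

from typing import Optional, Sequence

def set_history_list(v: list[list[Optional[str]]]) -> None: ...
def set_history_seq(v: Sequence[Sequence[Optional[str]]]) -> None: ...

chat: list[list[str]] = [["hi", "hello"]]
set_history_list(chat)  # rejected: both the outer and inner list are invariant
set_history_seq(chat)   # accepted: covariance composes through the nesting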
@@ -2,7 +2,7 @@
 
 from __future__ import annotations
 
-from typing import TYPE_CHECKING, Any, Callable
+from typing import TYPE_CHECKING, Any, Callable, Sequence
 
 from gradio_client.documentation import document
 
@@ -31,7 +31,7 @@ class Checkbox(FormComponent):
         label: str | None = None,
         info: str | None = None,
         every: Timer | float | None = None,
-        inputs: Component | list[Component] | set[Component] | None = None,
+        inputs: Component | Sequence[Component] | set[Component] | None = None,
         show_label: bool | None = None,
         container: bool = True,
         scale: int | None = None,
Some files were not shown because too many files have changed in this diff