Small fixes to make multiple demos compatible with 3.0 (#1257)

* add required param, passed as None

* add torch requirement, add chunk_length_s

add the torch requirement needed by transformers;
chunk_length_s enables inference on longer audio files

* fix component initialization

* input Number is a float, force int to multiply the string

* no need for Templates, fix class init

* expects an array

* add requirements.txt for demo

* update with cleaner syntax

* add sample CSV to fraud demo

* adapt to new syntax

* temp fix for Slider arguments

* add dep to requirements

* remove gr.Markdown from Interface init

* fix default value param name

* upgrade deepspeech, download models on start

* use path resolution consistent with other demos

* remove redundant demo

* add example to interface

* fix plotting issues

* plots

* deprecate Carousel

Co-authored-by: Abubakar Abid <abubakar@huggingface.co>
Radamés Ajna 2022-05-13 22:45:44 -07:00 committed by GitHub
parent 63d0a28c08
commit 73e98ddf15
30 changed files with 79 additions and 407 deletions
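
Taken together, most of the demo fixes in this commit track a handful of Gradio 3.0 API renames that recur in the diffs below. A condensed sketch of the direction of each change (arguments are taken from the diffs; this is an illustrative summary, not code from the commit):

import gradio as gr

gr.Slider(minimum=1, maximum=100, value=25)   # Slider bounds: min=/max= become minimum=/maximum=
gr.Dataframe(value=[["Jon", 12, 14, 18]])     # default values: default= becomes value=
gr.Dropdown(["cross", "line"], type="index")  # gr.inputs.* / gr.outputs.* move to top-level gr.*
gr.Plot()                                     # replaces gr.Image(plot=True) and type="plot"
# gr.Carousel is removed in 3.0; gr.Gallery is the suggested replacement for lists of images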

View File

@@ -1,3 +1,5 @@
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
@@ -28,8 +30,8 @@ with demo:
)
with gr.Row():
speed = gr.Slider(25, min=1, max=30, label="Speed")
angle = gr.Slider(45, min=0, max=90, label="Angle")
speed = gr.Slider(25, minimum=1, maximum=30, label="Speed")
angle = gr.Slider(45, minimum=0, maximum=90, label="Angle")
output = gr.Image(type="plot")
btn = gr.Button("Run")
btn.click(plot, [speed, angle], output)

View File

@@ -40,9 +40,6 @@ with gr.Blocks() as demo:
gr.Dataframe(
interactive=True, headers=["One", "Two", "Three", "Four"], col_count=4
)
# layout components are static only
# carousel doesn't work like other components
# carousel = gr.Carousel()
if __name__ == "__main__":

View File

@@ -0,0 +1,2 @@
torch
transformers

View File

@@ -1,18 +1,18 @@
# from transformers import pipeline
from transformers import pipeline
import gradio as gr
# asr = pipeline("automatic-speech-recognition", "facebook/wav2vec2-base-960h")
# classifier = pipeline("text-classification")
asr = pipeline("automatic-speech-recognition", "facebook/wav2vec2-base-960h")
classifier = pipeline("text-classification")
# def speech_to_text(speech):
# text = asr(speech)["text"]
# return text
def speech_to_text(speech):
text = asr(speech)["text"]
return text
# def text_to_sentiment(text):
# return classifier(text)[0]["label"]
def text_to_sentiment(text):
return classifier(text)[0]["label"]
demo = gr.Blocks()
@@ -25,8 +25,8 @@ with demo:
b1 = gr.Button("Recognize Speech")
b2 = gr.Button("Classify Sentiment")
# b1.click(speech_to_text, inputs=audio_file, outputs=text)
# b2.click(text_to_sentiment, inputs=text, outputs=label)
b1.click(speech_to_text, inputs=audio_file, outputs=text)
b2.click(text_to_sentiment, inputs=text, outputs=label)
if __name__ == "__main__":
demo.launch()
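
The chunk_length_s change mentioned in the commit message (to enable inference on longer audio files) is not visible in the hunk above; based on the transformers pipeline API, the adjusted call would look roughly like this (the 10-second chunk length is an illustrative value, not taken from the diff):

from transformers import pipeline

# chunk long recordings into windows so wav2vec2 can transcribe full-length files
asr = pipeline(
    "automatic-speech-recognition",
    model="facebook/wav2vec2-base-960h",
    chunk_length_s=10,
)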

View File

@@ -1,12 +1,12 @@
import gradio as gr
def greet(name: str, repeat: int):
return "Hello " + name * repeat + "!!"
def greet(name: str, repeat: float):
return "Hello " + name * int(repeat) + "!!"
demo = gr.Interface(
fn=greet, inputs=[gr.Textbox(lines=2, max_lines=4), gr.Number()], outputs=gr.component("textarea")
fn=greet, inputs=[gr.Textbox(lines=2, max_lines=4), gr.Number()], outputs="textarea"
)
if __name__ == "__main__":
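
The int cast above is needed because gr.Number always hands the function a float, and Python will not repeat a string by a float:

>>> "Hello " * 2.0
TypeError: can't multiply sequence by non-int of type 'float'
>>> "Hello " * int(2.0)
'Hello Hello '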

View File

@@ -7,7 +7,7 @@ def snap(image):
return np.flipud(image)
demo = gr.Interface(snap, gr.component("webcam"), gr.component("image"))
demo = gr.Interface(snap, "webcam", "image")
if __name__ == "__main__":
demo.launch()

View File

@@ -5,12 +5,12 @@ import time
def xray_model(diseases, img):
time.sleep(4)
return {disease: random.random() for disease in diseases}
return [{disease: random.random() for disease in diseases}]
def ct_model(diseases, img):
time.sleep(3)
return {disease: 0.1 for disease in diseases}
return [{disease: 0.1 for disease in diseases}]
with gr.Blocks() as demo:

View File

@@ -1 +0,0 @@
fpdf

View File

@@ -1,49 +0,0 @@
import os
import tempfile
import numpy as np
from fpdf import FPDF
import gradio as gr
def disease_report(img, scan_for, generate_report):
results = []
for i, mode in enumerate(["Red", "Green", "Blue"]):
color_filter = np.array([0, 0, 0])
color_filter[i] = 1
results.append([mode, img * color_filter])
tmp_dir = tempfile.gettempdir()
report = os.path.join(tmp_dir, "report.pdf")
if generate_report:
pdf = FPDF()
pdf.add_page()
pdf.set_font("Arial", size=15)
pdf.cell(200, 10, txt="Disease Report", ln=1, align="C")
pdf.cell(200, 10, txt="A Gradio Demo.", ln=2, align="C")
pdf.output(report)
return results, report if generate_report else None
demo = gr.Interface(
disease_report,
[
"image",
gr.CheckboxGroup(
["Cancer", "Rash", "Heart Failure", "Stroke", "Diabetes", "Pneumonia"]
),
"checkbox",
],
[
gr.Carousel(["text", "image"], label="Disease"),
gr.File(label="Report"),
],
title="Disease Report",
description="Upload an Xray and select the diseases to scan for.",
theme="grass",
flagging_options=["good", "bad", "etc"],
allow_flagging="auto",
)
if __name__ == "__main__":
demo.launch()

Binary file not shown (image removed; was 528 KiB).

View File

@@ -1,43 +0,0 @@
# This demo needs to be run from the repo folder.
# python demo/fake_gan/run.py
import random
import time
import gradio as gr
def fake_gan(*args):
time.sleep(1)
image = random.choice(
[
"https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80",
"https://images.unsplash.com/photo-1554151228-14d9def656e4?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=386&q=80",
"https://images.unsplash.com/photo-1542909168-82c3e7fdca5c?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxzZWFyY2h8MXx8aHVtYW4lMjBmYWNlfGVufDB8fDB8fA%3D%3D&w=1000&q=80",
"https://images.unsplash.com/photo-1546456073-92b9f0a8d413?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=387&q=80",
"https://images.unsplash.com/photo-1601412436009-d964bd02edbc?ixlib=rb-1.2.1&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=464&q=80",
]
)
return image
demo = gr.Interface(
fn=fake_gan,
inputs=[
gr.Image(label="Initial Image (optional)"),
gr.Markdown("**Parameters**"),
gr.Slider(25, minimum=0, maximum=50, label="TV_scale (for smoothness)"),
gr.Slider(25, minimum=0, maximum=50, label="Range_Scale (out of range RBG)"),
gr.Number(label="Respacing"),
gr.Markdown("**Parameters Two**"),
gr.Slider(25, minimum=0, maximum=50, label="Range_Scale (out of range RBG)"),
gr.Number(label="Respacing"),
gr.Markdown("**Parameters Three**"),
gr.Textbox(label="Respacing"),
],
outputs=gr.Image(label="Generated Image"),
title="FD-GAN",
description="This is a fake demo of a GAN. In reality, the images are randomly chosen from Unsplash.",
)
if __name__ == "__main__":
demo.launch()

View File

@@ -1 +0,0 @@
matplotlib

View File

@@ -1,38 +0,0 @@
import matplotlib.pyplot as plt
import numpy as np
import gradio as gr
def plot_forecast(final_year, companies, noise, show_legend, point_style):
start_year = 2020
x = np.arange(start_year, final_year + 1)
year_count = x.shape[0]
plt_format = ({"cross": "X", "line": "-", "circle": "o--"})[point_style]
fig = plt.figure()
ax = fig.add_subplot(111)
for i, company in enumerate(companies):
series = np.arange(0, year_count, dtype=float)
series = series**2 * (i + 1)
series += np.random.rand(year_count) * noise
ax.plot(x, series, plt_format)
if show_legend:
plt.legend(companies)
plt.close()
return fig
demo = gr.Interface(
plot_forecast,
[
gr.Radio([2025, 2030, 2035, 2040], label="Project to:"),
gr.CheckboxGroup(["Google", "Microsoft", "Gradio"], label="Company Selection"),
gr.Slider(minimum=1, maximum=100, label="Noise Level"),
gr.Checkbox(label="Show Legend"),
gr.Dropdown(["cross", "line", "circle"], label="Style"),
],
gr.Image(plot=True, label="forecast"),
)
if __name__ == "__main__":
demo.launch()

View File

@@ -0,0 +1,11 @@
time,retail,food,other
0,109,145,86
1,35,87,43
2,49,117,34
3,127,66,17
4,39,82,17
5,101,56,79
6,100,129,67
7,17,88,97
8,76,85,145
9,111,106,35

View File

@@ -1,5 +1,5 @@
import random
import os
import gradio as gr
@@ -31,6 +31,9 @@ demo = gr.Interface(
gr.Timeseries(x="time", y=["retail", "food", "other"]),
gr.Label(label="Fraud Level"),
],
examples=[
[os.path.join(os.path.dirname(__file__), "fraud.csv"), ["retail", "food", "other"], 1.0],
],
)
if __name__ == "__main__":
demo.launch()

View File

@@ -18,9 +18,9 @@ def generate_tone(note, octave, duration):
demo = gr.Interface(
generate_tone,
[
gr.inputs.Dropdown(notes, type="index"),
gr.inputs.Slider(4, 6, step=1),
gr.inputs.Textbox(type="number", default=1, label="Duration in seconds"),
gr.Dropdown(notes, type="index"),
gr.Slider(value=4, minimum=4, maximum=6, step=1),
gr.Textbox(value=1, type="number", label="Duration in seconds"),
],
"audio",
)

View File

@@ -80,17 +80,6 @@ def fn(
os.path.join(os.path.dirname(__file__), "files/titanic.csv"),
df1, # Dataframe
np.random.randint(0, 10, (4, 4)), # Dataframe
[
im
for im in [
im1,
im2,
im3,
im4,
os.path.join(os.path.dirname(__file__), "files/cheetah1.jpg"),
]
if im is not None
], # Carousel
df2, # Timeseries
)
@@ -135,7 +124,6 @@ demo = gr.Interface(
gr.File(label="File"),
gr.Dataframe(label="Dataframe"),
gr.Dataframe(label="Numpy"),
gr.Carousel(components="image", label="Carousel"),
gr.Timeseries(x="time", y=["price", "value"], label="Timeseries"),
],
examples=[

View File

@@ -1,18 +1,27 @@
import time
import gradio as gr
import os
def load_mesh(mesh_file_name):
time.sleep(2)
return mesh_file_name
inputs = gr.Model3D()
outputs = gr.Model3D(clear_color=[0.8, 0.2, 0.2, 1.0])
demo = gr.Interface(
fn=load_mesh,
inputs=inputs,
fn=load_mesh,
inputs=inputs,
outputs=outputs,
examples=[["files/Bunny.obj"], ["files/Duck.glb"], ["files/Fox.gltf"],["files/face.obj"]], cache_examples=True
examples=[
[os.path.join(os.path.dirname(__file__), "files/Bunny.obj")],
[os.path.join(os.path.dirname(__file__), "files/Duck.glb")],
[os.path.join(os.path.dirname(__file__), "files/Fox.gltf")],
[os.path.join(os.path.dirname(__file__), "files/face.obj")],
],
cache_examples=True,
)
if __name__ == "__main__":

View File

@@ -12,5 +12,5 @@ def random_sentence():
return sentence_list[random.randint(0, 2)]
demo = gr.Interface(fn=random_sentence, outputs="text")
demo = gr.Interface(fn=random_sentence, inputs=None, outputs="text")
demo.launch()

View File

@@ -1,3 +1,4 @@
numpy
matplotlib
bokeh
plotly

View File

@@ -57,7 +57,7 @@ inputs = [
value=["USA", "Canada"]),
gr.Checkbox(label="Social Distancing?"),
]
outputs = gr.Plot(type="auto")
outputs = gr.Plot()
demo = gr.Interface(fn=outbreak, inputs=inputs, outputs=outputs)
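
gr.Plot() in 3.0 infers the figure type from the returned object (matplotlib, plotly, or bokeh), which is why the explicit type="auto" argument is dropped; the Plot.postprocess change near the end of this commit shows the detection logic. A minimal sketch of a function feeding it, assuming matplotlib with the headless Agg backend used by the other plotting demos in this commit:

import matplotlib
matplotlib.use("Agg")  # headless backend, as in the other plotting demos here
import matplotlib.pyplot as plt
import gradio as gr

def tiny_plot():
    fig = plt.figure()
    plt.plot([2020, 2021, 2022], [1, 4, 9])
    plt.close()
    return fig  # gr.Plot detects the matplotlib Figure and serializes it

demo = gr.Interface(fn=tiny_plot, inputs=None, outputs=gr.Plot())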

View File

@@ -28,7 +28,7 @@ demo = gr.Interface(
sales_projections,
gr.Dataframe(
headers=["Name", "Jan Sales", "Feb Sales", "Mar Sales"],
default=[["Jon", 12, 14, 18], ["Alice", 14, 17, 2], ["Sana", 8, 9.5, 12]],
value=[["Jon", 12, 14, 18], ["Alice", 14, 17, 2], ["Sana", 8, 9.5, 12]],
),
["dataframe", "plot", "numpy"],
description="Enter sales figures for employees to predict sales trajectory over year.",

View File

@@ -1,10 +1,12 @@
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import gradio as gr
def stock_forecast(final_year, companies, noise, show_legend, point_style):
def plot_forecast(final_year, companies, noise, show_legend, point_style):
start_year = 2020
x = np.arange(start_year, final_year + 1)
year_count = x.shape[0]
@@ -18,20 +20,19 @@ def stock_forecast(final_year, companies, noise, show_legend, point_style):
ax.plot(x, series, plt_format)
if show_legend:
plt.legend(companies)
plt.close()
return fig
demo = gr.Interface(
stock_forecast,
plot_forecast,
[
gr.Radio([2025, 2030, 2035, 2040], label="Project to:"),
gr.CheckboxGroup(["Google", "Microsoft", "Gradio"]),
gr.Slider(minimum=1, maximum=100),
"checkbox",
gr.CheckboxGroup(["Google", "Microsoft", "Gradio"], label="Company Selection"),
gr.Slider(minimum=1, maximum=100, label="Noise Level"),
gr.Checkbox(label="Show Legend"),
gr.Dropdown(["cross", "line", "circle"], label="Style"),
],
gr.Image(plot=True, label="forecast"),
gr.Plot(label="forecast"),
)
if __name__ == "__main__":

View File

@@ -1 +1 @@
deepspeech==0.8.2
deepspeech==0.9.3

View File

@@ -1,9 +1,15 @@
from deepspeech import Model
import gradio as gr
import numpy as np
import urllib.request
model_file_path = "deepspeech-0.9.3-models.pbmm"
lm_file_path = "deepspeech-0.9.3-models.scorer"
url = "https://github.com/mozilla/DeepSpeech/releases/download/v0.9.3/"
urllib.request.urlretrieve(url + model_file_path, filename=model_file_path)
urllib.request.urlretrieve(url + lm_file_path, filename=lm_file_path)
model_file_path = "deepspeech-0.8.2-models.pbmm"
lm_file_path = "deepspeech-0.8.2-models.scorer"
beam_width = 100
lm_alpha = 0.93
lm_beta = 1.18
@@ -42,4 +48,4 @@ def transcribe(speech, stream):
demo = gr.Interface(transcribe, ["microphone", "state"], ["text", "state"], live=True)
if __name__ == "__main__":
demo.launch()
demo.launch(debug=True)
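
The hunk above only covers the version bump and the on-start download of the model files; presumably they are wired into the recognizer further down in the demo, outside the hunk, along the lines of the DeepSpeech 0.9 Python API (method names are from that API, not from the diff):

from deepspeech import Model

model = Model(model_file_path)              # acoustic model (.pbmm)
model.enableExternalScorer(lm_file_path)    # external language-model scorer (.scorer)
model.setScorerAlphaBeta(lm_alpha, lm_beta)
model.setBeamWidth(beam_width)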

View File

@@ -11,7 +11,6 @@ from gradio.components import (
JSON,
Audio,
Button,
Carousel,
Chatbot,
Checkbox,
Checkboxgroup,

View File

@@ -3033,111 +3033,6 @@ class Gallery(IOComponent):
)
# max_grid=[3], grid_behavior="scale", height="auto"
class Carousel(IOComponent):
"""
Used to display a list of arbitrary components that can be scrolled through.
Preprocessing: this component does *not* accept input.
Postprocessing: Expects a nested {List[List]} where the inner elements depend on the components in the Carousel.
Demos: disease_report
"""
def __init__(
self,
*,
components: Component | List[Component],
label: Optional[str] = None,
show_label: bool = True,
visible: bool = True,
elem_id: Optional[str] = None,
**kwargs,
):
"""
Parameters:
components (Union[List[OutputComponent], OutputComponent]): Classes of component(s) that will be scrolled through.
label (Optional[str]): component name in interface.
show_label (bool): if True, will display label.
visible (bool): If False, component will be hidden.
"""
if not isinstance(components, list):
components = [components]
self.components = [
get_component_instance(component) for component in components
]
IOComponent.__init__(
self,
label=label,
show_label=show_label,
visible=visible,
elem_id=elem_id,
**kwargs,
)
def get_config(self):
return {
"components": [component.get_config() for component in self.components],
**IOComponent.get_config(self),
}
@staticmethod
def update(
value: Optional[Any] = None,
label: Optional[str] = None,
show_label: Optional[bool] = None,
visible: Optional[bool] = None,
):
return {
"label": label,
"show_label": show_label,
"visible": visible,
"value": value,
"__type__": "update",
}
def postprocess(self, y):
"""
Parameters:
y (List[List[Any]]): carousel output
Returns:
(List[List[Any]]): 2D array, where each sublist represents one set of outputs or 'slide' in the carousel
"""
if isinstance(y, list):
if len(y) != 0 and not isinstance(y[0], list):
y = [[z] for z in y]
output = []
for row in y:
output_row = []
for i, cell in enumerate(row):
output_row.append(self.components[i].postprocess(cell))
output.append(output_row)
return output
else:
raise ValueError("Unknown type. Please provide a list for the Carousel.")
def save_flagged(self, dir, label, data, encryption_key):
return json.dumps(
[
[
component.save_flagged(
dir, f"{label}_{j}", data[i][j], encryption_key
)
for j, component in enumerate(self.components)
]
for i, _ in enumerate(data)
]
)
def restore_flagged(self, dir, data, encryption_key):
return [
[
component.restore_flagged(dir, sample, encryption_key)
for component, sample in zip(self.components, sample_set)
]
for sample_set in json.loads(data)
]
class Chatbot(Changeable, IOComponent):
"""
Displays a chatbot output showing both user submitted messages and responses
@@ -3341,7 +3236,7 @@ class Plot(Changeable, Clearable, IOComponent):
Preprocessing: this component does *not* accept input.
Postprocessing: expects either a {matplotlib.pyplot.Figure}, a {plotly.graph_objects._figure.Figure}, or a {dict} corresponding to a bokeh plot (json_item format)
Demos: outbreak_forecast, blocks_kinematics
Demos: outbreak_forecast, blocks_kinematics, stock_forecast
"""
def __init__(
@@ -3394,7 +3289,6 @@ class Plot(Changeable, Clearable, IOComponent):
(str): plot type
(str): plot base64 or json
"""
dtype = self.type
if isinstance(y, (ModuleType, matplotlib.pyplot.Figure)):
dtype = "matplotlib"
out_y = processing_utils.encode_plot_to_base64(y)

View File

@@ -12,7 +12,6 @@ from typing import Dict, List, Optional
from gradio.components import HTML as C_HTML
from gradio.components import JSON as C_JSON
from gradio.components import Audio as C_Audio
from gradio.components import Carousel as C_Carousel
from gradio.components import Chatbot as C_Chatbot
from gradio.components import Component as Component
from gradio.components import Dataframe as C_Dataframe
@@ -311,7 +310,7 @@ class HTML(C_HTML):
super().__init__(label=label)
class Carousel(C_Carousel):
class Carousel:
"""
Component displays a set of output components that can be scrolled through.
Output type: List[List[Any]]
@@ -328,11 +327,10 @@ class Carousel(C_Carousel):
components (Union[List[OutputComponent], OutputComponent]): Classes of component(s) that will be scrolled through.
label (str): component name in interface.
"""
warnings.warn(
"Usage of gradio.outputs is deprecated, and will not be supported in the future, please import your components from gradio.components",
DeprecationWarning,
raise NotImplementedError(
"The Carousel component has not been implemented in Gradio 3.0. Please "
"consider using the Gallery component instead."
)
super().__init__(components=components, label=label)
class Chatbot(C_Chatbot):
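
Since Carousel now raises NotImplementedError and the message points at Gallery, an image-only carousel (such as the per-channel views from the removed disease_report demo) can be migrated roughly as follows; Gallery only displays images, so any text columns would need a separate output component (this is a sketch, not code from the commit):

import gradio as gr
import numpy as np

def channel_views(img):
    # one copy of the image per RGB channel; Gallery accepts a list of images
    results = []
    for i in range(3):
        color_filter = np.array([0, 0, 0])
        color_filter[i] = 1
        results.append(img * color_filter)
    return results

demo = gr.Interface(channel_views, gr.Image(type="numpy"), gr.Gallery(label="Channels"))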

View File

@@ -47,16 +47,6 @@ class Sketchpad(components.Image):
)
class Plot(components.Image):
def __init__(self, **kwargs):
"""
Custom component
@param kwargs:
"""
self.is_template = True
super().__init__(type="plot", **kwargs)
class Pil(components.Image):
def __init__(self, **kwargs):
"""

View File

@@ -1578,102 +1578,5 @@ class TestHTML(unittest.TestCase):
self.assertEqual(iface.process(["test"])[0], "<strong>test</strong>")
class TestCarousel(unittest.TestCase):
def test_component_functions(self):
"""
Postprocess, get_config, save_flagged, restore_flagged
"""
carousel_output = gr.Carousel(
components=[gr.Textbox(), gr.Image()], label="Disease"
)
output = carousel_output.postprocess(
[
["Hello World", "test/test_files/bus.png"],
["Bye World", "test/test_files/bus.png"],
]
)
self.assertEqual(
output,
[
["Hello World", deepcopy(media_data.BASE64_IMAGE)],
["Bye World", deepcopy(media_data.BASE64_IMAGE)],
],
)
carousel_output = gr.Carousel(components=gr.Textbox(), label="Disease")
output = carousel_output.postprocess([["Hello World"], ["Bye World"]])
self.assertEqual(output, [["Hello World"], ["Bye World"]])
self.assertEqual(
carousel_output.get_config(),
{
"components": [
{
"name": "textbox",
"show_label": True,
"label": None,
"value": "",
"lines": 1,
"max_lines": 20,
"style": {},
"elem_id": None,
"visible": True,
"placeholder": None,
"interactive": None,
}
],
"name": "carousel",
"show_label": True,
"label": "Disease",
"style": {},
"elem_id": None,
"visible": True,
"interactive": None,
},
)
output = carousel_output.postprocess(["Hello World", "Bye World"])
self.assertEqual(output, [["Hello World"], ["Bye World"]])
with self.assertRaises(ValueError):
carousel_output.postprocess("Hello World!")
with tempfile.TemporaryDirectory() as tmpdirname:
to_save = carousel_output.save_flagged(
tmpdirname, "carousel_output", output, None
)
self.assertEqual(to_save, '[["Hello World"], ["Bye World"]]')
restored = carousel_output.restore_flagged(tmpdirname, to_save, None)
self.assertEqual(output, restored)
def test_in_interface(self):
"""
Interface, process
"""
carousel_output = gr.Carousel(
components=[gr.Textbox(), gr.Image()], label="Disease"
)
def report(img):
results = []
for i, mode in enumerate(["Red", "Green", "Blue"]):
color_filter = np.array([0, 0, 0])
color_filter[i] = 1
results.append([mode, img * color_filter])
return results
iface = gr.Interface(report, gr.Image(type="numpy"), carousel_output)
result = iface.process([deepcopy(media_data.BASE64_IMAGE)])
self.assertTrue(result[0][0][0] == "Red")
self.assertTrue(
result[0][0][1].startswith("data:image/png;base64,iVBORw0KGgoAAA")
)
self.assertTrue(result[0][1][0] == "Green")
self.assertTrue(
result[0][1][1].startswith("data:image/png;base64,iVBORw0KGgoAAA")
)
self.assertTrue(result[0][2][0] == "Blue")
self.assertTrue(
result[0][2][1].startswith("data:image/png;base64,iVBORw0KGgoAAA")
)
if __name__ == "__main__":
unittest.main()