Merge branch 'master' into abidlabs/test_launch

This commit is contained in:
Abubakar Abid 2020-08-28 11:05:19 -05:00 committed by GitHub
commit f44d43dc13
46 changed files with 149 additions and 212 deletions

3
.gitignore vendored
View File

@ -20,4 +20,5 @@ demo/models/*
dist/*
*.h5
docs.json
*.bak
*.bak
demo/tmp.zip

View File

@ -1,43 +0,0 @@
import os
# NOTE(review): SimpleHTTPServer is the Python 2 module name; on Python 3 this
# would be http.server. Kept as-is since the surrounding code targets Python 2.
import SimpleHTTPServer


class CORSHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """Static-file request handler that adds a permissive CORS header.

    Identical to SimpleHTTPRequestHandler except that every successful
    response carries ``Access-Control-Allow-Origin: *`` so the served files
    can be fetched cross-origin.
    """

    def send_head(self):
        """Common code for GET and HEAD commands.

        This sends the response code and MIME headers.
        Return value is either a file object (which has to be copied
        to the outputfile by the caller unless the command was HEAD,
        and must be closed by the caller under all circumstances), or
        None, in which case the caller has nothing further to do.
        """
        path = self.translate_path(self.path)
        f = None
        if os.path.isdir(path):
            if not self.path.endswith('/'):
                # redirect browser - doing basically what apache does
                self.send_response(301)
                self.send_header("Location", self.path + "/")
                self.end_headers()
                return None
            for index in "index.html", "index.htm":
                index = os.path.join(path, index)
                if os.path.exists(index):
                    path = index
                    break
            else:
                # No index page found: fall back to a directory listing.
                return self.list_directory(path)
        ctype = self.guess_type(path)
        try:
            # Always read in binary mode. Opening files in text mode may cause
            # newline translations, making the actual size of the content
            # transmitted *less* than the content-length!
            f = open(path, 'rb')
        except IOError:
            self.send_error(404, "File not found")
            return None
        self.send_response(200)
        self.send_header("Content-type", ctype)
        fs = os.fstat(f.fileno())
        # fs[6] is st_size — the Content-Length of the file being served.
        self.send_header("Content-Length", str(fs[6]))
        self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
        # The one behavioral addition over the base handler: allow any origin.
        self.send_header("Access-Control-Allow-Origin", "*")
        self.end_headers()
        return f

View File

@ -13,8 +13,7 @@ from gradio.component import Component
import base64
import numpy as np
import PIL.Image
import PIL.ImageOps
import PIL
import scipy.io.wavfile
from gradio import processing_utils
import pandas as pd
@ -579,4 +578,4 @@ class Microphone(InputComponent):
filename = f'input_{timestamp.strftime("%Y-%m-%d-%H-%M-%S")}.wav'
with open("{}/{}".format(dir, filename), "wb+") as f:
f.write(wav_obj)
return filename
return filename

View File

@ -43,6 +43,9 @@ button, input[type="submit"], input[type="reset"], input[type="button"], select[
max-width: 1028px;
width: 100%;
margin: 0 auto;
padding-left: 24px;
padding-right: 24px;
box-sizing: border-box;
}
.panels {
display: flex;

View File

@ -5,31 +5,30 @@ function gradio(config, fn, target) {
Live at <a class="share-link" target="_blank"></a>.
<button class="share-copy">Copy Link</button>
</div>
<div class="container">
<h1 class="title"></h1>
<p class="description"></p>
<div class="panels">
<div class="panel input_panel">
<div class="input_interfaces">
</div>
<div class="panel_buttons">
<input class="clear panel_button" type="reset" value="CLEAR">
<input class="submit panel_button" type="submit" value="SUBMIT"/>
</div>
<h1 class="title"></h1>
<p class="description"></p>
<div class="panels">
<div class="panel input_panel">
<div class="input_interfaces">
</div>
<div class="panel_buttons">
<input class="clear panel_button" type="reset" value="CLEAR">
<input class="submit panel_button" type="submit" value="SUBMIT"/>
</div>
<div class="panel output_panel">
<div class="loading invisible">
<img class="loading_in_progress" src="/static/img/logo_loading.gif">
<img class="loading_failed" src="/static/img/logo_error.png">
</div>
<div class="panel output_panel">
<div class="loading invisible">
<img class="loading_in_progress" src="/static/img/logo_loading.gif">
<img class="loading_failed" src="/static/img/logo_error.png">
</div>
<div class="output_interfaces">
</div>
<div class="panel_buttons">
<input class="screenshot panel_button" type="button" value="SCREENSHOT"/>
<div class="screenshot_logo">
<img src="/static/img/logo_inline.png">
</div>
<div class="output_interfaces">
</div>
<div class="panel_buttons">
<input class="screenshot panel_button" type="button" value="SCREENSHOT"/>
<div class="screenshot_logo">
<img src="/static/img/logo_inline.png">
</div>
<input class="flag panel_button" type="button" value="FLAG"/>
<input class="flag panel_button" type="button" value="FLAG"/>
</div>
</div>
</div>

View File

@ -46,6 +46,9 @@ const image_input = {
var io = this;
this.source = opts.source;
this.tool = opts.tool;
if (this.tool == "select") {
this.target.find('.edit_holder').hide();
}
$('body').append(this.overlay_html.format(this.id));
this.overlay_target = $(`.overlay[interface_id=${this.id}]`);
if (this.source == "upload") {

View File

@ -41,7 +41,6 @@
<link rel="stylesheet" href="/static/css/style.css">
<link rel="stylesheet" href="/static/css/gradio.css">
<link rel="stylesheet" href="/static/css/interfaces/input/csv.css">
<link rel="stylesheet" href="/static/css/interfaces/input/image.css">
<link rel="stylesheet" href="/static/css/interfaces/input/sketchpad.css">
<link rel="stylesheet" href="/static/css/interfaces/input/textbox.css">
@ -64,7 +63,7 @@
</head>
<body id="lib">
<div id="interface_target"></div>
<div id="interface_target" class="container"></div>
<div id="credit"><a href="https://github.com/gradio-app/gradio" target="_blank">
<img src="static/img/logo_inline.png">
</a></div>
@ -88,12 +87,12 @@
<script src="/static/js/vendor/jexcel.min.js"></script>
<script src="/static/js/vendor/jsuites.min.js"></script>
<script src="/static/js/vendor/cropper.min.js"></script>
<script src="/static/js/vendor/sketchpad.js"></script>
<script src="/static/js/vendor/webcam.min.js"></script>
<script src="/static/js/utils.js"></script>
<script src="/static/js/all_io.js"></script>
<script src="/static/js/interfaces/input/csv.js"></script>
<script src="/static/js/interfaces/input/image.js"></script>
<script src="/static/js/vendor/sketchpad.js"></script>
<script src="/static/js/interfaces/input/sketchpad.js"></script>
<script src="/static/js/interfaces/input/textbox.js"></script>
<script src="/static/js/interfaces/input/radio.js"></script>
@ -104,7 +103,6 @@
<script src="/static/js/interfaces/input/dataframe.js"></script>
<script src="/static/js/interfaces/input/audio.js"></script>
<script src="/static/js/interfaces/input/file.js"></script>
<script src="/static/js/vendor/webcam.min.js"></script>
<script src="/static/js/interfaces/input/webcam.js"></script>
<script src="/static/js/interfaces/input/microphone.js"></script>
<script src="/static/js/interfaces/output/image.js"></script>

View File

@ -1,3 +1,5 @@
# Demo: (Textbox, Textbox) -> (HighlightedText)
import gradio as gr
from difflib import Differ

View File

@ -1,3 +1,5 @@
# Demo: (Image) -> (Label)
import tensorflow as tf
import gradio
import gradio as gr
@ -15,7 +17,7 @@ def recognize_digit(image):
io = gr.Interface(
recognize_digit,
gradio.inputs.Image(shape=(28, 28), image_mode="L", source="canvas"),
"sketchpad",
gradio.outputs.Label(num_top_classes=3),
live=True,
capture_session=True,

View File

@ -1,3 +1,5 @@
# Demo: (Dataframe, Dropdown) -> (Dataframe)
import gradio as gr
import numpy as np
import random

View File

@ -1,3 +1,5 @@
# Demo: (Dropdown, Slider, Textbox) -> (Audio)
import gradio as gr
import numpy as np
@ -8,7 +10,8 @@ def generate_tone(note, octave, duration):
sr = 48000
a4_freq, tones_from_a4 = 440, 12 * (octave - 4) + (note - 9)
frequency = a4_freq * 2 ** (tones_from_a4 / 12)
audio = np.linspace(0, int(duration), int(duration * sr))
duration = int(duration)
audio = np.linspace(0, duration, duration * sr)
audio = (20000 * np.sin(audio * (2 * np.pi * frequency))).astype(np.int16)
return sr, audio

View File

@ -1,23 +1,22 @@
# Demo: (Image) -> (Image)
import gradio as gr
from PIL import ImageFilter
def image_mod(image):
return image.rotate(45), image.filter(ImageFilter.FIND_EDGES)
return image.rotate(45)
io = gr.Interface(
image_mod,
gr.inputs.Image(type="pil", tool="select"),
[
gr.outputs.Image(type="pil"),
gr.outputs.Image(type="pil"),
],
examples=[
["images/cheetah1.jpg"],
["images/cheetah2.jpg"],
["images/lion.jpg"],
], live=True)
io = gr.Interface(image_mod,
gr.inputs.Image(type="pil"),
gr.outputs.Image(type="pil"),
examples=[
["images/cheetah1.jpg"],
["images/cheetah2.jpg"],
["images/lion.jpg"],
],
live=True,
)
io.test_launch()
io.launch()

View File

@ -1,8 +1,9 @@
# Demo: (Dataframe) -> (Dataframe)
import gradio as gr
def transpose(matrix):
print(matrix)
return matrix.T

View File

@ -1,3 +1,5 @@
# Demo: (Audio) -> (Audio)
import gradio as gr
import numpy as np

Binary file not shown.

After

Width:  |  Height:  |  Size: 54 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 54 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 45 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 63 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 46 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 957 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 38 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 36 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 35 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 83 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 355 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 114 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 61 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 576 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 68 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 44 KiB

View File

@ -1,3 +1,5 @@
# Demo: (Slider, Dropdown, Radio, CheckboxGroup, Checkbox) -> (Textbox)
import gradio as gr

View File

@ -1,18 +1,20 @@
# Demo: (Audio) -> (Image)
import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
from scipy import signal
def spectogram(audio):
def spectrogram(audio):
sr, data = audio
data = np.delete(data, 1, 1).reshape(-1)
frequencies, times, spectrogram = signal.spectrogram(data.reshape(-1), sr, window="hamming")
plt.pcolormesh(times, frequencies, np.log10(spectrogram))
frequencies, times, spectrogram_data = signal.spectrogram(data.reshape(-1), sr, window="hamming")
plt.pcolormesh(times, frequencies, np.log10(spectrogram_data))
return plt
io = gr.Interface(spectogram, "audio", "plot")
io = gr.Interface(spectrogram, "audio", "plot")
io.test_launch()
io.launch()

View File

@ -1,3 +1,5 @@
# Demo: (Radio, CheckboxGroup, Slider, Checkbox, Dropdown) -> (Image)
import gradio as gr
import matplotlib.pyplot as plt
import numpy as np

View File

@ -1,3 +1,5 @@
# Demo: (Textbox) -> (HighlightedText, KeyValues, HTML)
import spacy
from spacy import displacy
import gradio as gr
@ -16,8 +18,6 @@ def text_analysis(text):
pos_tokens = []
for token in doc:
pos_count["token_count"] += 1
pos_count[token.pos_] = pos_count.get(token.pos_, 0) + 1
pos_tokens.extend([(token.text, token.pos_), (" ", None)])
return pos_tokens, pos_count, html

View File

@ -1,8 +1,10 @@
# Demo: (Image) -> (Image)
import gradio as gr
def snap(image):
return image
return np.flipud(image)
io = gr.Interface(snap, gr.inputs.Image(shape=(100,100), image_mode="L", source="webcam"), "image")

View File

@ -1,3 +1,5 @@
# Demo: (File) -> (JSON)
import gradio as gr
from zipfile import ZipFile

View File

@ -1,3 +1,5 @@
# Demo: (File, File) -> (File)
import gradio as gr
from zipfile import ZipFile

View File

@ -3,6 +3,31 @@ from gradio.inputs import InputComponent
from gradio.outputs import OutputComponent
from gradio.interface import Interface
import inspect
from os import listdir
from os.path import join
import re
in_demos, out_demos = {}, {}
demo_regex = "# Demo: \((.*)\) -> \((.*)\)"
for demo in listdir("demo"):
if demo.endswith(".py"):
screenshots = listdir(join("demo/screenshots", demo[:-3]))[0]
demoset = [demo, [screenshots]]
with open(join("demo", demo)) as demo_file:
first_line = demo_file.readline()
match = re.match(demo_regex, first_line)
inputs = match.group(1).split(", ")
outputs = match.group(2).split(", ")
for i in inputs:
if i not in in_demos:
in_demos[i] = []
if demoset not in in_demos[i]:
in_demos[i].append(demoset)
for o in outputs:
if o not in out_demos:
out_demos[o] = []
if demoset not in out_demos[o]:
out_demos[o].append(demoset)
def get_params(func):
params_str = inspect.getdoc(func)
@ -33,7 +58,7 @@ def get_params(func):
param_set.insert(0, (params.args[neg_index],))
return param_set, params_doc
def document(cls_set):
def document(cls_set, demos):
docset = []
for cls in cls_set:
inp = {}
@ -45,11 +70,14 @@ def document(cls_set):
inp["type"] = doc.split("\n")[-1].split("type: ")[-1]
inp["params"], inp["params_doc"] = get_params(cls.__init__)
inp["shortcuts"] = list(cls.get_shortcut_implementations().items())
cls_name = cls.__name__
if cls_name in demos:
inp["demos"] = demos.get(cls_name, [])
docset.append(inp)
return docset
inputs = document(InputComponent.__subclasses__())
outputs = document(OutputComponent.__subclasses__())
inputs = document(InputComponent.__subclasses__(), in_demos)
outputs = document(OutputComponent.__subclasses__(), out_demos)
interface_params = get_params(Interface.__init__)
interface = {
"doc": inspect.getdoc(Interface),
@ -67,6 +95,6 @@ with open("docs.json", "w") as docs:
"inputs": inputs,
"outputs": outputs,
"interface": interface,
"launch": launch
"launch": launch,
}, docs)

View File

@ -1,6 +1,6 @@
Metadata-Version: 1.0
Name: gradio
Version: 1.1.6
Version: 1.1.7
Summary: Python library for easily interacting with trained machine learning models
Home-page: https://github.com/gradio-app/gradio-UI
Author: Abubakar Abid

View File

@ -3,7 +3,6 @@ README.md
setup.py
gradio/__init__.py
gradio/component.py
gradio/generate_docs.py
gradio/inputs.py
gradio/interface.py
gradio/networking.py

View File

@ -1,72 +0,0 @@
import json
from gradio.inputs import InputComponent
from gradio.outputs import OutputComponent
from gradio.interface import Interface
import inspect
def get_params(func):
    """Parse parameter metadata for *func* from its docstring.

    Expects each documented parameter on its own line after the summary
    line, in the form ``name (type): description``, with the section
    terminated by a line reading ``Returns``.

    Returns a tuple ``(param_set, params_doc)`` where ``param_set`` is a
    list of ``(name,)`` or ``(name, default_repr)`` tuples covering every
    documented argument (``self`` is always treated as documented), and
    ``params_doc`` is a list of ``(name, type, description)`` tuples.
    """
    params_str = inspect.getdoc(func)
    params_doc = []
    documented_params = {"self"}
    for param_line in params_str.split("\n")[1:]:
        if param_line.strip() == "Returns":
            break
        space_index = param_line.index(" ")
        colon_index = param_line.index(":")
        name = param_line[:space_index]
        documented_params.add(name)
        # Slice out the "(type)" between the first space and the colon, and
        # the free-text description after "): ".
        params_doc.append((name, param_line[space_index+2:colon_index-1], param_line[colon_index+2:]))
    params = inspect.getfullargspec(func)
    # getfullargspec returns None (not an empty tuple) when the function has
    # no defaults; normalize so len()/indexing below cannot raise TypeError.
    defaults = params.defaults or ()
    param_set = []
    for i in range(len(params.args)):
        # Walk the argument list from the end so positions line up with the
        # defaults tuple, which applies to the trailing arguments.
        neg_index = -1 - i
        if params.args[neg_index] not in documented_params:
            continue
        if i < len(defaults):
            default = defaults[neg_index]
            if type(default) == str:
                # Quote string defaults so they render as literals in docs.
                default = '"' + default + '"'
            else:
                default = str(default)
            param_set.insert(0, (params.args[neg_index], default))
        else:
            param_set.insert(0, (params.args[neg_index],))
    return param_set, params_doc
def document(cls_set):
    """Build a documentation record for every class in *cls_set*.

    Classes whose docstring starts with "DEPRECATED" are skipped. The last
    docstring line is expected to carry the component's "type: ..." tag;
    everything before it becomes the description. Constructor parameters
    come from get_params, shortcuts from the class itself.
    """
    entries = []
    for component in cls_set:
        docstring = inspect.getdoc(component)
        if docstring.startswith("DEPRECATED"):
            continue
        doc_lines = docstring.split("\n")
        params, params_doc = get_params(component.__init__)
        entries.append({
            "name": component.__name__,
            "doc": "\n".join(doc_lines[:-1]),
            "type": doc_lines[-1].split("type: ")[-1],
            "params": params,
            "params_doc": params_doc,
            "shortcuts": list(component.get_shortcut_implementations().items()),
        })
    return entries
# Assemble documentation for every input/output component plus the Interface
# constructor and launch() signatures, then dump it to docs.json (consumed by
# the gradio website build).
inputs = document(InputComponent.__subclasses__())
outputs = document(OutputComponent.__subclasses__())
interface_params = get_params(Interface.__init__)
interface = {
    "doc": inspect.getdoc(Interface),
    "params": interface_params[0],
    "params_doc": interface_params[1],
}
launch_params = get_params(Interface.launch)
launch = {
    "params": launch_params[0],
    "params_doc": launch_params[1],
}
# NOTE(review): writes docs.json in the current working directory — assumes
# the script is run from the repository root; confirm against the build setup.
with open("docs.json", "w") as docs:
    json.dump({
        "inputs": inputs,
        "outputs": outputs,
        "interface": interface,
        "launch": launch
    }, docs)

View File

@ -7,17 +7,13 @@ body#lib {
button, input[type="submit"], input[type="reset"], input[type="text"], input[type="button"], select[type="submit"] {
border: none;
font: inherit;
cursor: pointer;
outline: inherit;
-webkit-appearance: none;
}
select {
font: inherit;
}
label, input[type=radio], input[type=checkbox], select, input[type=range] {
cursor: pointer;
}
button, input[type="submit"], input[type="reset"], input[type="button"], select[type="submit"] {
label, input[type=radio], input[type=checkbox], select, input[type=range], button, input[type="submit"], input[type="reset"], input[type="button"], select[type="submit"] {
cursor: pointer;
}
@ -43,6 +39,9 @@ button, input[type="submit"], input[type="reset"], input[type="button"], select[
max-width: 1028px;
width: 100%;
margin: 0 auto;
padding-left: 24px;
padding-right: 24px;
box-sizing: border-box;
}
.panels {
display: flex;

View File

@ -5,31 +5,30 @@ function gradio(config, fn, target) {
Live at <a class="share-link" target="_blank"></a>.
<button class="share-copy">Copy Link</button>
</div>
<div class="container">
<h1 class="title"></h1>
<p class="description"></p>
<div class="panels">
<div class="panel input_panel">
<div class="input_interfaces">
</div>
<div class="panel_buttons">
<input class="clear panel_button" type="reset" value="CLEAR">
<input class="submit panel_button" type="submit" value="SUBMIT"/>
</div>
<h1 class="title"></h1>
<p class="description"></p>
<div class="panels">
<div class="panel input_panel">
<div class="input_interfaces">
</div>
<div class="panel_buttons">
<input class="clear panel_button" type="reset" value="CLEAR">
<input class="submit panel_button" type="submit" value="SUBMIT"/>
</div>
<div class="panel output_panel">
<div class="loading invisible">
<img class="loading_in_progress" src="/static/img/logo_loading.gif">
<img class="loading_failed" src="/static/img/logo_error.png">
</div>
<div class="panel output_panel">
<div class="loading invisible">
<img class="loading_in_progress" src="/static/img/logo_loading.gif">
<img class="loading_failed" src="/static/img/logo_error.png">
</div>
<div class="output_interfaces">
</div>
<div class="panel_buttons">
<input class="screenshot panel_button" type="button" value="SCREENSHOT"/>
<div class="screenshot_logo">
<img src="/static/img/logo_inline.png">
</div>
<div class="output_interfaces">
</div>
<div class="panel_buttons">
<input class="screenshot panel_button" type="button" value="SCREENSHOT"/>
<div class="screenshot_logo">
<img src="/static/img/logo_inline.png">
</div>
<input class="flag panel_button" type="button" value="FLAG"/>
<input class="flag panel_button" type="button" value="FLAG"/>
</div>
</div>
</div>

View File

@ -46,6 +46,9 @@ const image_input = {
var io = this;
this.source = opts.source;
this.tool = opts.tool;
if (this.tool == "select") {
this.target.find('.edit_holder').hide();
}
$('body').append(this.overlay_html.format(this.id));
this.overlay_target = $(`.overlay[interface_id=${this.id}]`);
if (this.source == "upload") {

View File

@ -41,7 +41,6 @@
<link rel="stylesheet" href="/static/css/style.css">
<link rel="stylesheet" href="/static/css/gradio.css">
<link rel="stylesheet" href="/static/css/interfaces/input/csv.css">
<link rel="stylesheet" href="/static/css/interfaces/input/image.css">
<link rel="stylesheet" href="/static/css/interfaces/input/sketchpad.css">
<link rel="stylesheet" href="/static/css/interfaces/input/textbox.css">
@ -64,7 +63,7 @@
</head>
<body id="lib">
<div id="interface_target"></div>
<div id="interface_target" class="container"></div>
<div id="credit"><a href="https://github.com/gradio-app/gradio" target="_blank">
<img src="static/img/logo_inline.png">
</a></div>
@ -88,12 +87,12 @@
<script src="/static/js/vendor/jexcel.min.js"></script>
<script src="/static/js/vendor/jsuites.min.js"></script>
<script src="/static/js/vendor/cropper.min.js"></script>
<script src="/static/js/vendor/sketchpad.js"></script>
<script src="/static/js/vendor/webcam.min.js"></script>
<script src="/static/js/utils.js"></script>
<script src="/static/js/all_io.js"></script>
<script src="/static/js/interfaces/input/csv.js"></script>
<script src="/static/js/interfaces/input/image.js"></script>
<script src="/static/js/vendor/sketchpad.js"></script>
<script src="/static/js/interfaces/input/sketchpad.js"></script>
<script src="/static/js/interfaces/input/textbox.js"></script>
<script src="/static/js/interfaces/input/radio.js"></script>
@ -104,7 +103,6 @@
<script src="/static/js/interfaces/input/dataframe.js"></script>
<script src="/static/js/interfaces/input/audio.js"></script>
<script src="/static/js/interfaces/input/file.js"></script>
<script src="/static/js/vendor/webcam.min.js"></script>
<script src="/static/js/interfaces/input/webcam.js"></script>
<script src="/static/js/interfaces/input/microphone.js"></script>
<script src="/static/js/interfaces/output/image.js"></script>

View File

@ -5,7 +5,7 @@ except ImportError:
setup(
name='gradio',
version='1.1.6',
version='1.1.7',
include_package_data=True,
description='Python library for easily interacting with trained machine learning models',
author='Abubakar Abid',