mirror of https://github.com/gradio-app/gradio.git
synced 2025-01-06 10:25:17 +08:00

commit 42dfc7b7db
Merge branch 'master' of https://github.com/gradio-app/gradio-UI
.gitignore (vendored, 2 changes)
@@ -17,3 +17,5 @@ __pycache__/
*.py[cod]
*$py.class
demo/models/*
dist/*
*.h5
README.md

@@ -42,11 +42,13 @@ The core Interface class is initialized with three parameters:
- `inputs`: the name of the input interface
- `outputs`: the name of the output interface

Calling the `launch()` function of the `Interface` object produces the interface shown in image below.
Calling the `launch()` function of the `Interface` object produces the interface shown in the image below. Click on the screenshot to go to the live interface in our getting started page.

<a href="https://gradio.app/getting_started#interface_4">
<p align="center">
<img src="https://i.ibb.co/Z8p7gLZ/hello-world.png" alt="drawing"/>
</p>
</a>

### 1. Inception Net [![alt text](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1c6gQiW88wKBwWq96nqEwuQ1Kyt5LejiU?usp=sharing)

@@ -98,7 +100,7 @@ You can supply your own model instead of the pretrained model above, as well as
- `Textbox(lines=1, placeholder=None, label=None, numeric=False)`
- `Radio(choices, label=None)`
- `Dropdown(choices, label=None)`
- `CheckboxGroup(choices, label=None)
- `CheckboxGroup(choices, label=None)`
- `Slider(minimum=0, maximum=100, default=None, label=None)`
- `Image(shape=(224, 224, 3), image_mode='RGB', scale=1/127.5, shift=-1, label=None)`
- `Microphone()`
gradio/inputs.py

@@ -40,12 +40,6 @@ class AbstractInput(ABC):
        """
        return {"label": self.label}

    def sample_inputs(self):
        """
        An interface can optionally implement a method that sends a list of sample inputs for inference.
        """
        return []

    def preprocess(self, inp):
        """
        By default, no pre-processing is applied to text.
@@ -67,17 +61,12 @@ class AbstractInput(ABC):


class Sketchpad(AbstractInput):
    def __init__(self, cast_to="numpy", shape=(28, 28), invert_colors=True,
                 flatten=False, scale=1/255, shift=0,
                 dtype='float64', sample_inputs=None, label=None):
    def __init__(self, shape=(28, 28), invert_colors=True,
                 flatten=False, label=None):
        self.image_width = shape[0]
        self.image_height = shape[1]
        self.invert_colors = invert_colors
        self.flatten = flatten
        self.scale = scale
        self.shift = shift
        self.dtype = dtype
        self.sample_inputs = sample_inputs
        super().__init__(label)

    @classmethod
@@ -101,8 +90,6 @@ class Sketchpad(AbstractInput):
            array = np.array(im).flatten().reshape(1, self.image_width * self.image_height)
        else:
            array = np.array(im).flatten().reshape(1, self.image_width, self.image_height)
        array = array * self.scale + self.shift
        array = array.astype(self.dtype)
        return array

    def process_example(self, example):
@@ -136,8 +123,7 @@ class Webcam(AbstractInput):


class Textbox(AbstractInput):
    def __init__(self, sample_inputs=None, lines=1, placeholder=None, default=None, label=None, numeric=False):
        self.sample_inputs = sample_inputs
    def __init__(self, lines=1, placeholder=None, default=None, numeric=False, label=None):
        self.lines = lines
        self.placeholder = placeholder
        self.default = default
@@ -227,7 +213,7 @@ class Slider(AbstractInput):
    @classmethod
    def get_shortcut_implementations(cls):
        return {
            "checkbox": {},
            "slider": {},
        }


@@ -243,8 +229,7 @@ class Checkbox(AbstractInput):


class Image(AbstractInput):
    def __init__(self, cast_to=None, shape=(224, 224), image_mode='RGB', label=None):
        self.cast_to = cast_to
    def __init__(self, shape=(224, 224), image_mode='RGB', label=None):
        self.image_width = shape[0]
        self.image_height = shape[1]
        self.image_mode = image_mode
@@ -264,29 +249,10 @@ class Image(AbstractInput):
            **super().get_template_context()
        }

    def cast_to_base64(self, inp):
        return inp

    def cast_to_im(self, inp):
        return preprocessing_utils.decode_base64_to_image(inp)

    def cast_to_numpy(self, inp):
        im = self.cast_to_im(inp)
        arr = np.array(im).flatten()
        return arr

    def preprocess(self, inp):
        """
        Default preprocessing method is to convert the picture to the configured image mode and resize it
        """
        cast_to_type = {
            "base64": self.cast_to_base64,
            "numpy": self.cast_to_numpy,
            "pillow": self.cast_to_im
        }
        if self.cast_to:
            return cast_to_type[self.cast_to](inp)

        im = preprocessing_utils.decode_base64_to_image(inp)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
@@ -303,6 +269,8 @@ class Image(AbstractInput):


class Microphone(AbstractInput):
    def __init__(self, label=None):
        super().__init__(label)

    def preprocess(self, inp):
        """
gradio/interface.py

@@ -281,7 +281,6 @@ class Interface:
            from_ipynb = get_ipython()
            if "google.colab" in str(from_ipynb):
                is_colab = True
                print("Google colab notebook detected.")
        except NameError:
            data = {'error': 'NameError in launch method'}
            try:
@@ -307,10 +306,10 @@ class Interface:
            print(strings.en["RUNNING_LOCALLY"].format(path_to_local_server))
        else:
            if debug:
                print("This cell will run indefinitely so that you can see errors and logs. To turn off, "
                      "set debug=False in launch().")
                print("Colab notebook detected. This cell will run indefinitely so that you can see errors and logs. "
                      "To turn off, set debug=False in launch().")
            else:
                print("To show errors in colab notebook, set debug=True in launch()")
                print("Colab notebook detected. To show errors in colab notebook, set debug=True in launch()")

        if share:
            try:
@@ -331,6 +330,7 @@ class Interface:
            is_colab
        ):  # For a colab notebook, create a public link even if share is False.
            share_url = networking.setup_tunnel(server_port)
            print("Running on External URL:", share_url)
            if self.verbose:
                print(strings.en["COLAB_NO_LOCAL"])
        else:  # If it's not a colab notebook and share=False, print a message telling them about the share option.
gradio/networking.py

@@ -14,7 +14,7 @@ from gradio.tunneling import create_tunnel
import urllib.request
from shutil import copyfile
import requests
import os
import sys


INITIAL_PORT_VALUE = (
@@ -117,7 +117,6 @@ def get_first_available_port(initial, final):
def serve_files_in_background(interface, port, directory_to_serve=None, server_name=LOCALHOST_NAME):
    class HTTPHandler(SimpleHTTPRequestHandler):
        """This handler uses server.base_path instead of always using os.getcwd()"""

        def _set_headers(self):
            self.send_response(200)
            self.send_header("Content-type", "application/json")
@@ -134,7 +133,6 @@ def serve_files_in_background(interface, port, directory_to_serve=None, server_n

        def do_POST(self):
            # Read body of the request.

            if self.path == "/api/predict/":
                # Make the prediction.
                self._set_headers()
@@ -198,12 +196,13 @@ def serve_files_in_background(interface, port, directory_to_serve=None, server_n

    # Now loop forever
    def serve_forever():
        # try:
        while True:
            # sys.stdout.flush()
            httpd.serve_forever()
        # except (KeyboardInterrupt, OSError):
        #     httpd.server_close()
        try:
            while True:
                sys.stdout.flush()
                httpd.serve_forever()
        except (KeyboardInterrupt, OSError):
            httpd.shutdown()
            httpd.server_close()

    thread = threading.Thread(target=serve_forever, daemon=False)
    thread.start()
@@ -215,13 +214,11 @@ def start_simple_server(interface, directory_to_serve=None, server_name=None):
    port = get_first_available_port(
        INITIAL_PORT_VALUE, INITIAL_PORT_VALUE + TRY_NUM_PORTS
    )
    httpd = serve_files_in_background(
        interface, port, directory_to_serve, server_name)
    httpd = serve_files_in_background(interface, port, directory_to_serve, server_name)
    return port, httpd


def close_server(server):
    server.shutdown()
    server.server_close()
gradio/outputs.py

@@ -10,6 +10,7 @@ import json
from gradio import preprocessing_utils
import datetime
import operator
from numbers import Number

# Where to find the static resources associated with each template.
BASE_OUTPUT_INTERFACE_JS_PATH = 'static/js/interfaces/output/{}.js'
@@ -53,8 +54,8 @@ class Label(AbstractOutput):
        super().__init__(label)

    def postprocess(self, prediction):
        if isinstance(prediction, str):
            return {"label": prediction}
        if isinstance(prediction, str) or isinstance(prediction, Number):
            return {"label": str(prediction)}
        elif isinstance(prediction, dict):
            sorted_pred = sorted(
                prediction.items(),
@@ -104,15 +105,11 @@ class KeyValues(AbstractOutput):


class Textbox(AbstractOutput):
    def __init__(self, lines=1, placeholder=None, label=None):
        self.lines = lines
        self.placeholder = placeholder
    def __init__(self, label=None):
        super().__init__(label)

    def get_template_context(self):
        return {
            "lines": self.lines,
            "placeholder": self.placeholder,
            **super().get_template_context()
        }

@@ -121,7 +118,6 @@ class Textbox(AbstractOutput):
        return {
            "text": {},
            "number": {},
            "textbox": {"lines": 7}
        }

    def postprocess(self, prediction):
@@ -133,7 +129,7 @@ class Textbox(AbstractOutput):


class Image(AbstractOutput):
    def __init__(self, label=None, plot=False):
    def __init__(self, plot=False, label=None):
        self.plot = plot
        super().__init__(label)
@@ -1,5 +1,4 @@
.output_text {
    resize: none;
    width: 100%;
    font-size: 18px;
    outline: none;
@@ -8,4 +7,6 @@
    border: solid 1px black;
    box-sizing: border-box;
    padding: 4px;
    min-height: 30px;
    font-family: monospace;
}
@@ -47,7 +47,11 @@ var io_master_template = {
  }

  if (this.config.live) {
    this.gather();
    var io = this;
    var refresh_lag = this.config.refresh_lag || 0;
    window.setTimeout(function() {
      io.gather();
    }, refresh_lag);
  } else {
    this.target.find(".loading").addClass("invisible");
    this.target.find(".output_interface").removeClass("invisible");
build/lib/gradio/static/js/all_io.js.bak (new file, 73 lines)
@@ -0,0 +1,73 @@
var io_master_template = {
  gather: function() {
    this.clear();
    for (let iface of this.input_interfaces) {
      iface.submit();
    }
  },
  clear: function() {
    this.last_input = new Array(this.input_interfaces.length);
    this.input_count = 0;
  },
  input: function(interface_id, data) {
    this.last_input[interface_id] = data;
    this.input_count += 1;
    if (this.input_count == this.input_interfaces.length) {
      this.submit();
    }
  },
  submit: function() {
    let io = this;
    if (!this.config.live) {
      this.target.find(".loading").removeClass("invisible");
      this.target.find(".loading_in_progress").show();
      this.target.find(".loading_failed").hide();
      this.target.find(".output_interface").addClass("invisible");
      this.target.find(".output_interfaces .panel_header").addClass("invisible");
    }
    this.fn(this.last_input).then((output) => {
      io.output(output);
    }).catch((error) => {
      console.error(error);
      this.target.find(".loading_in_progress").hide();
      this.target.find(".loading_failed").show();
    })
  },
  output: function(data) {
    this.last_output = data["data"];

    for (let i = 0; i < this.output_interfaces.length; i++) {
      this.output_interfaces[i].output(data["data"][i]);
    }
    if (data["durations"]) {
      let ratio = this.output_interfaces.length / data["durations"].length;
      for (let i = 0; i < this.output_interfaces.length; i = i + ratio) {
        this.output_interfaces[i].target.parent().find(`.loading_time[interface="${i + ratio - 1}"]`).text("Latency: " + ((data["durations"][i / ratio])).toFixed(2) + "s");
      }
    }

    if (this.config.live) {
      this.gather();
    } else {
      this.target.find(".loading").addClass("invisible");
      this.target.find(".output_interface").removeClass("invisible");
      this.target.find(".output_interfaces .panel_header").removeClass("invisible");
    }
  },
  flag: function(message) {
    var post_data = {
      'data': {
        'input_data': toStringIfObject(this.last_input),
        'output_data': toStringIfObject(this.last_output),
        'message': message
      }
    }
    $.ajax({type: "POST",
      url: "/api/flag/",
      data: JSON.stringify(post_data),
      success: function(output){
        console.log("Flagging successful")
      },
    });
  }
};
@@ -1,13 +1,7 @@
const textbox_output = {
  html: `<textarea readonly class="output_text"></textarea>`,
  html: `<div class="output_text"></div>`,
  init: function(opts) {
    if (opts.lines) {
      this.target.find(".output_text").attr("rows", opts.lines).css("height", "auto");
      this.target.css("height", "auto");
    }
    if (opts.placeholder) {
      this.target.find(".output_text").attr("placeholder", opts.placeholder)
    }
    this.target.css("height", "auto");
  },
  output: function(data) {
    this.target.find(".output_text").text(data);
@@ -31,7 +31,7 @@
      <a href="https://gradio.app"><img src="../static/img/logo_inline.png" /></a>
    </nav>
    <div id="share" class="invisible">
      Live at <a id="share-link"></a>.
      Live at <a id="share-link" target="_blank"></a>.
      <button id="share-copy">Copy Link</button>
    </div>
    <div class="container">
@@ -40,7 +40,7 @@
      </div>
      <div id="interface_target" class="container"></div>
      <div id="examples" class="container invisible">
        <h3>Examples</h3>
        <h3>Examples <small>(click to load)</small></h3>
        <table>
        </table>
      </div>
@@ -1,48 +0,0 @@
#!/usr/bin/env python
# coding: utf-8

# In[2]:


# installing transformers
# !pip install -q git+https://github.com/huggingface/transformers.git
# !pip install -q tensorflow==2.1


# In[12]:


import tensorflow as tf
from transformers import TFGPT2LMHeadModel, GPT2Tokenizer
import gradio


# In[4]:


tokenizer = GPT2Tokenizer.from_pretrained("gpt2")

# add the EOS token as PAD token to avoid warnings
model = TFGPT2LMHeadModel.from_pretrained("gpt2", pad_token_id=tokenizer.eos_token_id)


# In[15]:


def predict(inp):
    input_ids = tokenizer.encode(inp, return_tensors='tf')
    beam_output = model.generate(input_ids, max_length=49, num_beams=5, no_repeat_ngram_size=2, early_stopping=True)
    output = tokenizer.decode(beam_output[0], skip_special_tokens=True, clean_up_tokenization_spaces=True)
    return ".".join(output.split(".")[:-1]) + "."


# In[18]:


gradio.Interface(predict,"textbox","textbox").launch(inbrowser=True)


# In[ ]:
@@ -1,5 +1,5 @@
import gradio as gr
from time import sleep


def answer_question(quantity, animal, place, activity_list, morning, etc):
    return f"""The {quantity} {animal}s went to the {place} where they {" and ".join(activity_list)} until the {"morning" if morning else "night"}""", "OK"
@@ -15,8 +15,8 @@ gr.Interface(answer_question,
        gr.inputs.Textbox(default="What else?"),
    ],
    [
        gr.outputs.Textbox(lines=8),
        gr.outputs.Textbox(lines=1),
        gr.outputs.Textbox(),
        gr.outputs.Textbox(),
    ],
    examples=[
        [2, "cat", "park", ["ran", "swam"], True],
@@ -3,44 +3,22 @@ import gradio
import os
from tensorflow.keras.layers import *
import gradio as gr
from urllib.request import urlretrieve

(x_train, y_train),(x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train, x_test = x_train.reshape(-1,784) / 255.0, x_test.reshape(-1,784) / 255.0

def get_trained_model(n):
    model = tf.keras.models.Sequential()
    model.add(Reshape((28, 28, 1), input_shape=(784,)))
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu'))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    model.fit(x_train[:n], y_train[:n], epochs=2)
    print(model.evaluate(x_test, y_test))
    return model
urlretrieve("https://gr-models.s3-us-west-2.amazonaws.com/mnist-model.h5", "mnist-model.h5")
model = tf.keras.models.load_model("mnist-model.h5")

if not os.path.exists("models/mnist.h5"):
    model = get_trained_model(n=50000)
    model.save('models/mnist.h5')
else:
    model = tf.keras.models.load_model('models/mnist.h5')

graph = tf.get_default_graph()
sess = tf.keras.backend.get_session()

def recognize_digit(image):
    with graph.as_default():
        with sess.as_default():
            prediction = model.predict(image).tolist()[0]
    prediction = model.predict(image).tolist()[0]
    return {str(i): prediction[i] for i in range(10)}


gr.Interface(
    recognize_digit,
    gradio.inputs.Sketchpad(flatten=True),
    gradio.outputs.Label(num_top_classes=3),
    live=True
).launch()
    live=True,
    capture_session=True,
).launch()
@@ -2,6 +2,7 @@ import gradio as gr
import numpy as np
from time import time


def flip(image):
    start = time()
    return image, {
@@ -9,14 +10,15 @@ def flip(image):
        "2": 0.8
    }


def flip2(image):
    start = time()
    return np.fliplr(image), time() - start
    return np.fliplr(image), "stuff"


gr.Interface(flip2,
gr.Interface([flip, flip2],
             gr.inputs.Image(shape=(50, 50, 3)),
             ["image", "text"],
             ["image", "label"],
             examples=[
                 ["images/cheetah1.jpg"],
                 ["images/cheetah2.jpg"],
@@ -3,6 +3,7 @@ import random
import matplotlib.pyplot as plt
import numpy as np


def plot_forecast(final_year, companies, noise, show_legend, point_style):
    start_year = 2020
    x = np.arange(start_year, final_year + 1)
@@ -17,6 +18,7 @@ def plot_forecast(final_year, companies, noise, show_legend, point_style):
    plt.legend(companies)
    return plt


gr.Interface(plot_forecast,
             [
                 gr.inputs.Radio([2025, 2030, 2035, 2040],
@@ -1,177 +0,0 @@
#!/usr/bin/env python
# coding: utf-8

# In[9]:


import tensorflow as tf
import sys
import gradio
from tensorflow.keras.layers import *
from tensorflow.keras.datasets import imdb
import json
from tensorflow.keras import backend as K
import numpy as np


# In[2]:


top_words = 5000  # Only keep the 5,000 most frequent words
max_word_length = 500  # The maximum length of the review should be 500 words (trim/pad otherwise)

# (X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words);
# # save np.load
np_load_old = np.load
np.load = lambda *a,**k: np_load_old(*a, allow_pickle=True, **k)

# # # call load_data with allow_pickle implicitly set to true
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words);

# # restore np.load for future normal usage
np.load = np_load_old

X_train = tf.keras.preprocessing.sequence.pad_sequences(X_train, maxlen=max_word_length)
X_test = tf.keras.preprocessing.sequence.pad_sequences(X_test, maxlen=max_word_length)


def get_trained_model(n):
    model = tf.keras.models.Sequential()
    model.add(Embedding(top_words, 32, input_length=max_word_length))
    model.add(Dropout(0.2))
    model.add(Conv1D(250, 3, padding='valid', activation='relu', strides=1))
    model.add(GlobalMaxPooling1D())
    model.add(Dense(250))
    model.add(Dropout(0.2))
    model.add(Activation('relu'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.fit(X_train[:n], y_train[:n], epochs=1, batch_size=128)
    print(model.evaluate(X_test[:n], y_test[:n]))
    return model


# In[3]:


model = get_trained_model(n=1000)  #25000


# In[4]:


graph = tf.get_default_graph()
sess = tf.keras.backend.get_session()


# In[5]:


NUM_SPECIAL_TOKENS = 3
PAD_TOKEN = 0
START_TOKEN = 1
UNK_TOKEN = 2

word_to_id = tf.keras.datasets.imdb.get_word_index()
word_to_id = {k: (v + NUM_SPECIAL_TOKENS) for k, v in word_to_id.items()}

id_to_word = {value: key for key, value in word_to_id.items()}
id_to_word[PAD_TOKEN] = ""  # Padding tokens are converted to empty strings.
id_to_word[START_TOKEN] = ""  # Start tokens are converted to empty strings.
id_to_word[UNK_TOKEN] = "UNK"  # <UNK> tokens are converted to "UNK".


def decode_vector_to_text(vector):
    text = " ".join(id_to_word[id] for id in vector if id >= 2)
    return text


def encode_text_to_vector(text, max_word_length=500, top_words=5000):
    text_vector = text.split(" ")
    encoded_vector = [
        word_to_id.get(element, UNK_TOKEN) if word_to_id.get(element, UNK_TOKEN) < top_words else UNK_TOKEN for element
        in text_vector]
    encoded_vector = [START_TOKEN] + encoded_vector
    if len(encoded_vector) < max_word_length:
        encoded_vector = (max_word_length - len(encoded_vector)) * [PAD_TOKEN] + encoded_vector
    else:
        encoded_vector = encoded_vector[:max_word_length]
    return encoded_vector


def preprocessing(text):
    new = encode_text_to_vector(text)
    return tf.keras.preprocessing.sequence.pad_sequences([new], maxlen=max_word_length)


def postprocessing(pred):
    return {
        "Positive review": f"{pred[0][0]}",
        "Negative review": f"{1-pred[0][0]}"
    }

def predict(inp):
    inp = preprocessing(inp)
    with graph.as_default():
        with sess.as_default():
            prediction = model.predict(inp)
    prediction = postprocessing(prediction)
    return prediction


def saliency(input, output):
    with graph.as_default():
        with sess.as_default():
            processed_input = preprocessing(input)
            processed_output = output

            output = 0 if float(output["Positive review"]) > 0.5 else 1
            input_tensors = [model.layers[0].input, K.learning_phase()]
            saliency_input = model.layers[1].input
            saliency_output = model.layers[-1].output[:, output]
            gradients = model.optimizer.get_gradients(saliency_output, saliency_input)
            compute_gradients = K.function(inputs=input_tensors, outputs=gradients)
            saliency_graph = compute_gradients(processed_input.reshape(1, 500))[0]

            saliency_graph = saliency_graph.reshape(500, 32)

            saliency_graph = np.abs(saliency_graph).sum(axis=1)
            normalized_saliency = (saliency_graph - saliency_graph.min()) / (saliency_graph.max() - saliency_graph.min())

            start_idx = np.where(processed_input[0] == START_TOKEN)[0][0]
            heat_map = []
            counter = 0
            words = input.split(" ")
            for i in range(start_idx + 1, 500):
                heat_map.extend([normalized_saliency[i]] * len(words[counter]))
                heat_map.append(0)  # zero saliency value assigned to the spaces between words
                counter += 1
            return np.array(heat_map)


# In[6]:


textbox = gradio.inputs.Textbox()
label = gradio.outputs.Label()
interface = gradio.Interface(inputs=textbox, outputs=label, fn=predict, saliency=saliency)


# In[8]:


interface.launch(inbrowser=True, share=False)


# In[ ]:




# In[ ]:
|
||||
def snap(image):
|
||||
return image
|
||||
|
||||
gr.Interface(snap, gr.inputs.Webcam(shape=(50,100)), "image").launch()
|
||||
gr.Interface(snap, gr.inputs.Webcam(shape=(50,100)), "image").launch()
|
||||
|
gradio-0.9.9.5/MANIFEST.in (new file, 2 lines)

@@ -0,0 +1,2 @@
recursive-include gradio/static *
recursive-include gradio/templates *
gradio-0.9.9.5/README.md (new file, 168 lines)
@@ -0,0 +1,168 @@
[![CircleCI](https://circleci.com/gh/gradio-app/gradio-UI.svg?style=svg)](https://circleci.com/gh/gradio-app/gradio) [![PyPI version](https://badge.fury.io/py/gradio.svg)](https://badge.fury.io/py/gradio)

# Gradio UI

<img src="https://i.ibb.co/GHRk2JP/header-2.png" alt="drawing" width="1000"/>

At Gradio, we often try to understand what inputs a model is particularly sensitive to. To help facilitate this, we've developed and open-sourced `gradio`, a python library that lets you quickly create input and output interfaces over trained models, so you can "play around" with your model in your browser by dragging and dropping in your own images (or pasting your own text, recording your own voice, etc.) and seeing what the model outputs. `gradio` can also generate a share link, which allows anyone, anywhere, to use the interface while the model continues to run on your machine.
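As a minimal sketch of that sharing flow (based only on the `launch()` signature in `gradio/interface.py` later in this commit; the printed URL is illustrative, not guaranteed output):

```python
import gradio as gr

def greet(name):
    return "Hello " + name + "!"

# share=True asks gradio to open a public tunnel to the local server,
# so the printed "External URL" can be used by anyone while this runs.
gr.Interface(fn=greet, inputs="text", outputs="text").launch(share=True)
```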
Gradio is useful for:
* Creating demos of your machine learning code for clients / collaborators / users
* Getting feedback on model performance from users
* Debugging your model interactively during development

To get a sense of `gradio`, take a look at a few of these examples, and find more on our website: www.gradio.app.

## Installation
```
pip install gradio
```
(you may need to replace `pip` with `pip3` if you're running `python3`).

## Usage

Gradio is very easy to use with your existing code. Here are a few working examples:

### 0. Hello World [![alt text](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/18ODkJvyxHutTN0P5APWyGFO_xwNcgHDZ?usp=sharing)

Let's start with a basic function (no machine learning yet!) that greets an input name. We'll wrap the function with a `Text` to `Text` interface.

```python
import gradio as gr

def greet(name):
    return "Hello " + name + "!"

gr.Interface(fn=greet, inputs="text", outputs="text").launch()
```

The core Interface class is initialized with three parameters:

- `fn`: the function to wrap
- `inputs`: the name of the input interface
- `outputs`: the name of the output interface

Calling the `launch()` function of the `Interface` object produces the interface shown in the image below. Click on the screenshot to go to the live interface in our getting started page.

<a href="https://gradio.app/getting_started#interface_4">
<p align="center">
<img src="https://i.ibb.co/Z8p7gLZ/hello-world.png" alt="drawing"/>
</p>
</a>

### 1. Inception Net [![alt text](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1c6gQiW88wKBwWq96nqEwuQ1Kyt5LejiU?usp=sharing)

Now, let's do a machine learning example. We're going to wrap an interface around the InceptionV3 image classifier, which we'll load using Tensorflow! Since this is an image classification model, we will use the `Image` input interface. We'll output a dictionary of labels and their corresponding confidence scores with the `Label` output interface. (The original Inception Net architecture [can be found here](https://arxiv.org/abs/1409.4842).)

```python
import gradio as gr
import tensorflow as tf
import numpy as np
import requests

inception_net = tf.keras.applications.InceptionV3()  # load the model

# Download human-readable labels for ImageNet.
response = requests.get("https://git.io/JJkYN")
labels = response.text.split("\n")

def classify_image(inp):
    inp = inp.reshape((-1, 299, 299, 3))
    inp = tf.keras.applications.inception_v3.preprocess_input(inp)
    prediction = inception_net.predict(inp).flatten()
    return {labels[i]: float(prediction[i]) for i in range(1000)}

image = gr.inputs.Image(shape=(299, 299, 3))
label = gr.outputs.Label(num_top_classes=3)

gr.Interface(fn=classify_image, inputs=image, outputs=label).launch()
```
This code will produce the interface below. The interface gives you a way to test Inception Net by dragging and dropping images, and also allows you to naturally modify the input image using the image editing tools that appear when you click EDIT. Notice here we provided actual `gradio.inputs` and `gradio.outputs` objects to the Interface function instead of using string shortcuts. This lets us use built-in preprocessing (e.g. image resizing) and postprocessing (e.g. choosing the number of labels to display) provided by these interfaces.

<p align="center">
<img src="https://i.ibb.co/BtRNc62/inception-net.png" alt="drawing"/>
</p>
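To make the contrast with string shortcuts concrete, here is a hedged sketch using the shortcut names registered in `gradio/inputs.py` and `gradio/outputs.py` in this release: the string form picks up each interface's default parameters, while the object form lets you configure preprocessing and postprocessing.

```python
import gradio as gr

# String shortcuts: terse, but each interface keeps its default parameters.
gr.Interface(fn=classify_image, inputs="image", outputs="label")

# Explicit objects: the same interfaces, configured for this model.
gr.Interface(fn=classify_image,
             inputs=gr.inputs.Image(shape=(299, 299, 3)),
             outputs=gr.outputs.Label(num_top_classes=3))
```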

You can supply your own model instead of the pretrained model above, as well as use different kinds of models or functions. Here's a list of the interfaces we currently support, along with their preprocessing / postprocessing parameters:

**Input Interfaces**:
- `Sketchpad(shape=(28, 28), invert_colors=True, flatten=False, scale=1/255, shift=0, dtype='float64')`
- `Webcam(image_width=224, image_height=224, num_channels=3, label=None)`
- `Textbox(lines=1, placeholder=None, label=None, numeric=False)`
- `Radio(choices, label=None)`
- `Dropdown(choices, label=None)`
- `CheckboxGroup(choices, label=None)`
- `Slider(minimum=0, maximum=100, default=None, label=None)`
- `Image(shape=(224, 224, 3), image_mode='RGB', scale=1/127.5, shift=-1, label=None)`
- `Microphone()`

**Output Interfaces**:
- `Label(num_top_classes=None, label=None)`
- `KeyValues(label=None)`
- `Textbox(lines=1, placeholder=None, label=None)`
- `Image(label=None, plot=False)`

Interfaces can also be combined together, for multiple-input or multiple-output models.
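For example, a multiple-input, multiple-output sketch (modeled loosely on the demo scripts updated in this commit; the function and labels are illustrative):

```python
import gradio as gr

def greet(name, times):
    # One function can feed several output interfaces
    # by returning one value per output, as a tuple.
    message = ("Hello " + name + "! ") * int(times)
    return message, len(message)

gr.Interface(fn=greet,
             inputs=[gr.inputs.Textbox(), gr.inputs.Slider(minimum=1, maximum=5)],
             outputs=[gr.outputs.Textbox(), "label"]).launch()
```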

### 2. Real-Time MNIST [![alt text](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1LXJqwdkZNkt1J_yfLWQ3FLxbG2cAF8p4?usp=sharing)

Let's wrap a fun `Sketchpad`-to-`Label` UI around MNIST. For this example, we'll take advantage of the `live` feature in the library. Set `live=True` inside `Interface()` to have it run continuous predictions. We've abstracted the model training from the code below, but you can see the full code on the colab link.

```python
import tensorflow as tf
import gradio as gr
from urllib.request import urlretrieve

urlretrieve("https://gr-models.s3-us-west-2.amazonaws.com/mnist-model.h5","mnist-model.h5")
model = tf.keras.models.load_model("mnist-model.h5")

def recognize_digit(inp):
    prediction = model.predict(inp.reshape(1, 28, 28, 1)).tolist()[0]
    return {str(i): prediction[i] for i in range(10)}

sketchpad = gr.inputs.Sketchpad()
label = gr.outputs.Label(num_top_classes=3)

gr.Interface(fn=recognize_digit, inputs=sketchpad,
             outputs=label, live=True).launch()
```

This code will produce the interface below.

<p align="center">
<img src="https://i.ibb.co/9n2mGgk/mnist-live.png" alt="drawing"/>
</p>

## Contributing:
If you would like to contribute and your contribution is small, you can directly open a pull request (PR). If you would like to contribute a larger feature, we recommend first creating an issue with a proposed design for discussion. Please see our contributing guidelines for more info.

## License:
Gradio is licensed under the Apache License 2.0

## See more:

You can find many more examples (like GPT-2, model comparison, multiple inputs, and numerical interfaces) as well as more info on usage on our website: www.gradio.app

See, also, the accompanying paper: ["Gradio: Hassle-Free Sharing and Testing of ML Models in the Wild"](https://arxiv.org/pdf/1906.02569.pdf), *ICML HILL 2019*, and please use the citation below.

```
@article{abid2019gradio,
  title = {Gradio: Hassle-Free Sharing and Testing of ML Models in the Wild},
  author = {Abid, Abubakar and Abdalla, Ali and Abid, Ali and Khan, Dawood and Alfozan, Abdulrahman and Zou, James},
  journal = {arXiv preprint arXiv:1906.02569},
  year = {2019}
}
```
gradio-0.9.9.5/gradio.egg-info/PKG-INFO (new file, 11 lines)
@@ -0,0 +1,11 @@
Metadata-Version: 1.0
Name: gradio
Version: 0.9.9.5
Summary: Python library for easily interacting with trained machine learning models
Home-page: https://github.com/gradio-app/gradio-UI
Author: Abubakar Abid
Author-email: a12d@stanford.edu
License: UNKNOWN
Description: UNKNOWN
Keywords: machine learning,visualization,reproducibility
Platform: UNKNOWN
gradio-0.9.9.5/gradio.egg-info/SOURCES.txt (new file, 89 lines)
@@ -0,0 +1,89 @@
MANIFEST.in
README.md
setup.py
gradio/__init__.py
gradio/inputs.py
gradio/interface.py
gradio/networking.py
gradio/outputs.py
gradio/preprocessing_utils.py
gradio/strings.py
gradio/tunneling.py
gradio/validation_data.py
gradio.egg-info/PKG-INFO
gradio.egg-info/SOURCES.txt
gradio.egg-info/dependency_links.txt
gradio.egg-info/requires.txt
gradio.egg-info/top_level.txt
gradio/static/apple-app-site-association
gradio/static/css/gradio.css
gradio/static/css/loading.css
gradio/static/css/style.css
gradio/static/css/interfaces/input/checkbox_group.css
gradio/static/css/interfaces/input/csv.css
gradio/static/css/interfaces/input/dropdown.css
gradio/static/css/interfaces/input/image.css
gradio/static/css/interfaces/input/microphone.css
gradio/static/css/interfaces/input/radio.css
gradio/static/css/interfaces/input/sketchpad.css
gradio/static/css/interfaces/input/slider.css
gradio/static/css/interfaces/input/textbox.css
gradio/static/css/interfaces/input/webcam.css
gradio/static/css/interfaces/output/image.css
gradio/static/css/interfaces/output/key_values.css
gradio/static/css/interfaces/output/label.css
gradio/static/css/interfaces/output/textbox.css
gradio/static/css/vendor/tui-color-picker.css
gradio/static/css/vendor/tui-image-editor.css
gradio/static/img/logo.png
gradio/static/img/logo_error.png
gradio/static/img/logo_inline.png
gradio/static/img/logo_loading.gif
gradio/static/img/logo_mini.png
gradio/static/img/logo_only.png
gradio/static/img/mic.png
gradio/static/img/mic_recording.png
gradio/static/img/table.png
gradio/static/img/webcam.png
gradio/static/img/vendor/icon-a.svg
gradio/static/img/vendor/icon-b.svg
gradio/static/img/vendor/icon-c.svg
gradio/static/img/vendor/icon-d.svg
gradio/static/js/all_io.js
gradio/static/js/all_io.js.bak
gradio/static/js/gradio.js
gradio/static/js/utils.js
gradio/static/js/interfaces/input/checkbox.js
gradio/static/js/interfaces/input/checkbox_group.js
gradio/static/js/interfaces/input/dropdown.js
gradio/static/js/interfaces/input/image.js
gradio/static/js/interfaces/input/microphone.js
gradio/static/js/interfaces/input/radio.js
gradio/static/js/interfaces/input/sketchpad.js
gradio/static/js/interfaces/input/slider.js
gradio/static/js/interfaces/input/textbox.js
gradio/static/js/interfaces/input/webcam.js
gradio/static/js/interfaces/output/image.js
gradio/static/js/interfaces/output/key_values.js
gradio/static/js/interfaces/output/label.js
gradio/static/js/interfaces/output/textbox.js
gradio/static/js/vendor/FileSaver.min.js
gradio/static/js/vendor/black-theme.js
gradio/static/js/vendor/fabric.js
gradio/static/js/vendor/jquery.min.js
gradio/static/js/vendor/p5.dom.min.js
gradio/static/js/vendor/p5.min.js
gradio/static/js/vendor/p5.sound.min.js
gradio/static/js/vendor/papaparse.min.js
gradio/static/js/vendor/sketchpad.js
gradio/static/js/vendor/tui-code-snippet.min.js
gradio/static/js/vendor/tui-color-picker.js
gradio/static/js/vendor/tui-image-editor.js
gradio/static/js/vendor/wavesurfer.min.js
gradio/static/js/vendor/webcam.min.js
gradio/static/js/vendor/white-theme.js
gradio/templates/index.html
test/test_inputs.py
test/test_interface.py
test/test_networking.py
test/test_outputs.py
gradio-0.9.9.5/gradio.egg-info/dependency_links.txt (new file, 1 line)

@@ -0,0 +1 @@

gradio-0.9.9.5/gradio.egg-info/requires.txt (new file, 7 lines)

@@ -0,0 +1,7 @@
numpy
requests
paramiko
scipy
IPython
scikit-image
analytics-python
gradio-0.9.9.5/gradio.egg-info/top_level.txt (new file, 1 line)

@@ -0,0 +1 @@
gradio
gradio-0.9.9.5/gradio/__init__.py (new file, 1 line)

@@ -0,0 +1 @@
from gradio.interface import *  # This makes it possible to import `Interface` as `gradio.Interface`.
gradio-0.9.9.5/gradio/inputs.py (new file, 288 lines)
@@ -0,0 +1,288 @@
"""
This module defines various classes that can serve as the `input` to an interface. Each class must inherit from
`AbstractInput`, and each class must define a path to its template. All of the subclasses of `AbstractInput` are
automatically added to a registry, which allows them to be easily referenced in other parts of the code.
"""

from abc import ABC, abstractmethod
from gradio import preprocessing_utils, validation_data
import numpy as np
import PIL.Image, PIL.ImageOps
import time
import warnings
import json
import datetime
import os

# Where to find the static resources associated with each template.
# BASE_INPUT_INTERFACE_TEMPLATE_PATH = 'static/js/interfaces/input/{}.js'
BASE_INPUT_INTERFACE_JS_PATH = 'static/js/interfaces/input/{}.js'


class AbstractInput(ABC):
    """
    An abstract class for defining the methods that all gradio inputs should have.
    When this is subclassed, it is automatically added to the registry
    """
    def __init__(self, label):
        self.label = label

    def get_validation_inputs(self):
        """
        An interface can optionally implement a method that returns a list of example inputs that it should be able to
        accept and preprocess for validation purposes.
        """
        return []

    def get_template_context(self):
        """
        :return: a dictionary with context variables for the javascript file associated with the context
        """
        return {"label": self.label}

    def preprocess(self, inp):
        """
        By default, no pre-processing is applied to text.
        """
        return inp

    def process_example(self, example):
        """
        Preprocess example for UI
        """
        return example

    @classmethod
    def get_shortcut_implementations(cls):
        """
        Return dictionary of shortcut implementations
        """
        return {}


class Sketchpad(AbstractInput):
    def __init__(self, shape=(28, 28), invert_colors=True,
                 flatten=False, label=None):
        self.image_width = shape[0]
        self.image_height = shape[1]
        self.invert_colors = invert_colors
        self.flatten = flatten
        super().__init__(label)

    @classmethod
    def get_shortcut_implementations(cls):
        return {
            "sketchpad": {},
        }

    def preprocess(self, inp):
        """
        Default preprocessing method for the SketchPad is to convert the sketch to black and white and resize to 28x28
        """
        im_transparent = preprocessing_utils.decode_base64_to_image(inp)
        im = PIL.Image.new("RGBA", im_transparent.size, "WHITE")  # Create a white background for the alpha channel
        im.paste(im_transparent, (0, 0), im_transparent)
        im = im.convert('L')
        if self.invert_colors:
            im = PIL.ImageOps.invert(im)
        im = im.resize((self.image_width, self.image_height))
        if self.flatten:
            array = np.array(im).flatten().reshape(1, self.image_width * self.image_height)
        else:
            array = np.array(im).flatten().reshape(1, self.image_width, self.image_height)
        return array

    def process_example(self, example):
        return preprocessing_utils.convert_file_to_base64(example)


class Webcam(AbstractInput):
    def __init__(self, shape=(224, 224), label=None):
        self.image_width = shape[0]
        self.image_height = shape[1]
        self.num_channels = 3
        super().__init__(label)

    def get_validation_inputs(self):
        return validation_data.BASE64_COLOR_IMAGES

    @classmethod
    def get_shortcut_implementations(cls):
        return {
            "webcam": {},
        }

    def preprocess(self, inp):
        """
        Default preprocessing method is to convert the picture to RGB and resize to the configured shape
        """
        im = preprocessing_utils.decode_base64_to_image(inp)
        im = im.convert('RGB')
        im = preprocessing_utils.resize_and_crop(im, (self.image_width, self.image_height))
        return np.array(im)


class Textbox(AbstractInput):
    def __init__(self, lines=1, placeholder=None, default=None, numeric=False, label=None):
        self.lines = lines
        self.placeholder = placeholder
        self.default = default
        self.numeric = numeric
        super().__init__(label)

    def get_validation_inputs(self):
        return validation_data.ENGLISH_TEXTS

    def get_template_context(self):
        return {
            "lines": self.lines,
            "placeholder": self.placeholder,
            "default": self.default,
            **super().get_template_context()
        }

    @classmethod
    def get_shortcut_implementations(cls):
        return {
            "text": {},
            "textbox": {"lines": 7},
            "number": {"numeric": True}
        }

    def preprocess(self, inp):
        """
        Cast type of input
        """
        if self.numeric:
            return float(inp)
        else:
            return inp


class Radio(AbstractInput):
    def __init__(self, choices, label=None):
        self.choices = choices
        super().__init__(label)

    def get_template_context(self):
        return {
            "choices": self.choices,
            **super().get_template_context()
        }


class Dropdown(AbstractInput):
    def __init__(self, choices, label=None):
        self.choices = choices
        super().__init__(label)

    def get_template_context(self):
        return {
            "choices": self.choices,
            **super().get_template_context()
        }


class CheckboxGroup(AbstractInput):
    def __init__(self, choices, label=None):
        self.choices = choices
        super().__init__(label)

    def get_template_context(self):
        return {
            "choices": self.choices,
            **super().get_template_context()
        }


class Slider(AbstractInput):
    def __init__(self, minimum=0, maximum=100, default=None, label=None):
        self.minimum = minimum
        self.maximum = maximum
        self.default = minimum if default is None else default
        super().__init__(label)

    def get_template_context(self):
        return {
            "minimum": self.minimum,
            "maximum": self.maximum,
            "default": self.default,
            **super().get_template_context()
        }

    @classmethod
    def get_shortcut_implementations(cls):
        return {
            "slider": {},
        }


class Checkbox(AbstractInput):
    def __init__(self, label=None):
        super().__init__(label)

    @classmethod
    def get_shortcut_implementations(cls):
        return {
            "checkbox": {},
        }


class Image(AbstractInput):
    def __init__(self, shape=(224, 224), image_mode='RGB', label=None):
        self.image_width = shape[0]
        self.image_height = shape[1]
        self.image_mode = image_mode
        super().__init__(label)

    def get_validation_inputs(self):
        return validation_data.BASE64_COLOR_IMAGES

    @classmethod
    def get_shortcut_implementations(cls):
        return {
            "image": {},
        }

    def get_template_context(self):
        return {
            **super().get_template_context()
        }

    def preprocess(self, inp):
        """
        Default preprocessing method is to convert the picture to the configured image mode and resize to the configured shape
        """
        im = preprocessing_utils.decode_base64_to_image(inp)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            im = im.convert(self.image_mode)

        im = preprocessing_utils.resize_and_crop(im, (self.image_width, self.image_height))
        return np.array(im)

    def process_example(self, example):
        if os.path.exists(example):
            return preprocessing_utils.convert_file_to_base64(example)
        else:
            return example


class Microphone(AbstractInput):
    def __init__(self, label=None):
        super().__init__(label)

    def preprocess(self, inp):
        """
        By default, no pre-processing is applied to a microphone input file
        """
        file_obj = preprocessing_utils.decode_base64_to_wav_file(inp)
        mfcc_array = preprocessing_utils.generate_mfcc_features_from_audio_file(file_obj.name)
        return mfcc_array


# Automatically adds all shortcut implementations in AbstractInput into a dictionary.
shortcuts = {}
for cls in AbstractInput.__subclasses__():
    for shortcut, parameters in cls.get_shortcut_implementations().items():
        shortcuts[shortcut] = cls(**parameters)
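A hedged illustration of what this registry gives you (the mappings follow directly from the `get_shortcut_implementations` methods above; this snippet is not part of the packaged file):

```python
# Shortcut strings resolve to pre-configured input instances via the
# `shortcuts` registry built at the bottom of gradio/inputs.py.
from gradio import inputs

assert isinstance(inputs.shortcuts["sketchpad"], inputs.Sketchpad)
assert inputs.shortcuts["textbox"].lines == 7       # "textbox" -> Textbox(lines=7)
assert inputs.shortcuts["number"].numeric is True   # "number"  -> Textbox(numeric=True)
```

This is how `Interface(inputs="textbox", ...)` in `gradio/interface.py` turns a plain string into a fully configured input interface.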
gradio-0.9.9.5/gradio/interface.py (new file, 413 lines)
@ -0,0 +1,413 @@
|
||||
"""
|
||||
This is the core file in the `gradio` package, and defines the Interface class, including methods for constructing the
|
||||
interface using the input and output types.
|
||||
"""
|
||||
|
||||
import tempfile
|
||||
import traceback
|
||||
import webbrowser
|
||||
|
||||
import gradio.inputs
|
||||
import gradio.outputs
|
||||
from gradio import networking, strings
|
||||
from distutils.version import StrictVersion
|
||||
import pkg_resources
|
||||
import requests
|
||||
import random
|
||||
import time
|
||||
import inspect
|
||||
from IPython import get_ipython
|
||||
import sys
|
||||
import weakref
|
||||
import analytics
|
||||
import socket
|
||||
|
||||
|
||||
PKG_VERSION_URL = "https://gradio.app/api/pkg-version"
|
||||
analytics.write_key = "uxIFddIEuuUcFLf9VgH2teTEtPlWdkNy"
|
||||
analytics_url = 'https://api.gradio.app/'
|
||||
hostname = socket.gethostname()
|
||||
ip_address = socket.gethostbyname(hostname)
|
||||
|
||||
|
||||
class Interface:
|
||||
"""
|
||||
The Interface class represents a general input/output interface for a machine learning model. During construction,
|
||||
the appropriate inputs and outputs
|
||||
"""
|
||||
instances = weakref.WeakSet()
|
||||
|
||||
def __init__(self, fn, inputs, outputs, saliency=None, verbose=False, examples=None,
|
||||
live=False, show_input=True, show_output=True,
|
||||
capture_session=False, title=None, description=None,
|
||||
thumbnail=None, server_name=networking.LOCALHOST_NAME):
|
||||
"""
|
||||
:param fn: a function that will process the input panel data from the interface and return the output panel data.
|
||||
:param inputs: a string or `AbstractInput` representing the input interface.
|
||||
:param outputs: a string or `AbstractOutput` representing the output interface.
|
||||
"""
|
||||
def get_input_instance(iface):
|
||||
if isinstance(iface, str):
|
||||
return gradio.inputs.shortcuts[iface.lower()]
|
||||
elif isinstance(iface, gradio.inputs.AbstractInput):
|
||||
return iface
|
||||
else:
|
||||
raise ValueError("Input interface must be of type `str` or "
|
||||
"`AbstractInput`")
|
||||
|
||||
def get_output_instance(iface):
|
||||
if isinstance(iface, str):
|
||||
return gradio.outputs.shortcuts[iface.lower()]
|
||||
elif isinstance(iface, gradio.outputs.AbstractOutput):
|
||||
return iface
|
||||
else:
|
||||
raise ValueError(
|
||||
"Output interface must be of type `str` or "
|
||||
"`AbstractOutput`"
|
||||
)
|
||||
if isinstance(inputs, list):
|
||||
self.input_interfaces = [get_input_instance(i) for i in inputs]
|
||||
else:
|
||||
self.input_interfaces = [get_input_instance(inputs)]
|
||||
if isinstance(outputs, list):
|
||||
self.output_interfaces = [get_output_instance(i) for i in outputs]
|
||||
else:
|
||||
self.output_interfaces = [get_output_instance(outputs)]
|
||||
if not isinstance(fn, list):
|
||||
fn = [fn]
|
||||
self.output_interfaces *= len(fn)
|
||||
self.predict = fn
|
||||
self.verbose = verbose
|
||||
self.status = "OFF"
|
||||
self.saliency = saliency
|
||||
self.live = live
|
||||
self.show_input = show_input
|
||||
self.show_output = show_output
|
||||
self.flag_hash = random.getrandbits(32)
|
||||
self.capture_session = capture_session
|
||||
self.session = None
|
||||
self.server_name = server_name
|
||||
self.title = title
|
||||
self.description = description
|
||||
self.thumbnail = thumbnail
|
||||
self.examples = examples
|
||||
self.server_port = None
|
||||
self.simple_server = None
|
||||
Interface.instances.add(self)
|
||||
|
||||
data = {'fn': fn,
|
||||
'inputs': inputs,
|
||||
'outputs': outputs,
|
||||
'saliency': saliency,
|
||||
'live': live,
|
||||
'capture_session': capture_session,
|
||||
'host_name': hostname,
|
||||
'ip_address': ip_address
|
||||
}
|
||||
try:
|
||||
requests.post(analytics_url + 'gradio-initiated-analytics/',
|
||||
data=data)
|
||||
except requests.ConnectionError:
|
||||
print("gradio-initiated-analytics/ Connection Error")
|
||||
|
||||
def get_config_file(self):
|
||||
config = {
|
||||
"input_interfaces": [
|
||||
(iface.__class__.__name__.lower(), iface.get_template_context())
|
||||
for iface in self.input_interfaces],
|
||||
"output_interfaces": [
|
||||
(iface.__class__.__name__.lower(), iface.get_template_context())
|
||||
for iface in self.output_interfaces],
|
||||
"function_count": len(self.predict),
|
||||
"live": self.live,
|
||||
"show_input": self.show_input,
|
||||
"show_output": self.show_output,
|
||||
"title": self.title,
|
||||
"description": self.description,
|
||||
"thumbnail": self.thumbnail
|
||||
}
|
||||
try:
|
||||
param_names = inspect.getfullargspec(self.predict[0])[0]
|
||||
for iface, param in zip(config["input_interfaces"], param_names):
|
||||
if not iface[1]["label"]:
|
||||
iface[1]["label"] = param.replace("_", " ")
|
||||
for i, iface in enumerate(config["output_interfaces"]):
|
||||
ret_name = "Output " + str(i + 1) if len(config["output_interfaces"]) > 1 else "Output"
|
||||
if not iface[1]["label"]:
|
||||
iface[1]["label"] = ret_name
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
return config
|
||||
|
||||
|
||||
def process(self, raw_input):
|
||||
processed_input = [input_interface.preprocess(
|
||||
raw_input[i]) for i, input_interface in
|
||||
enumerate(self.input_interfaces)]
|
||||
predictions = []
|
||||
durations = []
|
||||
for predict_fn in self.predict:
|
||||
start = time.time()
|
||||
if self.capture_session and not(self.session is None):
|
||||
graph, sess = self.session
|
||||
with graph.as_default():
|
||||
with sess.as_default():
|
||||
prediction = predict_fn(*processed_input)
|
||||
else:
|
||||
try:
|
||||
prediction = predict_fn(*processed_input)
|
||||
except ValueError as exception:
|
||||
if str(exception).endswith("is not an element of this "
|
||||
"graph."):
|
||||
raise ValueError("It looks like you might be using "
|
||||
"tensorflow < 2.0. Please "
|
||||
"pass capture_session=True in "
|
||||
"Interface to avoid the 'Tensor is "
|
||||
"not an element of this graph.' "
|
||||
"error.")
|
||||
else:
|
||||
raise exception
|
||||
duration = time.time() - start
|
||||
|
||||
if len(self.output_interfaces) == len(self.predict):
|
||||
prediction = [prediction]
|
||||
durations.append(duration)
|
||||
predictions.extend(prediction)
|
||||
processed_output = [output_interface.postprocess(
|
||||
predictions[i]) for i, output_interface in enumerate(self.output_interfaces)]
|
||||
return processed_output, durations
|
||||
|
||||
    def validate(self):
        if self.validate_flag:
            if self.verbose:
                print("Interface already validated")
            return
        validation_inputs = self.input_interface.get_validation_inputs()
        n = len(validation_inputs)
        if n == 0:
            self.validate_flag = True
            if self.verbose:
                print(
                    "No validation samples for this interface... skipping validation."
                )
            return
        for m, msg in enumerate(validation_inputs):
            if self.verbose:
                print(
                    "Validating samples: {}/{} [".format(m + 1, n)
                    + "=" * (m + 1)
                    + "." * (n - m - 1)
                    + "]",
                    end="\r",
                )
            try:
                processed_input = self.input_interface.preprocess(msg)
                prediction = self.predict(processed_input)
            except Exception as e:
                data = {'error': e}
                try:
                    requests.post(analytics_url + 'gradio-error-analytics/',
                                  data=data)
                except requests.ConnectionError:
                    print("gradio-error-analytics/ Connection Error")
                if self.verbose:
                    print("\n----------")
                    print(
                        "Validation failed, likely due to incompatible pre-processing and model input. See below:\n"
                    )
                    print(traceback.format_exc())
                break
            try:
                _ = self.output_interface.postprocess(prediction)
            except Exception as e:
                data = {'error': e}
                try:
                    requests.post(analytics_url + 'gradio-error-analytics/',
                                  data=data)
                except requests.ConnectionError:
                    print("gradio-error-analytics/ Connection Error")
                if self.verbose:
                    print("\n----------")
                    print(
                        "Validation failed, likely due to incompatible model output and post-processing. "
                        "See below:\n"
                    )
                    print(traceback.format_exc())
                break
        else:  # This means a break was not explicitly called
            self.validate_flag = True
            if self.verbose:
                print("\n\nValidation passed successfully!")
            return
        raise RuntimeError("Validation did not pass")

    def close(self):
        if self.simple_server and not (self.simple_server.fileno() == -1):  # checks to see if server is running
            print("Closing Gradio server on port {}...".format(self.server_port))
            networking.close_server(self.simple_server)

    def launch(self, inline=None, inbrowser=None, share=False, validate=True, debug=False):
        """
        Standard method shared by interfaces that creates the interface and sets up an HTTP server to communicate with it.
        :param inline: boolean. If True, then a gradio interface is created inline (e.g. in a jupyter or colab notebook).
        :param inbrowser: boolean. If True, then a new browser window opens with the gradio interface.
        :param share: boolean. If True, then a share link is generated using ngrok and displayed to the user.
        :param validate: boolean. If True, then validation is run if the interface has not already been validated.
        :param debug: boolean. If True, the cell blocks and streams errors and logs (useful in colab notebooks).
        """
        # if validate and not self.validate_flag:
        #     self.validate()

        if self.capture_session:
            try:
                import tensorflow as tf
                self.session = tf.get_default_graph(), \
                               tf.keras.backend.get_session()
            except (ImportError, AttributeError):  # If they are using TF >= 2.0 or don't have TF, just ignore this.
                pass

        output_directory = tempfile.mkdtemp()
        # Set up a port to serve the directory containing the static files with interface.
        server_port, httpd = networking.start_simple_server(self, output_directory, self.server_name)
        path_to_local_server = "http://{}:{}/".format(self.server_name, server_port)
        networking.build_template(output_directory)

        self.server_port = server_port
        self.status = "RUNNING"
        self.simple_server = httpd

        is_colab = False
        try:  # Check if running interactively using ipython.
            from_ipynb = get_ipython()
            if "google.colab" in str(from_ipynb):
                is_colab = True
        except NameError:
            data = {'error': 'NameError in launch method'}
            try:
                requests.post(analytics_url + 'gradio-error-analytics/',
                              data=data)
            except requests.ConnectionError:
                print("Connection Error")

        try:
            current_pkg_version = pkg_resources.require("gradio")[0].version
            latest_pkg_version = requests.get(url=PKG_VERSION_URL).json()["version"]
            if StrictVersion(latest_pkg_version) > StrictVersion(current_pkg_version):
                print("IMPORTANT: You are using gradio version {}, "
                      "however version {} "
                      "is available, please upgrade.".format(
                          current_pkg_version, latest_pkg_version))
                print('--------')
        except:  # TODO(abidlabs): don't catch all exceptions
            pass

        if not is_colab:
            print(strings.en["RUNNING_LOCALLY"].format(path_to_local_server))
        else:
            if debug:
                print("Colab notebook detected. This cell will run indefinitely so that you can see errors and logs. "
                      "To turn off, set debug=False in launch().")
            else:
                print("Colab notebook detected. To show errors in colab notebook, set debug=True in launch()")

        if share:
            try:
                share_url = networking.setup_tunnel(server_port)
                print("Running on External URL:", share_url)
            except RuntimeError:
                data = {'error': 'RuntimeError in launch method'}
                try:
                    requests.post(analytics_url + 'gradio-error-analytics/',
                                  data=data)
                except requests.ConnectionError:
                    print("Connection Error")
                share_url = None
                if self.verbose:
                    print(strings.en["NGROK_NO_INTERNET"])
        else:
            if is_colab:  # For a colab notebook, create a public link even if share is False.
                share_url = networking.setup_tunnel(server_port)
                print("Running on External URL:", share_url)
                if self.verbose:
                    print(strings.en["COLAB_NO_LOCAL"])
            else:  # If it's not a colab notebook and share=False, print a message telling them about the share option.
                if self.verbose:
                    print(strings.en["PUBLIC_SHARE_TRUE"])
                share_url = None

        if inline is None:
            try:  # Check if running interactively using ipython.
                get_ipython()
                inline = True
                if inbrowser is None:
                    inbrowser = False
            except NameError:
                inline = False
                if inbrowser is None:
                    inbrowser = True
        else:
            if inbrowser is None:
                inbrowser = False

        if inbrowser and not is_colab:
            webbrowser.open(
                path_to_local_server
            )  # Open a browser tab with the interface.
        if inline:
            from IPython.display import IFrame, display

            if is_colab:
                # Embed the remote interface page if on google colab; otherwise, embed the local page.
                while not networking.url_ok(share_url):
                    time.sleep(1)
                display(IFrame(share_url, width=1000, height=500))
            else:
                display(IFrame(path_to_local_server, width=1000, height=500))

        config = self.get_config_file()
        config["share_url"] = share_url

        processed_examples = []
        if self.examples is not None:
            for example_set in self.examples:
                processed_set = []
                for iface, example in zip(self.input_interfaces, example_set):
                    processed_set.append(iface.process_example(example))
                processed_examples.append(processed_set)
            config["examples"] = processed_examples

        networking.set_config(config, output_directory)

        if debug:
            while True:
                sys.stdout.flush()
                time.sleep(0.1)

        launch_method = 'browser' if inbrowser else 'inline'
        data = {'launch_method': launch_method,
                'is_google_colab': is_colab,
                'is_sharing_on': share,
                'share_url': share_url,
                'host_name': hostname,
                'ip_address': ip_address
                }
        try:
            requests.post(analytics_url + 'gradio-hosted-launched-analytics/',
                          data=data)
        except requests.ConnectionError:
            print("Connection Error")
        return httpd, path_to_local_server, share_url

    @classmethod
    def get_instances(cls):
        return list(Interface.instances)  # Returns list of all current instances


def reset_all():
    for io in Interface.get_instances():
        io.close()
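
For illustration, a hedged sketch of the launch() return values and the cleanup path defined above; the greeting function is hypothetical:

import gradio as gr

def greet(text):
    return "Hello " + text

io = gr.Interface(fn=greet, inputs="textbox", outputs="text")
httpd, local_url, share_url = io.launch(inline=False, share=False)
# local_url is typically "http://127.0.0.1:7860/" (the first free port from 7860 up);
# share_url is None unless share=True or the code is running in a colab notebook.
io.close()  # shuts down the simple server via networking.close_server
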
252
gradio-0.9.9.5/gradio/networking.py
Normal file
@ -0,0 +1,252 @@
"""
Defines helper methods useful for setting up ports, launching servers, and handling `ngrok`
"""

import os
import socket
import threading
from http.server import HTTPServer as BaseHTTPServer, SimpleHTTPRequestHandler
import pkg_resources
from distutils import dir_util
from gradio import inputs, outputs
import json
from gradio.tunneling import create_tunnel
import urllib.request
from shutil import copyfile
import requests
import sys


INITIAL_PORT_VALUE = (
    7860
)  # The http server will try to open on port 7860. If not available, 7861, 7862, etc.
TRY_NUM_PORTS = (
    100
)  # Number of ports to try before giving up and throwing an exception.
LOCALHOST_NAME = os.getenv('GRADIO_SERVER_NAME', "127.0.0.1")
GRADIO_API_SERVER = "https://api.gradio.app/v1/tunnel-request"

STATIC_TEMPLATE_LIB = pkg_resources.resource_filename("gradio", "templates/")
STATIC_PATH_LIB = pkg_resources.resource_filename("gradio", "static/")
STATIC_PATH_TEMP = "static/"
TEMPLATE_TEMP = "index.html"
BASE_JS_FILE = "static/js/all_io.js"
CONFIG_FILE = "static/config.json"

ASSOCIATION_PATH_IN_STATIC = "static/apple-app-site-association"
ASSOCIATION_PATH_IN_ROOT = "apple-app-site-association"

FLAGGING_DIRECTORY = 'static/flagged/'
FLAGGING_FILENAME = 'data.txt'


def build_template(temp_dir):
    """
    Create HTML file with supporting JS and CSS files in a given directory.
    :param temp_dir: string with path to temp directory in which the html file should be built
    """
    dir_util.copy_tree(STATIC_TEMPLATE_LIB, temp_dir)
    dir_util.copy_tree(STATIC_PATH_LIB, os.path.join(
        temp_dir, STATIC_PATH_TEMP))

    # Move association file to root of temporary directory.
    copyfile(os.path.join(temp_dir, ASSOCIATION_PATH_IN_STATIC),
             os.path.join(temp_dir, ASSOCIATION_PATH_IN_ROOT))


def render_template_with_tags(template_path, context):
    """
    Combines the given template with a given context dictionary by replacing all of the occurrences of tags (enclosed
    in double curly braces) with corresponding values.
    :param template_path: a string with the path to the template file
    :param context: a dictionary whose string keys are the tags to replace and whose string values are the replacements.
    """
    with open(template_path) as fin:
        old_lines = fin.readlines()
    new_lines = render_string_or_list_with_tags(old_lines, context)
    with open(template_path, "w") as fout:
        for line in new_lines:
            fout.write(line)


def render_string_or_list_with_tags(old_lines, context):
    # Handle string case
    if isinstance(old_lines, str):
        for key, value in context.items():
            old_lines = old_lines.replace(r"{{" + key + r"}}", str(value))
        return old_lines

    # Handle list case
    new_lines = []
    for line in old_lines:
        for key, value in context.items():
            line = line.replace(r"{{" + key + r"}}", str(value))
        new_lines.append(line)
    return new_lines


def set_config(config, temp_dir):
    config_file = os.path.join(temp_dir, CONFIG_FILE)
    with open(config_file, "w") as output:
        json.dump(config, output)


def get_first_available_port(initial, final):
    """
    Gets the first open port in a specified range of port numbers
    :param initial: the initial value in the range of port numbers
    :param final: final (exclusive) value in the range of port numbers, should be greater than `initial`
    :return: the first open port in the range
    """
    for port in range(initial, final):
        try:
            s = socket.socket()  # create a socket object
            s.bind((LOCALHOST_NAME, port))  # Bind to the port
            s.close()
            return port
        except OSError:
            pass
    raise OSError(
        "All ports from {} to {} are in use. Please close a port.".format(
            initial, final
        )
    )


def serve_files_in_background(interface, port, directory_to_serve=None, server_name=LOCALHOST_NAME):
    class HTTPHandler(SimpleHTTPRequestHandler):
        """This handler uses server.base_path instead of always using os.getcwd()"""

        def _set_headers(self):
            self.send_response(200)
            self.send_header("Content-type", "application/json")
            self.end_headers()

        def translate_path(self, path):
            path = SimpleHTTPRequestHandler.translate_path(self, path)
            relpath = os.path.relpath(path, os.getcwd())
            fullpath = os.path.join(self.server.base_path, relpath)
            return fullpath

        def log_message(self, format, *args):
            return

        def do_POST(self):
            # Read body of the request.
            if self.path == "/api/predict/":
                # Make the prediction.
                self._set_headers()
                data_string = self.rfile.read(
                    int(self.headers["Content-Length"]))
                msg = json.loads(data_string)
                raw_input = msg["data"]
                prediction, durations = interface.process(raw_input)

                output = {"data": prediction, "durations": durations}
                if interface.saliency is not None:
                    saliency = interface.saliency(raw_input, prediction)
                    output['saliency'] = saliency.tolist()
                # if interface.always_flag:
                #     msg = json.loads(data_string)
                #     flag_dir = os.path.join(FLAGGING_DIRECTORY, str(interface.hash))
                #     os.makedirs(flag_dir, exist_ok=True)
                #     output_flag = {'input': interface.input_interface.rebuild_flagged(flag_dir, msg['data']),
                #                    'output': interface.output_interface.rebuild_flagged(flag_dir, processed_output),
                #                    }
                #     with open(os.path.join(flag_dir, FLAGGING_FILENAME), 'a+') as f:
                #         f.write(json.dumps(output_flag))
                #         f.write("\n")

                # Prepare return json dictionary.
                self.wfile.write(json.dumps(output).encode())

            elif self.path == "/api/flag/":
                self._set_headers()
                data_string = self.rfile.read(
                    int(self.headers["Content-Length"]))
                msg = json.loads(data_string)
                flag_dir = os.path.join(FLAGGING_DIRECTORY,
                                        str(interface.flag_hash))
                os.makedirs(flag_dir, exist_ok=True)
                output = {'inputs': [interface.input_interfaces[
                    i].rebuild_flagged(
                    flag_dir, msg['data']['input_data']) for i
                    in range(len(interface.input_interfaces))],
                    'outputs': [interface.output_interfaces[
                        i].rebuild_flagged(
                        flag_dir, msg['data']['output_data']) for i
                        in range(len(interface.output_interfaces))],
                    'message': msg['data']['message']}

                with open(os.path.join(flag_dir, FLAGGING_FILENAME), 'a+') as f:
                    f.write(json.dumps(output))
                    f.write("\n")

            else:
                self.send_error(404, 'Path not found: {}'.format(self.path))

    class HTTPServer(BaseHTTPServer):
        """The main server, you pass in base_path which is the path you want to serve requests from"""

        def __init__(self, base_path, server_address, RequestHandlerClass=HTTPHandler):
            self.base_path = base_path
            BaseHTTPServer.__init__(self, server_address, RequestHandlerClass)

    httpd = HTTPServer(directory_to_serve, (server_name, port))

    # Now loop forever
    def serve_forever():
        try:
            while True:
                sys.stdout.flush()
                httpd.serve_forever()
        except (KeyboardInterrupt, OSError):
            httpd.shutdown()
            httpd.server_close()

    thread = threading.Thread(target=serve_forever, daemon=False)
    thread.start()

    return httpd


def start_simple_server(interface, directory_to_serve=None, server_name=None):
    port = get_first_available_port(
        INITIAL_PORT_VALUE, INITIAL_PORT_VALUE + TRY_NUM_PORTS
    )
    httpd = serve_files_in_background(interface, port, directory_to_serve, server_name)
    return port, httpd


def close_server(server):
    server.server_close()


def url_request(url):
    try:
        req = urllib.request.Request(
            url=url, headers={"content-type": "application/json"}
        )
        res = urllib.request.urlopen(req, timeout=10)
        return res
    except Exception as e:
        raise RuntimeError(str(e))


def setup_tunnel(local_server_port):
    response = url_request(GRADIO_API_SERVER)
    if response and response.code == 200:
        try:
            payload = json.loads(response.read().decode("utf-8"))[0]
            return create_tunnel(payload, LOCALHOST_NAME, local_server_port)
        except Exception as e:
            raise RuntimeError(str(e))


def url_ok(url):
    try:
        r = requests.head(url)
        return r.status_code == 200
    except requests.ConnectionError:
        return False
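
The do_POST handler above defines the small JSON protocol between the page and the interface. A sketch of calling the predict endpoint by hand, assuming an interface is already serving locally on the default port 7860:

import json
import requests

resp = requests.post("http://127.0.0.1:7860/api/predict/",
                     data=json.dumps({"data": ["hello"]}))
body = resp.json()
print(body["data"])       # list of post-processed outputs, one per output interface
print(body["durations"])  # seconds spent in each predict function
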
174
gradio-0.9.9.5/gradio/outputs.py
Normal file
@ -0,0 +1,174 @@
"""
This module defines various classes that can serve as the `output` to an interface. Each class must inherit from
`AbstractOutput`, and each class must define a path to its template. All of the subclasses of `AbstractOutput` are
automatically added to a registry, which allows them to be easily referenced in other parts of the code.
"""

from abc import ABC, abstractmethod
import numpy as np
import json
from gradio import preprocessing_utils
import datetime
import operator
from numbers import Number

# Where to find the static resources associated with each template.
BASE_OUTPUT_INTERFACE_JS_PATH = 'static/js/interfaces/output/{}.js'


class AbstractOutput(ABC):
    """
    An abstract class for defining the methods that all gradio outputs should have.
    When this is subclassed, it is automatically added to the registry.
    """
    def __init__(self, label):
        self.label = label

    def get_template_context(self):
        """
        :return: a dictionary with context variables for the javascript file associated with the context
        """
        return {"label": self.label}

    def postprocess(self, prediction):
        """
        Any postprocessing needed to be performed on function output.
        """
        return prediction

    @classmethod
    def get_shortcut_implementations(cls):
        """
        Return dictionary of shortcut implementations
        """
        return {}


class Label(AbstractOutput):
    LABEL_KEY = "label"
    CONFIDENCE_KEY = "confidence"
    CONFIDENCES_KEY = "confidences"

    def __init__(self, num_top_classes=None, label=None):
        self.num_top_classes = num_top_classes
        super().__init__(label)

    def postprocess(self, prediction):
        if isinstance(prediction, str) or isinstance(prediction, Number):
            return {"label": str(prediction)}
        elif isinstance(prediction, dict):
            sorted_pred = sorted(
                prediction.items(),
                key=operator.itemgetter(1),
                reverse=True
            )
            if self.num_top_classes is not None:
                sorted_pred = sorted_pred[:self.num_top_classes]
            return {
                self.LABEL_KEY: sorted_pred[0][0],
                self.CONFIDENCES_KEY: [
                    {
                        self.LABEL_KEY: pred[0],
                        self.CONFIDENCE_KEY: pred[1]
                    } for pred in sorted_pred
                ]
            }
        elif isinstance(prediction, int) or isinstance(prediction, float):
            return {self.LABEL_KEY: str(prediction)}
        else:
            raise ValueError("The `Label` output interface expects one of: a string label, an int label, a "
                             "float label, or a dictionary whose keys are labels and values are confidences.")

    @classmethod
    def get_shortcut_implementations(cls):
        return {
            "label": {},
        }


class KeyValues(AbstractOutput):
    def __init__(self, label=None):
        super().__init__(label)

    def postprocess(self, prediction):
        if isinstance(prediction, dict):
            return prediction
        else:
            raise ValueError("The `KeyValues` output interface expects an output that is a dictionary whose keys are "
                             "labels and values are corresponding values.")

    @classmethod
    def get_shortcut_implementations(cls):
        return {
            "key_values": {},
        }


class Textbox(AbstractOutput):
    def __init__(self, label=None):
        super().__init__(label)

    def get_template_context(self):
        return {
            **super().get_template_context()
        }

    @classmethod
    def get_shortcut_implementations(cls):
        return {
            "text": {},
            "number": {},
        }

    def postprocess(self, prediction):
        if isinstance(prediction, str) or isinstance(prediction, int) or isinstance(prediction, float):
            return str(prediction)
        else:
            raise ValueError("The `Textbox` output interface expects an output that is one of: a string, or "
                             "an int/float that can be converted to a string.")


class Image(AbstractOutput):
    def __init__(self, plot=False, label=None):
        self.plot = plot
        super().__init__(label)

    @classmethod
    def get_shortcut_implementations(cls):
        return {
            "image": {},
            "plot": {"plot": True}
        }

    def postprocess(self, prediction):
        """
        Encodes the prediction (a matplotlib plot, or a numpy array) as a base64 image string.
        """
        if self.plot:
            try:
                return preprocessing_utils.encode_plot_to_base64(prediction)
            except Exception:
                raise ValueError("The `Image` output interface expects a `matplotlib.pyplot` object "
                                 "if plot=True.")
        else:
            try:
                return preprocessing_utils.encode_array_to_base64(prediction)
            except Exception:
                raise ValueError("The `Image` output interface (with plot=False) expects a numpy array.")

    def rebuild_flagged(self, dir, msg):
        """
        Default rebuild method to decode a base64 image
        """
        im = preprocessing_utils.decode_base64_to_image(msg)
        timestamp = datetime.datetime.now()
        filename = 'output_{}.png'.format(timestamp.strftime("%Y-%m-%d-%H-%M-%S"))
        im.save('{}/{}'.format(dir, filename), 'PNG')
        return filename


# Automatically adds all shortcut implementations in AbstractOutput into a dictionary.
shortcuts = {}
for cls in AbstractOutput.__subclasses__():
    for shortcut, parameters in cls.get_shortcut_implementations().items():
        shortcuts[shortcut] = cls(**parameters)
148
gradio-0.9.9.5/gradio/preprocessing_utils.py
Normal file
@ -0,0 +1,148 @@
from PIL import Image, ImageOps
from io import BytesIO
import base64
import tempfile
import scipy.io.wavfile
from scipy.fftpack import dct
import numpy as np
import skimage


#########################
# IMAGE PRE-PROCESSING
#########################
def decode_base64_to_image(encoding):
    content = encoding.split(';')[1]
    image_encoded = content.split(',')[1]
    return Image.open(BytesIO(base64.b64decode(image_encoded)))


def convert_file_to_base64(img):
    with open(img, "rb") as image_file:
        encoded_string = base64.b64encode(image_file.read())
        base64_str = str(encoded_string, 'utf-8')
        type = img.split(".")[-1]
        return "data:image/" + type + ";base64," + base64_str


def encode_plot_to_base64(plt):
    with BytesIO() as output_bytes:
        plt.savefig(output_bytes, format="png")
        bytes_data = output_bytes.getvalue()
    plt.close()
    base64_str = str(base64.b64encode(bytes_data), 'utf-8')
    return "data:image/png;base64," + base64_str


def encode_array_to_base64(image_array):
    with BytesIO() as output_bytes:
        PIL_image = Image.fromarray(skimage.img_as_ubyte(image_array))
        PIL_image.save(output_bytes, 'PNG')
        bytes_data = output_bytes.getvalue()
    base64_str = str(base64.b64encode(bytes_data), 'utf-8')
    return "data:image/png;base64," + base64_str


def resize_and_crop(img, size, crop_type='center'):
    """
    Resize and crop an image to fit the specified size.
    args:
        size: `(width, height)` tuple.
        crop_type: can be 'top' or 'center'; depending on this value, the image
            will be cropped getting the 'top/left' or the 'middle' of the image
            to fit the size.
    raises:
        ValueError: if an invalid `crop_type` is provided.
    """
    if crop_type == "top":
        center = (0, 0)
    elif crop_type == "center":
        center = (0.5, 0.5)
    else:
        raise ValueError
    return ImageOps.fit(img, size, centering=center)


##################
# AUDIO FILES
##################

def decode_base64_to_wav_file(encoding):
    inp = encoding.split(';')[1].split(',')[1]
    wav_obj = base64.b64decode(inp)
    file_obj = tempfile.NamedTemporaryFile()
    file_obj.close()
    with open(file_obj.name, 'wb') as f:
        f.write(wav_obj)
    return file_obj


def generate_mfcc_features_from_audio_file(wav_filename,
                                           pre_emphasis=0.95,
                                           frame_size=0.025,
                                           frame_stride=0.01,
                                           NFFT=512,
                                           nfilt=40,
                                           num_ceps=12,
                                           cep_lifter=22):
    """
    Loads and preprocesses a .wav audio file into mfcc coefficients, the typical inputs to models.
    Adapted from: https://haythamfayek.com/2016/04/21/speech-processing-for-machine-learning.html
    :param wav_filename: string name of audio file to process.
    :param pre_emphasis: a float factor, typically 0.95 or 0.97, which amplifies high frequencies.
    :param frame_size: a float that is the length, in seconds, of the time frame over which to take the fft.
    :param frame_stride: a float that is the offset, in seconds, between consecutive time frames.
    :param NFFT: the number of points in the short-time fft for each time frame.
    :param nfilt: the number of filters on the Mel-scale to extract frequency bands.
    :param num_ceps: the number of cepstral coefficients to retain.
    :param cep_lifter: the int factor by which to de-emphasize higher-frequency coefficients.
    :return: a numpy array of mfcc coefficients.
    """
    sample_rate, signal = scipy.io.wavfile.read(wav_filename)
    emphasized_signal = np.append(signal[0], signal[1:] - pre_emphasis * signal[:-1])

    frame_length, frame_step = frame_size * sample_rate, frame_stride * sample_rate  # Convert from seconds to samples
    signal_length = len(emphasized_signal)
    frame_length = int(round(frame_length))
    frame_step = int(round(frame_step))
    num_frames = int(np.ceil(float(np.abs(signal_length - frame_length)) / frame_step))  # Make sure that we have at least 1 frame

    pad_signal_length = num_frames * frame_step + frame_length
    z = np.zeros((pad_signal_length - signal_length))
    pad_signal = np.append(emphasized_signal, z)  # Pad signal so that all frames have an equal number of samples, without truncating any samples from the original signal

    indices = np.tile(np.arange(0, frame_length), (num_frames, 1)) + np.tile(np.arange(0, num_frames * frame_step, frame_step), (frame_length, 1)).T
    frames = pad_signal[indices.astype(np.int32, copy=False)]

    frames *= np.hamming(frame_length)
    mag_frames = np.absolute(np.fft.rfft(frames, NFFT))  # Magnitude of the FFT
    pow_frames = ((1.0 / NFFT) * ((mag_frames) ** 2))  # Power Spectrum

    low_freq_mel = 0
    high_freq_mel = (2595 * np.log10(1 + (sample_rate / 2) / 700))  # Convert Hz to Mel
    mel_points = np.linspace(low_freq_mel, high_freq_mel, nfilt + 2)  # Equally spaced in Mel scale
    hz_points = (700 * (10**(mel_points / 2595) - 1))  # Convert Mel to Hz
    bin = np.floor((NFFT + 1) * hz_points / sample_rate)

    fbank = np.zeros((nfilt, int(np.floor(NFFT / 2 + 1))))
    for m in range(1, nfilt + 1):
        f_m_minus = int(bin[m - 1])   # left
        f_m = int(bin[m])             # center
        f_m_plus = int(bin[m + 1])    # right

        for k in range(f_m_minus, f_m):
            fbank[m - 1, k] = (k - bin[m - 1]) / (bin[m] - bin[m - 1])
        for k in range(f_m, f_m_plus):
            fbank[m - 1, k] = (bin[m + 1] - k) / (bin[m + 1] - bin[m])
    filter_banks = np.dot(pow_frames, fbank.T)
    filter_banks = np.where(filter_banks == 0, np.finfo(float).eps, filter_banks)  # Numerical stability
    filter_banks = 20 * np.log10(filter_banks)  # dB

    mfcc = dct(filter_banks, type=2, axis=1, norm='ortho')[:, 0: (num_ceps + 1)]  # Keep filters 1-13 by default.
    (nframes, ncoeff) = mfcc.shape
    n = np.arange(ncoeff)
    lift = 1 + (cep_lifter / 2) * np.sin(np.pi * n / cep_lifter)
    mfcc *= lift

    filter_banks -= (np.mean(filter_banks, axis=0) + 1e-8)
    mfcc -= (np.mean(mfcc, axis=0) + 1e-8)
    return mfcc[np.newaxis, :, :]  # Create a batch dimension.
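
A hedged usage sketch of the MFCC helper above; "speech.wav" is a hypothetical mono PCM file:

from gradio import preprocessing_utils

mfcc = preprocessing_utils.generate_mfcc_features_from_audio_file("speech.wav")
# Shape is (1, num_frames, num_ceps + 1); e.g. roughly (1, 998, 13) for ~10 s of 16 kHz audio.
print(mfcc.shape)
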
11
gradio-0.9.9.5/gradio/static/apple-app-site-association
Normal file
@ -0,0 +1,11 @@
{
  "applinks": {
    "apps": [],
    "details": [
      {
        "appID": "RHW8FBGSTX.app.gradio.Gradio",
        "paths": ["*"]
      }
    ]
  }
}
125
gradio-0.9.9.5/gradio/static/css/gradio.css
Normal file
@ -0,0 +1,125 @@
.panel {
  min-width: 300px;
  flex: 1 1 0;
  display: inline-block;
  box-sizing: border-box;
}
.panel:first-child {
  margin-right: 24px;
}
.panel:last-child {
  margin-left: 8px;
}
.panel_header, .interface {
  background-color: whitesmoke;
}
.panel_header {
  text-transform: uppercase;
  font-family: Arial;
  color: #888;
  padding: 6px 6px 0;
  font-size: 14px;
  font-weight: bold;
  display: flex;
}
.input_interfaces, .output_interfaces {
  margin-bottom: 16px;
}
.interface {
  height: 360px;
  padding: 8px;
  display: flex;
  flex-flow: column;
}
.loading {
  margin-left: auto;
}
.loading img {
  display: none;
}
.panel_buttons {
  display: flex;
  margin-left: -8px;
}
.panel_buttons > * {
  margin-left: 8px;
}
.submit {
  display: none;
}
.submit, .clear, .panel_button {
  background-color: whitesmoke !important;
  flex-grow: 1;
  padding: 8px !important;
  box-sizing: border-box;
  text-transform: uppercase;
  font-weight: bold;
  border: 0 none !important;
}
.submit {
  background-color: #EEA45D !important;
  color: white !important;
}
.submit, .flag_message {
  flex-grow: 2 !important;
  margin-right: 8px;
}
.flag_message {
  padding: 8px !important;
  background-color: whitesmoke !important;
}

.upload_zone {
  font-weight: bold;
  font-size: 24px;
  color: #BBB;
  cursor: pointer;
  width: 100%;
  height: 100%;
  box-sizing: border-box;
  display: flex;
  align-items: center;
  justify-content: center;
  text-align: center;
  line-height: 1.5em;
  flex-flow: column;
}
.upload_zone img {
  height: 120px;
}
.drop_zone {
  border: dashed 8px #DDD;
}
.edit_holder {
  display: flex;
  justify-content: flex-end;
  margin-bottom: 6px;
}
.interface_button {
  padding: 6px;
  text-transform: uppercase;
  font-weight: bold;
  font-size: 14px;
}
.overlay {
  position: absolute;
  height: 100vh;
  width: 100vw;
  position: fixed;
  z-index: 1;
  background-color: rgba(0, 0, 0, 0.7);
  top: 0;
  left: 0;
}
.flag.flagged {
  background-color: pink !important;
}
.loading {
  justify-content: center;
  align-items: center;
}

.invisible {
  display: none !important;
}
@ -0,0 +1,11 @@
.checkbox_group {
  display: flex;
  align-items: center;
  font-size: 18px;
}
.checkbox_group input, .checkbox {
  margin: 0px 4px 0px 0px;
}
.checkbox_group label {
  margin-right: 16px;
}
26
gradio-0.9.9.5/gradio/static/css/interfaces/input/csv.css
Normal file
@ -0,0 +1,26 @@
.table_holder {
  max-width: 100%;
  max-height: 100%;
  overflow: scroll;
  display: none;
}
.csv_preview {
  background-color: white;
  max-width: 100%;
  max-height: 100%;
  font-size: 12px;
  font-family: monospace;
}
.csv_preview tr {
  border-bottom: solid 1px black;
}
.csv_preview tr.header td {
  background-color: #EEA45D;
  font-weight: bold;
}
.csv_preview td {
  padding: 2px 4px;
}
.csv_preview td:nth-child(even) {
  background-color: whitesmoke;
}
6875
gradio-0.9.9.5/gradio/static/css/loading.css
Normal file
File diff suppressed because it is too large
114
gradio-0.9.9.5/gradio/static/css/style.css
Normal file
@ -0,0 +1,114 @@
body {
  font-family: 'Open Sans', sans-serif;
  margin: 0;
}
button, input[type="submit"], input[type="reset"], input[type="text"], input[type="button"], select[type="submit"] {
  background: none;
  color: inherit;
  border: none;
  padding: 0;
  font: inherit;
  cursor: pointer;
  outline: inherit;
  -webkit-appearance: none;
  border-radius: 0;
}

.loading_time {
  font-size: large;
  color: #EEA45D;
  text-align: right;
  padding-top: 5px;
}

nav {
  text-align: center;
  padding: 16px 0 4px;
}
nav img {
  margin-right: auto;
  height: 32px;
}
#share {
  text-align: center;
  margin-bottom: 10px;
  font-size: 14px;
}
#share-copy {
  background-color: whitesmoke;
  padding: 4px;
  border-radius: 2px;
}
#title {
  text-align: center;
}
.container {
  max-width: 1028px;
  width: 100%;
  margin: 0 auto;
}
.panels {
  display: flex;
  flex-flow: row;
  flex-wrap: wrap;
  justify-content: center;
}
button.primary {
  color: white;
  background-color: #EEA45D;
}
button.secondary {
  color: black;
  background-color: #F6F6F6;
}
#featured_history {
  margin: 4px 30px;
}
#featured_table {
  border-collapse: collapse;
  border: solid 2px whitesmoke;
  margin-bottom: 20px;
  table-layout: fixed;
}
#featured_table div {
  display: inline-block;
  padding: 10px;
  text-align: center;
  cursor: pointer;
  max-width: 200px;
  max-height: 100px;
  overflow-y: auto;
}
#featured_table div:nth-child(even) {
  background-color: whitesmoke;
}
#featured_table div:hover {
  background-color: #EEA45D;
}
#featured_history img {
  height: 60px;
}
#examples table {
  border-collapse: collapse;
  font-family: monospace;
  padding: 8px;
  background-color: whitesmoke;
  border-right: solid 4px whitesmoke;
  border-left: solid 4px whitesmoke;
}
#examples th {
  padding: 8px;
  text-align: left;
  font-size: 18px;
}
#examples td {
  padding: 8px;
  cursor: pointer;
}

#examples tbody tr:nth-child(odd) {
  background-color: white;
}
#examples tbody tr:hover {
  background-color: lightgray;
}
10
gradio-0.9.9.5/gradio/strings.py
Normal file
@ -0,0 +1,10 @@
en = {
    "BETA_MESSAGE": "NOTE: Gradio is in beta stage, please report all bugs to: gradio.app@gmail.com",
    "RUNNING_LOCALLY": "Running locally at: {}",
    "NGROK_NO_INTERNET": "Unable to create public link for interface, please check internet connection or try "
                         "restarting python interpreter.",
    "COLAB_NO_LOCAL": "Cannot display local interface on google colab, public link created.",
    "PUBLIC_SHARE_TRUE": "To create a public link, set `share=True` in the argument to `launch()`.",
    "MODEL_PUBLICLY_AVAILABLE_URL": "Model available publicly at: {} (may take up to a minute for link to be usable)",
    "GENERATING_PUBLIC_LINK": "Generating public link (may take a few seconds...):",
}
107
gradio-0.9.9.5/gradio/tunneling.py
Normal file
@ -0,0 +1,107 @@
"""
This file provides remote port forwarding functionality using the paramiko package.
Inspired by: https://github.com/paramiko/paramiko/blob/master/demos/rforward.py
"""

import select
import socket
import sys
import threading
from io import StringIO
import warnings
import paramiko

DEBUG_MODE = False


def handler(chan, host, port):
    sock = socket.socket()
    try:
        sock.connect((host, port))
    except Exception as e:
        verbose("Forwarding request to {}:{} failed: {}".format(host, port, e))
        return

    verbose(
        "Connected! Tunnel open {} -> {} -> {}".format(chan.origin_addr,
                                                       chan.getpeername(),
                                                       (host, port))
    )
    while True:
        r, w, x = select.select([sock, chan], [], [])
        if sock in r:
            data = sock.recv(1024)
            if len(data) == 0:
                break
            chan.send(data)
        if chan in r:
            data = chan.recv(1024)
            if len(data) == 0:
                break
            sock.send(data)
    chan.close()
    sock.close()
    verbose("Tunnel closed from {}".format(chan.origin_addr,))


def reverse_forward_tunnel(server_port, remote_host, remote_port, transport):
    transport.request_port_forward("", server_port)
    while True:
        chan = transport.accept(1000)
        if chan is None:
            continue
        thr = threading.Thread(target=handler, args=(chan, remote_host, remote_port))
        thr.setDaemon(True)
        thr.start()


def verbose(s):
    if DEBUG_MODE:
        print(s)


def create_tunnel(payload, local_server, local_server_port):
    client = paramiko.SSHClient()
    # client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.set_missing_host_key_policy(paramiko.WarningPolicy())

    verbose(
        "Connecting to ssh host {}:{} ...".format(payload["host"], int(payload[
            "port"]))
    )
    try:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            client.connect(
                hostname=payload["host"],
                port=int(payload["port"]),
                username=payload["user"],
                pkey=paramiko.RSAKey.from_private_key(StringIO(payload["key"])),
            )
    except Exception as e:
        print(
            "*** Failed to connect to {}:{}: {}".format(payload["host"],
                                                        int(payload["port"]), e)
        )
        sys.exit(1)

    verbose(
        "Now forwarding remote port {} to {}:{} ...".format(int(payload["remote_port"]),
                                                            local_server,
                                                            local_server_port)
    )

    thread = threading.Thread(
        target=reverse_forward_tunnel,
        args=(
            int(payload["remote_port"]),
            local_server,
            local_server_port,
            client.get_transport(),
        ),
        daemon=True,
    )
    thread.start()

    return payload["share_url"]
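
create_tunnel() expects the payload fetched from GRADIO_API_SERVER. A sketch of its shape with hypothetical values, inferred from the fields the function reads:

payload = {
    "host": "ssh.gradio.app",                       # hypothetical SSH host
    "port": 22,                                     # SSH port
    "user": "tunnel",                               # SSH username
    "key": "-----BEGIN RSA PRIVATE KEY-----\n...",  # one-time private key (truncated here)
    "remote_port": 9000,                            # remote port to reverse-forward
    "share_url": "https://12345.gradio.app",        # public link returned to the caller
}
# create_tunnel(payload, "127.0.0.1", 7860) would then forward the public URL to the local server.
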
16
gradio-0.9.9.5/gradio/validation_data.py
Normal file
File diff suppressed because one or more lines are too long
25
gradio-0.9.9.5/setup.py
Normal file
@ -0,0 +1,25 @@
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

setup(
    name='gradio',
    version='0.9.9.5',
    include_package_data=True,
    description='Python library for easily interacting with trained machine learning models',
    author='Abubakar Abid',
    author_email='a12d@stanford.edu',
    url='https://github.com/gradio-app/gradio-UI',
    packages=['gradio'],
    keywords=['machine learning', 'visualization', 'reproducibility'],
    install_requires=[
        'numpy',
        'requests',
        'paramiko',
        'scipy',
        'IPython',
        'scikit-image',
        'analytics-python',
    ],
)
@ -1,6 +1,6 @@
Metadata-Version: 1.0
Name: gradio
Version: 0.9.9.1
Version: 0.9.9.6
Summary: Python library for easily interacting with trained machine learning models
Home-page: https://github.com/gradio-app/gradio-UI
Author: Abubakar Abid
@ -50,6 +50,7 @@ gradio/static/img/vendor/icon-b.svg
gradio/static/img/vendor/icon-c.svg
gradio/static/img/vendor/icon-d.svg
gradio/static/js/all_io.js
gradio/static/js/all_io.js.bak
gradio/static/js/gradio.js
gradio/static/js/utils.js
gradio/static/js/interfaces/input/checkbox.js
@ -4,4 +4,4 @@ paramiko
scipy
IPython
scikit-image
analytics
analytics-python
@ -40,12 +6,6 @@ class AbstractInput(ABC):
        """
        return {"label": self.label}

    def sample_inputs(self):
        """
        An interface can optionally implement a method that sends a list of sample inputs for inference.
        """
        return []

    def preprocess(self, inp):
        """
        By default, no pre-processing is applied to text.
@ -67,17 +61,12 @@ class AbstractInput(ABC):


class Sketchpad(AbstractInput):
    def __init__(self, cast_to="numpy", shape=(28, 28), invert_colors=True,
                 flatten=False, scale=1/255, shift=0,
                 dtype='float64', sample_inputs=None, label=None):
    def __init__(self, shape=(28, 28), invert_colors=True,
                 flatten=False, label=None):
        self.image_width = shape[0]
        self.image_height = shape[1]
        self.invert_colors = invert_colors
        self.flatten = flatten
        self.scale = scale
        self.shift = shift
        self.dtype = dtype
        self.sample_inputs = sample_inputs
        super().__init__(label)

    @classmethod
@ -101,8 +90,6 @@ class Sketchpad(AbstractInput):
            array = np.array(im).flatten().reshape(1, self.image_width * self.image_height)
        else:
            array = np.array(im).flatten().reshape(1, self.image_width, self.image_height)
        array = array * self.scale + self.shift
        array = array.astype(self.dtype)
        return array

    def process_example(self, example):
@ -136,8 +123,7 @@ class Webcam(AbstractInput):


class Textbox(AbstractInput):
    def __init__(self, sample_inputs=None, lines=1, placeholder=None, default=None, label=None, numeric=False):
        self.sample_inputs = sample_inputs
    def __init__(self, lines=1, placeholder=None, default=None, numeric=False, label=None):
        self.lines = lines
        self.placeholder = placeholder
        self.default = default
@ -227,7 +213,7 @@ class Slider(AbstractInput):
    @classmethod
    def get_shortcut_implementations(cls):
        return {
            "checkbox": {},
            "slider": {},
        }


@ -283,6 +269,8 @@ class Image(AbstractInput):


class Microphone(AbstractInput):
    def __init__(self, label=None):
        super().__init__(label)

    def preprocess(self, inp):
        """
@ -281,7 +281,6 @@ class Interface:
            from_ipynb = get_ipython()
            if "google.colab" in str(from_ipynb):
                is_colab = True
                print("Google colab notebook detected.")
        except NameError:
            data = {'error': 'NameError in launch method'}
            try:
@ -307,10 +306,10 @@ class Interface:
            print(strings.en["RUNNING_LOCALLY"].format(path_to_local_server))
        else:
            if debug:
                print("This cell will run indefinitely so that you can see errors and logs. To turn off, "
                      "set debug=False in launch().")
                print("Colab notebook detected. This cell will run indefinitely so that you can see errors and logs. "
                      "To turn off, set debug=False in launch().")
            else:
                print("To show errors in colab notebook, set debug=True in launch()")
                print("Colab notebook detected. To show errors in colab notebook, set debug=True in launch()")

        if share:
            try:
@ -331,6 +330,7 @@ class Interface:
                is_colab
            ):  # For a colab notebook, create a public link even if share is False.
                share_url = networking.setup_tunnel(server_port)
                print("Running on External URL:", share_url)
                if self.verbose:
                    print(strings.en["COLAB_NO_LOCAL"])
            else:  # If it's not a colab notebook and share=False, print a message telling them about the share option.
@ -10,6 +10,7 @@ import json
from gradio import preprocessing_utils
import datetime
import operator
from numbers import Number

# Where to find the static resources associated with each template.
BASE_OUTPUT_INTERFACE_JS_PATH = 'static/js/interfaces/output/{}.js'
@ -53,8 +54,8 @@ class Label(AbstractOutput):
        super().__init__(label)

    def postprocess(self, prediction):
        if isinstance(prediction, str):
            return {"label": prediction}
        if isinstance(prediction, str) or isinstance(prediction, Number):
            return {"label": str(prediction)}
        elif isinstance(prediction, dict):
            sorted_pred = sorted(
                prediction.items(),
@ -104,15 +105,11 @@ class KeyValues(AbstractOutput):


class Textbox(AbstractOutput):
    def __init__(self, lines=1, placeholder=None, label=None):
        self.lines = lines
        self.placeholder = placeholder
    def __init__(self, label=None):
        super().__init__(label)

    def get_template_context(self):
        return {
            "lines": self.lines,
            "placeholder": self.placeholder,
            **super().get_template_context()
        }

@ -121,7 +118,6 @@ class Textbox(AbstractOutput):
        return {
            "text": {},
            "number": {},
            "textbox": {"lines": 7}
        }

    def postprocess(self, prediction):
@ -133,7 +129,7 @@ class Textbox(AbstractOutput):


class Image(AbstractOutput):
    def __init__(self, label=None, plot=False):
    def __init__(self, plot=False, label=None):
        self.plot = plot
        super().__init__(label)
@ -31,10 +31,18 @@
  display: flex;
  flex-flow: column;
}
.output_panel {
  position: relative;
}
.loading {
  position: absolute;
  top: 10px;
  left: 10px;
  margin-left: auto;
  z-index: 1;
}
.loading img {
  width: 50px;
  display: none;
}
.panel_buttons {
@ -1,5 +1,4 @@
.output_text {
  resize: none;
  width: 100%;
  font-size: 18px;
  outline: none;
@ -8,4 +7,9 @@
  border: solid 1px black;
  box-sizing: border-box;
  padding: 4px;
  min-height: 30px;
  font-family: monospace;
  white-space: pre-wrap;      /* CSS3 */
  white-space: -moz-pre-wrap; /* Firefox */
  word-wrap: break-word;      /* IE */
}
Binary file not shown.
Before: 3.0 KiB | After: 3.7 KiB
@ -22,8 +22,7 @@ var io_master_template = {
    this.target.find(".loading").removeClass("invisible");
    this.target.find(".loading_in_progress").show();
    this.target.find(".loading_failed").hide();
    this.target.find(".output_interface").addClass("invisible");
    this.target.find(".output_interfaces .panel_header").addClass("invisible");
    this.target.find(".output_interfaces").css("opacity", 0.5);
  }
  this.fn(this.last_input).then((output) => {
    io.output(output);
@ -47,11 +46,14 @@ var io_master_template = {
    }

    if (this.config.live) {
      this.gather();
      var io = this;
      var refresh_lag = this.config.refresh_lag || 0;
      window.setTimeout(function() {
        io.gather();
      }, refresh_lag);
    } else {
      this.target.find(".loading").addClass("invisible");
      this.target.find(".output_interface").removeClass("invisible");
      this.target.find(".output_interfaces .panel_header").removeClass("invisible");
      this.target.find(".output_interfaces").css("opacity", 1);
    }
  },
  flag: function(message) {
@ -11,7 +11,7 @@ function gradio(config, fn, target) {
        </div>
      </div>
      <div class="panel output_panel">
        <div class="loading interface invisible">
        <div class="loading invisible">
          <img class="loading_in_progress" src="static/img/logo_loading.gif">
          <img class="loading_failed" src="static/img/logo_error.png">
        </div>
@ -114,7 +114,7 @@ function gradio(config, fn, target) {
    target.find(".flag_message").empty();
    target.find(".loading").addClass("invisible");
    target.find(".loading_time").text("");
    target.find(".output_interface").removeClass("invisible");
    target.find(".output_interfaces").css("opacity", 1);
    io_master.last_input = null;
    io_master.last_output = null;
  });
@ -1,13 +1,7 @@
const textbox_output = {
  html: `<textarea readonly class="output_text"></textarea>`,
  html: `<div class="output_text"></div>`,
  init: function(opts) {
    if (opts.lines) {
      this.target.find(".output_text").attr("rows", opts.lines).css("height", "auto");
      this.target.css("height", "auto");
    }
    if (opts.placeholder) {
      this.target.find(".output_text").attr("placeholder", opts.placeholder)
    }
    this.target.css("height", "auto");
  },
  output: function(data) {
    this.target.find(".output_text").text(data);
@ -31,7 +31,7 @@
      <a href="https://gradio.app"><img src="../static/img/logo_inline.png" /></a>
    </nav>
    <div id="share" class="invisible">
      Live at <a id="share-link"></a>.
      Live at <a id="share-link" target="_blank"></a>.
      <button id="share-copy">Copy Link</button>
    </div>
    <div class="container">
@ -40,7 +40,7 @@
    </div>
    <div id="interface_target" class="container"></div>
    <div id="examples" class="container invisible">
      <h3>Examples</h3>
      <h3>Examples <small>(click to load)</small></h3>
      <table>
      </table>
    </div>
3
setup.py
@ -5,7 +5,7 @@ except ImportError:

setup(
    name='gradio',
    version='0.9.9.1',
    version='0.9.9.6',
    include_package_data=True,
    description='Python library for easily interacting with trained machine learning models',
    author='Abubakar Abid',
@ -20,5 +20,6 @@ setup(
        'scipy',
        'IPython',
        'scikit-image',
        'analytics-python',
    ],
)
@ -7,13 +7,13 @@ import gradio.outputs

class TestInterface(unittest.TestCase):
    def test_input_output_mapping(self):
        io = gr.Interface(inputs='SketCHPad', outputs='textBOX', fn=lambda x: x)
        io = gr.Interface(inputs='SketCHPad', outputs='TexT', fn=lambda x: x)
        self.assertIsInstance(io.input_interfaces[0], gradio.inputs.Sketchpad)
        self.assertIsInstance(io.output_interfaces[0], gradio.outputs.Textbox)

    def test_input_interface_is_instance(self):
        inp = gradio.inputs.Image()
        io = gr.Interface(inputs=inp, outputs='textBOX', fn=lambda x: x)
        io = gr.Interface(inputs=inp, outputs='teXT', fn=lambda x: x)
        self.assertEqual(io.input_interfaces[0], inp)

    def test_output_interface_is_instance(self):
@ -24,7 +24,7 @@ class TestInterface(unittest.TestCase):
    def test_prediction(self):
        def model(x):
            return 2*x
        io = gr.Interface(inputs='textbox', outputs='textBOX', fn=model)
        io = gr.Interface(inputs='textbox', outputs='TEXT', fn=model)
        self.assertEqual(io.predict[0](11), 22)
@ -27,7 +27,7 @@ class TestGetAvailablePort(unittest.TestCase):
        s.bind((networking.LOCALHOST_NAME, port))  # Bind to the port
        new_port = networking.get_first_available_port(initial, final)
        s.close()
        self.assertFalse(port==new_port)
        self.assertFalse(port == new_port)


# class TestSetSampleData(unittest.TestCase):
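
The mixed-case shortcut strings in the tests above ('SketCHPad', 'TexT', 'TEXT') suggest that shortcut lookup is case-insensitive; a hedged sketch of the equivalence being asserted:

import gradio as gr

io_a = gr.Interface(inputs='sketchpad', outputs='text', fn=lambda x: x)
io_b = gr.Interface(inputs='SketCHPad', outputs='TexT', fn=lambda x: x)
# Both are expected to resolve to gradio.inputs.Sketchpad / gradio.outputs.Textbox.
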