This commit is contained in:
Ali Abid 2020-07-14 15:43:56 -07:00
commit 066b16fa42
3 changed files with 43 additions and 23 deletions

View File

@@ -2,12 +2,12 @@
# Welcome to `gradio` :rocket: # Welcome to `gradio` :rocket:
Quickly create customizable UI components around your TensorFlow or PyTorch models, or even arbitrary Python functions. Mix and match components to support any combination of inputs and outputs. Gradio makes it easy for you to "play around" with your model in your browser by dragging-and-dropping in your own images (or pasting your own text, recording your own voice, etc.) and seeing what the model outputs. You can also generate a share link which allows anyone, anywhere to use the interface as the model continues to run on your machine. Our core library is free and open-source! Take a look:
<p align="center"> <p align="center">
<img src="https://i.ibb.co/m0skD0j/bert.gif" alt="drawing"/> <img src="https://i.ibb.co/m0skD0j/bert.gif" alt="drawing"/>
</p> </p>
At Gradio, we often try to understand what inputs a model is particularly sensitive to. To help facilitate this, we've developed and open-sourced `gradio`, a python library that allows you to quickly create input and output interfaces over trained models to make it easy for you to "play around" with your model in your browser by dragging-and-dropping in your own images (or pasting your own text, recording your own voice, etc.) and seeing what the model outputs. `gradio` can also generate a share link which allows anyone, anywhere to use the interface as the model continues to run on your machine.
Gradio is useful for: Gradio is useful for:
* Creating demos of your machine learning code for clients / collaborators / users * Creating demos of your machine learning code for clients / collaborators / users
* Getting feedback on model performance from users * Getting feedback on model performance from users

View File

@@ -4,15 +4,18 @@ This module defines various classes that can serve as the `input` to an interfac
automatically added to a registry, which allows them to be easily referenced in other parts of the code. automatically added to a registry, which allows them to be easily referenced in other parts of the code.
""" """
from abc import ABC, abstractmethod import datetime
from gradio import preprocessing_utils, validation_data import json
import numpy as np import os
import PIL.Image, PIL.ImageOps
import time import time
import warnings import warnings
import json from abc import ABC, abstractmethod
import datetime
import os import numpy as np
import PIL.Image
import PIL.ImageOps
import scipy.io.wavfile
from gradio import preprocessing_utils, validation_data
# Where to find the static resources associated with each template. # Where to find the static resources associated with each template.
# BASE_INPUT_INTERFACE_TEMPLATE_PATH = 'static/js/interfaces/input/{}.js' # BASE_INPUT_INTERFACE_TEMPLATE_PATH = 'static/js/interfaces/input/{}.js'
@@ -269,20 +272,32 @@ class Image(AbstractInput):
class Microphone(AbstractInput):
    """
    Input interface for microphone audio. Decodes a base64-encoded WAV
    payload and returns either the raw sample array or MFCC features,
    depending on the `preprocessing` mode chosen at construction time.
    """

    def __init__(self, preprocessing=None, label=None):
        """
        Parameters:
        preprocessing (str | None): if "mfcc", preprocess() returns MFCC
            features; if None (default), preprocess() returns the raw
            audio signal.
        label (str): display label for this input component.
        Raises:
        ValueError: if `preprocessing` is neither None nor "mfcc".
        """
        super().__init__(label)
        if preprocessing is None or preprocessing == "mfcc":
            self.preprocessing = preprocessing
        else:
            # Single formatted message instead of ValueError("msg", value),
            # whose two-arg tuple form renders poorly in tracebacks.
            raise ValueError(
                "unexpected value for preprocessing: {!r} "
                "(expected None or 'mfcc')".format(preprocessing))

    @classmethod
    def get_shortcut_implementations(cls):
        # Picked up by the module-level `shortcuts` registry below.
        return {
            "microphone": {},
        }

    def preprocess(self, inp):
        """
        Decode the base64 audio payload `inp` to a temporary WAV file and
        return MFCC features when self.preprocessing == "mfcc", otherwise
        the raw sample array read via scipy.io.wavfile.read.
        """
        file_obj = preprocessing_utils.decode_base64_to_wav_file(inp)
        if self.preprocessing == "mfcc":
            return preprocessing_utils.generate_mfcc_features_from_audio_file(
                file_obj.name)
        # wavfile.read returns (sample_rate, data); the rate is discarded.
        _, signal = scipy.io.wavfile.read(file_obj.name)
        return signal
# Automatically adds all shortcut implementations in AbstractInput into a dictionary. # Automatically adds all shortcut implementations in AbstractInput into a dictionary.
shortcuts = {} shortcuts = {}
for cls in AbstractInput.__subclasses__(): for cls in AbstractInput.__subclasses__():
for shortcut, parameters in cls.get_shortcut_implementations().items(): for shortcut, parameters in cls.get_shortcut_implementations().items():
shortcuts[shortcut] = cls(**parameters) shortcuts[shortcut] = cls(**parameters)

View File

@@ -116,6 +116,17 @@ def get_first_available_port(initial, final):
) )
) )
def send_prediction_analytics(interface):
    """
    Best-effort POST of the interface's input/output component types to the
    gradio analytics endpoint.

    Runs on a background thread (see serve_files_in_background), so it must
    never raise and must never block indefinitely.

    Parameters:
    interface: the Interface whose input_interfaces / output_interfaces
        component lists are reported.
    """
    data = {'input_interface': interface.input_interfaces,
            'output_interface': interface.output_interfaces,
            }
    try:
        requests.post(
            analytics_url + 'gradio-prediction-analytics/',
            data=data,
            # Bound the request so a slow analytics server cannot hang the
            # analytics thread forever.
            timeout=3)
    except requests.RequestException:
        # RequestException covers ConnectionError, Timeout, HTTPError, etc.
        # (the original only caught ConnectionError, letting timeouts and
        # other request failures escape the thread). Analytics are strictly
        # best-effort: skip silently when the network/endpoint is down.
        pass
def serve_files_in_background(interface, port, directory_to_serve=None, server_name=LOCALHOST_NAME): def serve_files_in_background(interface, port, directory_to_serve=None, server_name=LOCALHOST_NAME):
class HTTPHandler(SimpleHTTPRequestHandler): class HTTPHandler(SimpleHTTPRequestHandler):
@ -160,17 +171,11 @@ def serve_files_in_background(interface, port, directory_to_serve=None, server_n
# f.write(json.dumps(output_flag)) # f.write(json.dumps(output_flag))
# f.write("\n") # f.write("\n")
# Prepare return json dictionary.
self.wfile.write(json.dumps(output).encode()) self.wfile.write(json.dumps(output).encode())
data = {'input_interface': interface.input_interfaces,
'output_interface': interface.output_interfaces, analytics_thread = threading.Thread(
} target=send_prediction_analytics, args=[interface])
try: analytics_thread.start()
requests.post(
analytics_url + 'gradio-prediction-analytics/',
data=data)
except requests.ConnectionError:
pass # do not push analytics if no network
elif self.path == "/api/flag/": elif self.path == "/api/flag/":
self._set_headers() self._set_headers()