big changes

Ali Abid 2020-08-05 10:42:52 -07:00
parent 82c2b5afb0
commit 63312c1e7b
69 changed files with 2240 additions and 610 deletions


@ -0,0 +1,38 @@
class Component():
"""
A class for defining the methods that all gradio input and output components should have.
"""
def __init__(self, label):
self.label = label
def get_template_context(self):
"""
:return: a dictionary with context variables for the javascript file associated with the context
"""
return {"label": self.label}
def preprocess(self, x):
"""
Any preprocessing needed to be performed on function input.
"""
return x
def postprocess(self, y):
"""
Any postprocessing needed to be performed on function output.
"""
return y
def process_example(self, example):
"""
Preprocess example for UI
"""
return example
@classmethod
def get_shortcut_implementations(cls):
"""
Return dictionary of shortcut implementations
"""
return {}
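As a quick illustration, a hypothetical subclass only needs to override the hooks it cares about (the class and function below are invented for the example; they are not part of this commit):

class Uppercase(Component):
    """Hypothetical input component that upper-cases whatever the user typed."""
    def preprocess(self, x):
        # convert the raw value from the browser into what the wrapped fn expects
        return str(x).upper()

print(Uppercase(label="demo").get_template_context())  # {'label': 'demo'}
print(Uppercase(label="demo").preprocess("hi"))         # 'HI'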


@ -1,6 +1,6 @@
import json import json
from gradio.inputs import AbstractInput from gradio.inputs import InputComponent
from gradio.outputs import AbstractOutput from gradio.outputs import OutputComponent
from gradio.interface import Interface from gradio.interface import Interface
import inspect import inspect
@ -44,8 +44,8 @@ def document(cls_set):
docset.append(inp) docset.append(inp)
return docset return docset
inputs = document(AbstractInput.__subclasses__()) inputs = document(InputComponent.__subclasses__())
outputs = document(AbstractOutput.__subclasses__()) outputs = document(OutputComponent.__subclasses__())
interface_params = get_params(Interface.__init__) interface_params = get_params(Interface.__init__)
interface = { interface = {
"doc": inspect.getdoc(Interface), "doc": inspect.getdoc(Interface),


@ -1,6 +1,6 @@
""" """
This module defines various classes that can serve as the `input` to an interface. Each class must inherit from This module defines various classes that can serve as the `input` to an interface. Each class must inherit from
`AbstractInput`, and each class must define a path to its template. All of the subclasses of `AbstractInput` are `InputComponent`, and each class must define a path to its template. All of the subclasses of `InputComponent` are
automatically added to a registry, which allows them to be easily referenced in other parts of the code. automatically added to a registry, which allows them to be easily referenced in other parts of the code.
""" """
@ -9,80 +9,55 @@ import json
import os import os
import time import time
import warnings import warnings
from abc import ABC, abstractmethod from gradio.component import Component
import numpy as np import numpy as np
import PIL.Image import PIL.Image
import PIL.ImageOps import PIL.ImageOps
import scipy.io.wavfile import scipy.io.wavfile
from gradio import preprocessing_utils, validation_data from gradio import preprocessing_utils, validation_data
import pandas as pd
import math
import tempfile
# Where to find the static resources associated with each template. class InputComponent(Component):
# BASE_INPUT_INTERFACE_TEMPLATE_PATH = 'static/js/interfaces/input/{}.js'
BASE_INPUT_INTERFACE_JS_PATH = 'static/js/interfaces/input/{}.js'
class AbstractInput(ABC):
""" """
An abstract class for defining the methods that all gradio inputs should have. Input Component. All input components subclass this.
When this is subclassed, it is automatically added to the registry
""" """
def __init__(self, label):
self.label = label
def get_validation_inputs(self):
"""
An interface can optionally implement a method that returns a list of examples inputs that it should be able to
accept and preprocess for validation purposes.
"""
return []
def get_template_context(self):
"""
:return: a dictionary with context variables for the javascript file associated with the context
"""
return {"label": self.label}
def preprocess(self, inp):
"""
By default, no pre-processing is applied to text.
"""
return inp
def process_example(self, example):
"""
Preprocess example for UI
"""
return example
@classmethod @classmethod
def get_shortcut_implementations(cls): def get_all_shortcut_implementations(cls):
""" shortcuts = {}
Return dictionary of shortcut implementations for sub_cls in cls.__subclasses__():
""" for shortcut, parameters in sub_cls.get_shortcut_implementations().items():
return {} shortcuts[shortcut] = (sub_cls, parameters)
return shortcuts
class Textbox(AbstractInput): class Textbox(InputComponent):
""" """
Component creates a textbox for user to enter input. Provides a string (or number if `is_numeric` is true) as an argument to the wrapped function. Component creates a textbox for user to enter input. Provides a string (or number if `type` is "number") as an argument to the wrapped function.
Input type: str Input type: str
""" """
def __init__(self, lines=1, placeholder=None, default=None, numeric=False, label=None): def __init__(self, lines=1, placeholder=None, default=None, numeric=False, type="str", label=None):
''' '''
Parameters: Parameters:
lines (int): number of line rows to provide in textarea. lines (int): number of line rows to provide in textarea.
placeholder (str): placeholder hint to provide behind textarea. placeholder (str): placeholder hint to provide behind textarea.
default (str): default text to provide in textarea. default (str): default text to provide in textarea.
numeric (bool): whether the input should be parsed as a number instead of a string. numeric (bool): DEPRECATED. Whether the input should be parsed as a number instead of a string.
type (str): Type of value to be returned by component. "str" returns a string, "number" returns a float value.
label (str): component name in interface. label (str): component name in interface.
''' '''
self.lines = lines self.lines = lines
self.placeholder = placeholder self.placeholder = placeholder
self.default = default self.default = default
self.numeric = numeric if numeric:
warnings.warn("The 'numeric' parameter has been deprecated. Set parameter 'type' to 'number' instead.", DeprecationWarning)
self.type = "number"
else:
self.type = type
super().__init__(label) super().__init__(label)
def get_template_context(self): def get_template_context(self):
@ -98,20 +73,20 @@ class Textbox(AbstractInput):
return { return {
"text": {}, "text": {},
"textbox": {"lines": 7}, "textbox": {"lines": 7},
"number": {"numeric": True} "number": {"type": "number"}
} }
def preprocess(self, inp): def preprocess(self, x):
""" if self.type == "str":
Cast type of input return x
""" elif self.type == "number":
if self.numeric: return float(x)
return float(inp)
else: else:
return inp raise ValueError("Unknown type: " + self.type + ". Please choose from: 'str', 'number'.")
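A small usage sketch of the new `type` handling (assuming this module is importable as gradio.inputs):

from gradio.inputs import Textbox

Textbox(type="str").preprocess("3.5")      # "3.5"
Textbox(type="number").preprocess("3.5")   # 3.5
Textbox(numeric=True).preprocess("2")      # 2.0, with a DeprecationWarning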
class Slider(AbstractInput):
class Slider(InputComponent):
""" """
Component creates a slider that ranges from `minimum` to `maximum`. Provides a number as an argument to the wrapped function. Component creates a slider that ranges from `minimum` to `maximum`. Provides a number as an argument to the wrapped function.
Input type: float Input type: float
@ -128,6 +103,11 @@ class Slider(AbstractInput):
''' '''
self.minimum = minimum self.minimum = minimum
self.maximum = maximum self.maximum = maximum
if step is None:
difference = maximum - minimum
power = math.floor(math.log10(difference) - 1)
step = 10 ** power
self.step = step
self.default = minimum if default is None else default self.default = minimum if default is None else default
super().__init__(label) super().__init__(label)
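When `step` is omitted, the default is the slider's range rounded down by one to two orders of magnitude, always a power of ten. A sketch of the same arithmetic:

import math

def default_step(minimum, maximum):
    # mirrors Slider.__init__ when step is None
    difference = maximum - minimum
    return 10 ** math.floor(math.log10(difference) - 1)

default_step(0, 1)     # 0.1
default_step(0, 50)    # 1
default_step(0, 100)   # 10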
@ -135,6 +115,7 @@ class Slider(AbstractInput):
return { return {
"minimum": self.minimum, "minimum": self.minimum,
"maximum": self.maximum, "maximum": self.maximum,
"step": self.step,
"default": self.default, "default": self.default,
**super().get_template_context() **super().get_template_context()
} }
@ -146,7 +127,7 @@ class Slider(AbstractInput):
} }
class Checkbox(AbstractInput): class Checkbox(InputComponent):
""" """
Component creates a checkbox that can be set to `True` or `False`. Provides a boolean as an argument to the wrapped function. Component creates a checkbox that can be set to `True` or `False`. Provides a boolean as an argument to the wrapped function.
Input type: bool Input type: bool
@ -166,19 +147,21 @@ class Checkbox(AbstractInput):
} }
class CheckboxGroup(AbstractInput): class CheckboxGroup(InputComponent):
""" """
Component creates a set of checkboxes of which a subset can be selected. Provides a list of strings representing the selected choices as an argument to the wrapped function. Component creates a set of checkboxes of which a subset can be selected. Provides a list of strings representing the selected choices as an argument to the wrapped function.
Input type: List[str] Input type: Union[List[str], List[int]]
""" """
def __init__(self, choices, label=None): def __init__(self, choices, type="choices", label=None):
''' '''
Parameters: Parameters:
choices (List[str]): list of options to select from. choices (List[str]): list of options to select from.
type (str): Type of value to be returned by component. "value" returns the list of strings of the choices selected, "index" returns the list of indicies of the choices selected.
label (str): component name in interface. label (str): component name in interface.
''' '''
self.choices = choices self.choices = choices
self.type = type
super().__init__(label) super().__init__(label)
def get_template_context(self): def get_template_context(self):
@ -187,20 +170,30 @@ class CheckboxGroup(AbstractInput):
**super().get_template_context() **super().get_template_context()
} }
def preprocess(self, x):
if self.type == "value":
return x
elif self.type == "index":
return [self.choices.index(choice) for choice in x]
else:
raise ValueError("Unknown type: " + self.type + ". Please choose from: 'value', 'index'.")
class Radio(AbstractInput):
class Radio(InputComponent):
""" """
Component creates a set of radio buttons of which only one can be selected. Provides string representing selected choice as an argument to the wrapped function. Component creates a set of radio buttons of which only one can be selected. Provides string representing selected choice as an argument to the wrapped function.
Input type: str Input type: Union[str, int]
""" """
def __init__(self, choices, label=None): def __init__(self, choices, type="value", label=None):
''' '''
Parameters: Parameters:
choices (List[str]): list of options to select from. choices (List[str]): list of options to select from.
type (str): Type of value to be returned by component. "value" returns the string of the choice selected, "index" returns the index of the choice selected.
label (str): component name in interface. label (str): component name in interface.
''' '''
self.choices = choices self.choices = choices
self.type = type
super().__init__(label) super().__init__(label)
def get_template_context(self): def get_template_context(self):
@ -209,20 +202,29 @@ class Radio(AbstractInput):
**super().get_template_context() **super().get_template_context()
} }
def preprocess(self, x):
if self.type == "value":
return x
elif self.type == "index":
return self.choices.index(x)
else:
raise ValueError("Unknown type: " + self.type + ". Please choose from: 'value', 'index'.")
class Dropdown(AbstractInput): class Dropdown(InputComponent):
""" """
Component creates a dropdown of which only one can be selected. Provides string representing selected choice as an argument to the wrapped function. Component creates a dropdown of which only one can be selected. Provides string representing selected choice as an argument to the wrapped function.
Input type: str Input type: str
""" """
def __init__(self, choices, label=None): def __init__(self, choices, type="value", label=None):
''' '''
Parameters: Parameters:
choices (List[str]): list of options to select from. choices (List[str]): list of options to select from.
type (str): Type of value to be returned by component. "value" returns the string of the choice selected, "index" returns the index of the choice selected.
label (str): component name in interface. label (str): component name in interface.
''' '''
self.choices = choices self.choices = choices
self.type = type
super().__init__(label) super().__init__(label)
def get_template_context(self): def get_template_context(self):
@ -231,69 +233,210 @@ class Dropdown(AbstractInput):
**super().get_template_context() **super().get_template_context()
} }
def preprocess(self, x):
if self.type == "value":
return x
elif self.type == "index":
return self.choices.index(x)
else:
raise ValueError("Unknown type: " + self.type + ". Please choose from: 'value', 'index'.")
class Image(AbstractInput):
class Image(InputComponent):
""" """
Component creates an image upload box with editing capabilities. Provides numpy array of shape `(width, height, 3)` if `image_mode` is "RGB" as an argument to the wrapped function. Provides numpy array of shape `(width, height)` if `image_mode` is "L" as an argument to the wrapped function. Component creates an image upload box with editing capabilities.
Input type: numpy.array Input type: Union[numpy.array, PIL.Image, str]
""" """
def __init__(self, shape=None, image_mode='RGB', label=None): def __init__(self, shape=None, image_mode='RGB', source="upload", tools=["brush", "crop", "rotate", "undo", "filter"], type="numpy", label=None):
''' '''
Parameters: Parameters:
shape (Tuple[int, int]): shape to crop and resize image to; if None, matches input image size. shape (Tuple[int, int]): shape to crop and resize image to; if None, matches input image size.
image_mode (str): "RGB" if color, or "L" if black and white. image_mode (str): "RGB" if color, or "L" if black and white.
source (str): Source of image. "upload" creates a box where user can drop an image file, "webcam" allows user to take snapshot from their webcam, "canvas" defaults to a white image that can be edited and drawn upon with tools.
tools (List[str]): Tools available to user to edit images. "brush" allows user to draw on image, "crop" allows user to select portion of image, "rotate" allows user to rotate or flip image, "undo" allows user to revert changes, "filter" allows user to apply filters on image.
type (str): Type of value to be returned by component. "numpy" returns a numpy array with shape (width, height, 3), "pil" returns a PIL image object, "file" returns a temporary file object whose path can be retrieved by file_obj.name.
label (str): component name in interface. label (str): component name in interface.
''' '''
if shape is None: self.shape = shape
self.image_width, self.image_height = None, None
else:
self.image_width = shape[0]
self.image_height = shape[1]
self.image_mode = image_mode self.image_mode = image_mode
self.source = source
self.tools = tools
self.type = type
super().__init__(label) super().__init__(label)
def get_validation_inputs(self):
return validation_data.BASE64_COLOR_IMAGES
@classmethod @classmethod
def get_shortcut_implementations(cls): def get_shortcut_implementations(cls):
return { return {
"image": {}, "image": {},
"webcam": {"source": "webcam"},
"sketchpad": {"image_mode": "L", "source": "canvas", "tools": ["brush"]},
"paint": {"source": "canvas", "tools": ["brush", "undo"]},
} }
def get_template_context(self): def get_template_context(self):
return { return {
"image_mode": self.image_mode,
"source": self.source,
"tools": self.tools,
**super().get_template_context() **super().get_template_context()
} }
def preprocess(self, inp): def preprocess(self, x):
""" im = preprocessing_utils.decode_base64_to_image(x)
Default preprocessing method for is to convert the picture to black and white and resize to be 48x48
"""
im = preprocessing_utils.decode_base64_to_image(inp)
with warnings.catch_warnings(): with warnings.catch_warnings():
warnings.simplefilter("ignore") warnings.simplefilter("ignore")
im = im.convert(self.image_mode) im = im.convert(self.image_mode)
image_width, image_height = self.image_width, self.image_height image_width, image_height = self.image_width, self.image_height
if image_width is None: if self.shape is not None:
image_width = im.size[0] im = preprocessing_utils.resize_and_crop(
if image_height is None: im, (self.shape[0], self.shape[1]))
image_height = im.size[1] if self.type == "pil":
im = preprocessing_utils.resize_and_crop( return im
im, (image_width, image_height)) elif self.type == "numpy":
return np.array(im) return np.array(im)
elif self.type == "file":
file_obj = tempfile.NamedTemporaryFile()
im.save(file_obj.name)
return file_obj
def process_example(self, example): def process_example(self, example):
if os.path.exists(example): if os.path.exists(example):
return preprocessing_utils.convert_file_to_base64(example) return preprocessing_utils.encode_file_to_base64(example)
else: else:
return example return example
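A hedged usage sketch of the reworked Image input; the grayscale function is invented for illustration and the package is assumed to be importable as gradio:

import gradio as gr

def to_grayscale(img):
    # type="numpy" (the default) hands the function an array of shape (width, height, 3)
    return img.mean(axis=2)

# string shortcuts such as "webcam", "sketchpad", and "paint" now resolve to
# Image(...) with the preset parameters listed above
gr.Interface(to_grayscale, gr.inputs.Image(shape=(224, 224)), "image").launch()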
class Sketchpad(AbstractInput): class Audio(InputComponent):
""" """
Component creates a sketchpad for black and white illustration. Provides numpy array of shape `(width, height)` as an argument to the wrapped function. Component accepts audio input files. Provides numpy array of shape `(samples, 2)` as an argument to the wrapped function.
Input type: Union[Tuple[int, numpy.array], str, numpy.array]
"""
def __init__(self, source="upload", type="numpy", label=None):
'''
Parameters:
source (str): Source of audio. "upload" creates a box where user can drop an audio file, "microphone" creates a microphone input.
type (str): Type of value to be returned by component. "numpy" returns a 2-set tuple with an integer sample_rate and the data numpy.array of shape (samples, 2), "file" returns a temporary file object whose path can be retrieved by file_obj.name, "mfcc" returns the mfcc coefficients of the input audio.
label (str): component name in interface.
'''
self.source = source
self.type = type
super().__init__(label)
@classmethod
def get_shortcut_implementations(cls):
return {
"audio": {},
"microphone": {"source": "microphone"}
}
def preprocess(self, x):
"""
Decodes the base64 audio payload into a temporary file and returns it in the format requested by `type`.
"""
file_obj = preprocessing_utils.decode_base64_to_file(x)
if self.type == "file":
return file_obj
elif self.type == "numpy":
return scipy.io.wavfile.read(file_obj.name)
elif self.type == "mfcc":
return preprocessing_utils.generate_mfcc_features_from_audio_file(file_obj.name)
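A sketch of wiring the new Audio input into a function (the audio-reversing function is illustrative, assuming gradio.inputs is importable):

from gradio.inputs import Audio

def reverse_audio(audio):
    sample_rate, data = audio          # type="numpy" yields (sample_rate, samples)
    return sample_rate, data[::-1]

mic = Audio(source="microphone", type="numpy", label="Say something")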
class File(InputComponent):
"""
Component accepts generic file uploads.
Input type: Union[str, bytes]
"""
def __init__(self, type="file", label=None):
'''
Parameters:
type (str): Type of value to be returned by component. "file" returns a temporary file object whose path can be retrieved by file_obj.name, "binary" returns an bytes object.
label (str): component name in interface.
'''
self.type = type
super().__init__(label)
@classmethod
def get_shortcut_implementations(cls):
return {
"file": {},
}
def preprocess(self, x):
if self.type == "file":
return preprocessing_utils.decode_base64_to_file(x)
elif self.type == "bytes":
return preprocessing_utils.decode_base64_to_binary(x)
else:
raise ValueError("Unknown type: " + self.type + ". Please choose from: 'file', 'bytes'.")
class Dataframe(InputComponent):
"""
Component accepts 2D input through a spreadsheet interface.
Input type: Union[pandas.DataFrame, numpy.array, List[Union[str, float]], List[List[Union[str, float]]]]
"""
def __init__(self, headers=None, row_count=3, col_count=3, datatype="str", type="pandas", label=None):
'''
Parameters:
headers (List[str]): Header names to dataframe.
row_count (int): Limit number of rows for input.
col_count (int): Limit number of columns for input. If equal to 1, return data will be one-dimensional. Ignored if `headers` is provided.
datatype (Union[str, List[str]]): Datatype of values in sheet. Can be provided per column as a list of strings, or for the entire sheet as a single string. Valid datatypes are "str", "number", "bool", and "date".
type (str): Type of value to be returned by component. "pandas" for pandas dataframe, "numpy" for numpy array, or "array" for a Python array.
label (str): component name in interface.
'''
self.headers = headers
self.datatype = datatype
self.row_count = row_count
self.col_count = len(headers) if headers else col_count
self.type = type
super().__init__(label)
def get_template_context(self):
return {
"headers": self.headers,
"datatype": self.datatype,
"row_count": self.row_count,
"col_count": self.col_count,
**super().get_template_context()
}
@classmethod
def get_shortcut_implementations(cls):
return {
"dataframe": {"type": "pandas"},
"numpy": {"type": "numpy"},
"matrix": {"type": "array"},
"list": {"type": "array", "col_count": 1},
}
def preprocess(self, x):
if self.type == "pandas":
if self.headers:
return pd.DataFrame(x, columns=self.headers)
else:
return pd.DataFrame(x)
if self.col_count == 1:
x = x[0]
if self.type == "numpy":
return np.array(x)
elif self.type == "array":
return x
else:
raise ValueError("Unknown type: " + self.type + ". Please choose from: 'pandas', 'numpy', 'array'.")
# DEPRECATED COMPONENTS
class Sketchpad(InputComponent):
"""
DEPRECATED. Component creates a sketchpad for black and white illustration. Provides numpy array of shape `(width, height)` as an argument to the wrapped function.
Input type: numpy.array Input type: numpy.array
""" """
@ -306,23 +449,18 @@ class Sketchpad(AbstractInput):
flatten (bool): whether to reshape the numpy array to a single dimension. flatten (bool): whether to reshape the numpy array to a single dimension.
label (str): component name in interface. label (str): component name in interface.
''' '''
warnings.warn("Sketchpad has been deprecated. Please use 'Image' component to generate a sketchpad. The string shorcut 'sketchpad' has been moved to the Image component.", DeprecationWarning)
self.image_width = shape[0] self.image_width = shape[0]
self.image_height = shape[1] self.image_height = shape[1]
self.invert_colors = invert_colors self.invert_colors = invert_colors
self.flatten = flatten self.flatten = flatten
super().__init__(label) super().__init__(label)
@classmethod def preprocess(self, x):
def get_shortcut_implementations(cls):
return {
"sketchpad": {},
}
def preprocess(self, inp):
""" """
Default preprocessing method for the SketchPad is to convert the sketch to black and white and resize 28x28 Default preprocessing method for the SketchPad is to convert the sketch to black and white and resize 28x28
""" """
im_transparent = preprocessing_utils.decode_base64_to_image(inp) im_transparent = preprocessing_utils.decode_base64_to_image(x)
# Create a white background for the alpha channel # Create a white background for the alpha channel
im = PIL.Image.new("RGBA", im_transparent.size, "WHITE") im = PIL.Image.new("RGBA", im_transparent.size, "WHITE")
im.paste(im_transparent, (0, 0), im_transparent) im.paste(im_transparent, (0, 0), im_transparent)
@ -339,12 +477,12 @@ class Sketchpad(AbstractInput):
return array return array
def process_example(self, example): def process_example(self, example):
return preprocessing_utils.convert_file_to_base64(example) return preprocessing_utils.encode_file_to_base64(example)
class Webcam(AbstractInput): class Webcam(InputComponent):
""" """
Component creates a webcam for captured image input. Provides numpy array of shape `(width, height, 3)` as an argument to the wrapped function. DEPRECATED. Component creates a webcam for captured image input. Provides numpy array of shape `(width, height, 3)` as an argument to the wrapped function.
Input type: numpy.array Input type: numpy.array
""" """
@ -354,34 +492,26 @@ class Webcam(AbstractInput):
shape (Tuple[int, int]): shape to crop and resize image to. shape (Tuple[int, int]): shape to crop and resize image to.
label (str): component name in interface. label (str): component name in interface.
''' '''
warnings.warn("Webcam has been deprecated. Please use 'Image' component to generate a webcam. The string shorcut 'webcam' has been moved to the Image component.", DeprecationWarning)
self.image_width = shape[0] self.image_width = shape[0]
self.image_height = shape[1] self.image_height = shape[1]
self.num_channels = 3 self.num_channels = 3
super().__init__(label) super().__init__(label)
def get_validation_inputs(self): def preprocess(self, x):
return validation_data.BASE64_COLOR_IMAGES
@classmethod
def get_shortcut_implementations(cls):
return {
"webcam": {},
}
def preprocess(self, inp):
""" """
Default preprocessing method for is to convert the picture to black and white and resize to be 48x48 Default preprocessing method for is to convert the picture to black and white and resize to be 48x48
""" """
im = preprocessing_utils.decode_base64_to_image(inp) im = preprocessing_utils.decode_base64_to_image(x)
im = im.convert('RGB') im = im.convert('RGB')
im = preprocessing_utils.resize_and_crop( im = preprocessing_utils.resize_and_crop(
im, (self.image_width, self.image_height)) im, (self.image_width, self.image_height))
return np.array(im) return np.array(im)
class Microphone(AbstractInput): class Microphone(InputComponent):
""" """
Component creates a microphone element for audio inputs. Provides numpy array of shape `(samples, 2)` as an argument to the wrapped function. DEPRECATED. Component creates a microphone element for audio inputs.
Input type: numpy.array Input type: numpy.array
""" """
@ -391,6 +521,7 @@ class Microphone(AbstractInput):
preprocessing (Union[str, Callable]): preprocessing to apply to input preprocessing (Union[str, Callable]): preprocessing to apply to input
label (str): component name in interface. label (str): component name in interface.
''' '''
warnings.warn("Microphone has been deprecated. Please use 'Audio' component to generate a microphone. The string shorcut 'microphone' has been moved to the Audio component.", DeprecationWarning)
super().__init__(label) super().__init__(label)
if preprocessing is None or preprocessing == "mfcc": if preprocessing is None or preprocessing == "mfcc":
self.preprocessing = preprocessing self.preprocessing = preprocessing
@ -398,25 +529,13 @@ class Microphone(AbstractInput):
raise ValueError( raise ValueError(
"unexpected value for preprocessing", preprocessing) "unexpected value for preprocessing", preprocessing)
@classmethod def preprocess(self, x):
def get_shortcut_implementations(cls):
return {
"microphone": {},
}
def preprocess(self, inp):
""" """
By default, no pre-processing is applied to a microphone input file By default, no pre-processing is applied to a microphone input file
""" """
file_obj = preprocessing_utils.decode_base64_to_wav_file(inp) file_obj = preprocessing_utils.decode_base64_to_file(x)
if self.preprocessing == "mfcc": if self.preprocessing == "mfcc":
return preprocessing_utils.generate_mfcc_features_from_audio_file(file_obj.name) return preprocessing_utils.generate_mfcc_features_from_audio_file(file_obj.name)
_, signal = scipy.io.wavfile.read(file_obj.name) _, signal = scipy.io.wavfile.read(file_obj.name)
return signal return signal
# Automatically adds all shortcut implementations in AbstractInput into a dictionary.
shortcuts = {}
for cls in AbstractInput.__subclasses__():
for shortcut, parameters in cls.get_shortcut_implementations().items():
shortcuts[shortcut] = cls(**parameters)


@ -7,8 +7,8 @@ import tempfile
import traceback import traceback
import webbrowser import webbrowser
import gradio.inputs from gradio.inputs import InputComponent
import gradio.outputs from gradio.outputs import OutputComponent
from gradio import networking, strings from gradio import networking, strings
from distutils.version import StrictVersion from distutils.version import StrictVersion
import pkg_resources import pkg_resources
@ -45,8 +45,8 @@ class Interface:
""" """
Parameters: Parameters:
fn (Callable): the function to wrap an interface around. fn (Callable): the function to wrap an interface around.
inputs (Union[str, List[Union[str, AbstractInput]]]): a single Gradio input component, or list of Gradio input components. Components can either be passed as instantiated objects, or referred to by their string shortcuts. The number of input components should match the number of parameters in fn. inputs (Union[str, List[Union[str, InputComponent]]]): a single Gradio input component, or list of Gradio input components. Components can either be passed as instantiated objects, or referred to by their string shortcuts. The number of input components should match the number of parameters in fn.
outputs (Union[str, List[Union[str, AbstractOutput]]]): a single Gradio output component, or list of Gradio output components. Components can either be passed as instantiated objects, or referred to by their string shortcuts. The number of output components should match the number of values returned by fn. outputs (Union[str, List[Union[str, OutputComponent]]]): a single Gradio output component, or list of Gradio output components. Components can either be passed as instantiated objects, or referred to by their string shortcuts. The number of output components should match the number of values returned by fn.
live (bool): whether the interface should automatically reload on change. live (bool): whether the interface should automatically reload on change.
capture_session (bool): if True, captures the default graph and session (needed for Tensorflow 1.x) capture_session (bool): if True, captures the default graph and session (needed for Tensorflow 1.x)
title (str): a title for the interface; if provided, appears above the input and output components. title (str): a title for the interface; if provided, appears above the input and output components.
@ -55,22 +55,24 @@ class Interface:
""" """
def get_input_instance(iface): def get_input_instance(iface):
if isinstance(iface, str): if isinstance(iface, str):
return gradio.inputs.shortcuts[iface.lower()] shortcut = InputComponent.get_all_shortcut_implementations()[iface]
elif isinstance(iface, gradio.inputs.AbstractInput): return shortcut[0](**shortcut[1])
elif isinstance(iface, InputComponent):
return iface return iface
else: else:
raise ValueError("Input interface must be of type `str` or " raise ValueError("Input interface must be of type `str` or "
"`AbstractInput`") "`InputComponent`")
def get_output_instance(iface): def get_output_instance(iface):
if isinstance(iface, str): if isinstance(iface, str):
return gradio.outputs.shortcuts[iface.lower()] shortcut = OutputComponent.get_all_shortcut_implementations()[iface]
elif isinstance(iface, gradio.outputs.AbstractOutput): return shortcut[0](**shortcut[1])
elif isinstance(iface, OutputComponent):
return iface return iface
else: else:
raise ValueError( raise ValueError(
"Output interface must be of type `str` or " "Output interface must be of type `str` or "
"`AbstractOutput`" "`OutputComponent`"
) )
if isinstance(inputs, list): if isinstance(inputs, list):
self.input_interfaces = [get_input_instance(i) for i in inputs] self.input_interfaces = [get_input_instance(i) for i in inputs]
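String shortcuts are now resolved through the class registry rather than a module-level `shortcuts` dict; roughly (assuming gradio.inputs is importable):

from gradio.inputs import InputComponent

# "webcam" maps to (Image, {"source": "webcam"}), so the shortcut expands to
# Image(source="webcam") when the Interface is constructed
cls, params = InputComponent.get_all_shortcut_implementations()["webcam"]
component = cls(**params)
type(component).__name__, component.source   # ('Image', 'webcam')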


@ -1,61 +1,47 @@
""" """
This module defines various classes that can serve as the `output` to an interface. Each class must inherit from This module defines various classes that can serve as the `output` to an interface. Each class must inherit from
`AbstractOutput`, and each class must define a path to its template. All of the subclasses of `AbstractOutput` are `OutputComponent`, and each class must define a path to its template. All of the subclasses of `OutputComponent` are
automatically added to a registry, which allows them to be easily referenced in other parts of the code. automatically added to a registry, which allows them to be easily referenced in other parts of the code.
""" """
from abc import ABC, abstractmethod from gradio.component import Component
import numpy as np import numpy as np
import json import json
from gradio import preprocessing_utils from gradio import preprocessing_utils
import datetime import datetime
import operator import operator
from numbers import Number from numbers import Number
import warnings
import tempfile
import scipy
# Where to find the static resources associated with each template. class OutputComponent(Component):
BASE_OUTPUT_INTERFACE_JS_PATH = 'static/js/interfaces/output/{}.js'
class AbstractOutput(ABC):
""" """
An abstract class for defining the methods that all gradio inputs should have. Output Component. All output components subclass this.
When this is subclassed, it is automatically added to the registry
""" """
def __init__(self, label):
self.label = label
def get_template_context(self):
"""
:return: a dictionary with context variables for the javascript file associated with the context
"""
return {"label": self.label}
def postprocess(self, prediction):
"""
Any postprocessing needed to be performed on function output.
"""
return prediction
@classmethod @classmethod
def get_shortcut_implementations(cls): def get_all_shortcut_implementations(cls):
""" shortcuts = {}
Return dictionary of shortcut implementations for sub_cls in cls.__subclasses__():
""" for shortcut, parameters in sub_cls.get_shortcut_implementations().items():
return {} shortcuts[shortcut] = (sub_cls, parameters)
return shortcuts
class Textbox(AbstractOutput): class Textbox(OutputComponent):
''' '''
Component creates a textbox to render output text or number. Component creates a textbox to render output text or number.
Output type: str Output type: Union[str, float, int]
''' '''
def __init__(self, label=None): def __init__(self, type="str", label=None):
''' '''
Parameters: Parameters:
type (str): Type of value to be passed to component. "str" expects a string, "number" expects a float value.
label (str): component name in interface. label (str): component name in interface.
''' '''
self.type = type
super().__init__(label) super().__init__(label)
def get_template_context(self): def get_template_context(self):
@ -66,20 +52,21 @@ class Textbox(AbstractOutput):
@classmethod @classmethod
def get_shortcut_implementations(cls): def get_shortcut_implementations(cls):
return { return {
"text": {}, "text": {"type": "str"},
"textbox": {}, "textbox": {"type": "str"},
"number": {}, "number": {"type": "number"},
} }
def postprocess(self, prediction): def postprocess(self, y):
if isinstance(prediction, str) or isinstance(prediction, int) or isinstance(prediction, float): if self.type == "str":
return str(prediction) return y
elif self.type == "number":
return str(y)
else: else:
raise ValueError("The `Textbox` output interface expects an output that is one of: a string, or" raise ValueError("Unknown type: " + self.type + ". Please choose from: 'str', 'number'")
"an int/float that can be converted to a string.")
class Label(AbstractOutput): class Label(OutputComponent):
''' '''
Component outputs a classification label, along with confidence scores of top categories if provided. Confidence scores are represented as a dictionary mapping labels to scores between 0 and 1. Component outputs a classification label, along with confidence scores of top categories if provided. Confidence scores are represented as a dictionary mapping labels to scores between 0 and 1.
Output type: Union[Dict[str, float], str, int, float] Output type: Union[Dict[str, float], str, int, float]
@ -98,12 +85,12 @@ class Label(AbstractOutput):
self.num_top_classes = num_top_classes self.num_top_classes = num_top_classes
super().__init__(label) super().__init__(label)
def postprocess(self, prediction): def postprocess(self, y):
if isinstance(prediction, str) or isinstance(prediction, Number): if isinstance(y, str) or isinstance(y, Number):
return {"label": str(prediction)} return {"label": str(y)}
elif isinstance(prediction, dict): elif isinstance(y, dict):
sorted_pred = sorted( sorted_pred = sorted(
prediction.items(), y.items(),
key=operator.itemgetter(1), key=operator.itemgetter(1),
reverse=True reverse=True
) )
@ -118,8 +105,8 @@ class Label(AbstractOutput):
} for pred in sorted_pred } for pred in sorted_pred
] ]
} }
elif isinstance(prediction, int) or isinstance(prediction, float): elif isinstance(y, int) or isinstance(y, float):
return {self.LABEL_KEY: str(prediction)} return {self.LABEL_KEY: str(y)}
else: else:
raise ValueError("The `Label` output interface expects one of: a string label, or an int label, a " raise ValueError("The `Label` output interface expects one of: a string label, or an int label, a "
"float label, or a dictionary whose keys are labels and values are confidences.") "float label, or a dictionary whose keys are labels and values are confidences.")
@ -131,60 +118,53 @@ class Label(AbstractOutput):
} }
class Image(AbstractOutput): class Image(OutputComponent):
''' '''
Component displays an image. Expects a numpy array of shape `(width, height, 3)` to be returned by the function, or a `matplotlib.pyplot` if `plot = True`. Component displays an output image.
Output type: numpy.array Output type: Union[numpy.array, PIL.Image, str, matplotlib.pyplot]
''' '''
def __init__(self, plot=False, label=None): def __init__(self, type="numpy", plot=False, label=None):
''' '''
Parameters: Parameters:
plot (bool): whether to expect a plot to be returned by the function. type (str): Type of value to be passed to component. "numpy" expects a numpy array with shape (width, height, 3), "pil" expects a PIL image object, "file" expects a file path to the saved image, "plot" expects a matplotlib.pyplot object.
plot (bool): DEPRECATED. Whether to expect a plot to be returned by the function.
label (str): component name in interface. label (str): component name in interface.
''' '''
self.plot = plot if plot:
warnings.warn("The 'plot' parameter has been deprecated. Set parameter 'type' to 'plot' instead.", DeprecationWarning)
self.type = "plot"
else:
self.type = type
super().__init__(label) super().__init__(label)
@classmethod @classmethod
def get_shortcut_implementations(cls): def get_shortcut_implementations(cls):
return { return {
"image": {}, "image": {},
"plot": {"plot": True} "plot": {"type": "plot"},
"pil": {"type": "pil"}
} }
def postprocess(self, prediction): def postprocess(self, y):
""" """
""" """
if self.plot: if self.type in ("numpy", "pil"):
try: if self.type == "pil":
return preprocessing_utils.encode_plot_to_base64(prediction) y = np.array(y)
except: return preprocessing_utils.encode_array_to_base64(y)
raise ValueError("The `Image` output interface expects a `matplotlib.pyplot` object" elif type == "file":
"if plt=True.") return preprocessing_utils.encode_file_to_base64(y)
elif type == "plot":
return preprocessing_utils.encode_plot_to_base64(y)
else: else:
try: raise ValueError("Unknown type: " + self.type + ". Please choose from: 'numpy', 'pil', 'file', 'plot'.")
return preprocessing_utils.encode_array_to_base64(prediction)
except:
raise ValueError(
"The `Image` output interface (with plt=False) expects a numpy array.")
def rebuild_flagged(self, dir, msg):
"""
Default rebuild method to decode a base64 image
"""
im = preprocessing_utils.decode_base64_to_image(msg)
timestamp = datetime.datetime.now()
filename = 'output_{}.png'.format(timestamp.
strftime("%Y-%m-%d-%H-%M-%S"))
im.save('{}/{}'.format(dir, filename), 'PNG')
return filename
class KeyValues(AbstractOutput): class KeyValues(OutputComponent):
''' '''
Component displays a table representing values for multiple fields. Component displays a table representing values for multiple fields.
Output type: List[Tuple[str, value]] Output type: Union[Dict, List[Tuple[str, Union[str, int, float]]]]
''' '''
def __init__(self, label=None): def __init__(self, label=None):
@ -194,9 +174,11 @@ class KeyValues(AbstractOutput):
''' '''
super().__init__(label) super().__init__(label)
def postprocess(self, prediction): def postprocess(self, y):
if isinstance(prediction, dict): if isinstance(y, dict):
return prediction return list(y.items())
elif isinstance(y, list):
return y
else: else:
raise ValueError("The `KeyValues` output interface expects an output that is a dictionary whose keys are " raise ValueError("The `KeyValues` output interface expects an output that is a dictionary whose keys are "
"labels and values are corresponding values.") "labels and values are corresponding values.")
@ -208,19 +190,51 @@ class KeyValues(AbstractOutput):
} }
class HighlightedText(AbstractOutput): class HighlightedText(OutputComponent):
''' '''
Component creates text that contains spans that are highlighted by category or numerical value. Component creates text that contains spans that are highlighted by category or numerical value.
Output is represented as a list of tuple pairs, where the first element represents the span of text represented by the tuple, and the second element represents the category or value of the text. Output is represented as a list of tuple pairs, where the first element represents the span of text represented by the tuple, and the second element represents the category or value of the text.
Output type: List[Tuple[str, Union[float, str]]] Output type: List[Tuple[str, Union[float, str]]]
''' '''
def __init__(self, category_colors=None, label=None): def __init__(self, color_map=None, label=None):
''' '''
Parameters: Parameters:
category_colors (Dict[str, float]): color_map (Dict[str, str]): Map between category and respective colors
label (str): component name in interface. label (str): component name in interface.
''' '''
self.color_map = color_map
super().__init__(label)
def get_template_context(self):
return {
"color_map": self.color_map,
**super().get_template_context()
}
@classmethod
def get_shortcut_implementations(cls):
return {
"highlight": {},
}
def postprocess(self, y):
return y
class Audio(OutputComponent):
'''
Creates an audio player that plays the output audio.
Output type: Union[Tuple[int, numpy.array], str]
'''
def __init__(self, type="numpy", label=None):
'''
Parameters:
type (str): Type of value to be passed to component. "numpy" returns a 2-set tuple with an integer sample_rate and the data numpy.array of shape (samples, 2), "file" returns a temporary file path to the saved wav audio file.
label (str): component name in interface.
'''
self.type = type
super().__init__(label) super().__init__(label)
def get_template_context(self): def get_template_context(self):
@ -231,21 +245,24 @@ class HighlightedText(AbstractOutput):
@classmethod @classmethod
def get_shortcut_implementations(cls): def get_shortcut_implementations(cls):
return { return {
"highlight": {}, "audio": {},
} }
def postprocess(self, prediction): def postprocess(self, y):
if isinstance(prediction, str) or isinstance(prediction, int) or isinstance(prediction, float): if self.type in ("numpy", "file"):
return str(prediction) if self.type == "numpy":
file = tempfile.NamedTemporaryFile()
scipy.io.wavfile.write(file, y[0], y[1])
y = file.name
return preprocessing_utils.encode_file_to_base64(y, type="audio", ext="wav")
else: else:
raise ValueError("The `Textbox` output interface expects an output that is one of: a string, or" raise ValueError("Unknown type: " + self.type + ". Please choose from: 'numpy', 'file'.")
"an int/float that can be converted to a string.")
class JSON(AbstractOutput): class JSON(OutputComponent):
''' '''
Used for JSON output. Expects a JSON string or a Python dictionary or list that can be converted to JSON. Used for JSON output. Expects a JSON string or a Python object that is JSON serializable.
Output type: Union[str, Dict[str, Any], List[Any]] Output type: Union[str, Any]
''' '''
def __init__(self, label=None): def __init__(self, label=None):
@ -255,14 +272,12 @@ class JSON(AbstractOutput):
''' '''
super().__init__(label) super().__init__(label)
def postprocess(self, prediction): def postprocess(self, y):
if isinstance(prediction, dict) or isinstance(prediction, list): if isinstance(y, str):
return json.dumps(prediction) return json.loads(y)
elif isinstance(prediction, str):
return prediction
else: else:
raise ValueError("The `JSON` output interface expects an output that is a dictionary or list " return y
"or a preformatted JSON string.")
@classmethod @classmethod
def get_shortcut_implementations(cls): def get_shortcut_implementations(cls):
@ -271,9 +286,9 @@ class JSON(AbstractOutput):
} }
class HTML(AbstractOutput): class HTML(OutputComponent):
''' '''
Used for HTML output. Expects a JSON string or a Python dictionary or list that can be converted to JSON. Used for HTML output. Expects a valid HTML string.
Output type: str Output type: str
''' '''
@ -284,11 +299,6 @@ class HTML(AbstractOutput):
''' '''
super().__init__(label) super().__init__(label)
def postprocess(self, prediction):
if isinstance(prediction, str):
return prediction
else:
raise ValueError("The `HTML` output interface expects an output that is a str.")
@classmethod @classmethod
def get_shortcut_implementations(cls): def get_shortcut_implementations(cls):
@ -297,8 +307,69 @@ class HTML(AbstractOutput):
} }
# Automatically adds all shortcut implementations in AbstractInput into a dictionary. class File(OutputComponent):
shortcuts = {} '''
for cls in AbstractOutput.__subclasses__(): Used for file output. Expects a file path, raw string, or io.BytesIO object, depending on `type`.
for shortcut, parameters in cls.get_shortcut_implementations().items(): Output type: Union[io.BytesIO, str]
shortcuts[shortcut] = cls(**parameters) '''
def __init__(self, type="file", label=None):
'''
Parameters:
type (str): Type of value to be passed to component. "file" expects a file path, "str" exxpects a string to be returned as a file, "binary" expects an io.BytesIO object to be returned as a file.
label (str): component name in interface.
'''
self.type = type
super().__init__(label)
@classmethod
def get_shortcut_implementations(cls):
return {
"file": {},
}
class Dataframe(OutputComponent):
"""
Component displays 2D output through a spreadsheet interface.
Output type: Union[pandas.DataFrame, numpy.array, List[Union[str, float]], List[List[Union[str, float]]]]
"""
def __init__(self, headers=None, type="pandas", label=None):
'''
Parameters:
headers (List[str]): Header names to dataframe.
type (str): Type of value to be passed to component. "pandas" for pandas dataframe, "numpy" for numpy array, or "array" for Python array.
label (str): component name in interface.
'''
self.type = type
self.headers = headers
super().__init__(label)
def get_template_context(self):
return {
"headers": self.headers,
**super().get_template_context()
}
@classmethod
def get_shortcut_implementations(cls):
return {
"dataframe": {"type": "pandas"},
"numpy": {"type": "numpy"},
"matrix": {"type": "array"},
"list": {"type": "array"},
}
def postprocess(self, y):
if self.type == "pandas":
return {"headers": list(y.columns), "data": y.values.tolist()}
elif self.type in ("numpy", "array"):
if self.type == "numpy":
y = y.tolist()
if len(y) == 0 or not isinstance(y[0], list):
y = [y]
return {"data": y}
else:
raise ValueError("Unknown type: " + self.type + ". Please choose from: 'pandas', 'numpy', 'array'.")


@ -17,12 +17,14 @@ def decode_base64_to_image(encoding):
return Image.open(BytesIO(base64.b64decode(image_encoded))) return Image.open(BytesIO(base64.b64decode(image_encoded)))
def convert_file_to_base64(img): def encode_file_to_base64(f, type="image", ext=None):
with open(img, "rb") as image_file: with open(f, "rb") as file:
encoded_string = base64.b64encode(image_file.read()) encoded_string = base64.b64encode(file.read())
base64_str = str(encoded_string, 'utf-8') base64_str = str(encoded_string, 'utf-8')
type = img.split(".")[-1] if ext is None:
return "data:image/" + type + ";base64," + base64_str ext = f.split(".")[-1]
return "data:" + type + "/" + ext + ";base64," + base64_str
def encode_plot_to_base64(plt): def encode_plot_to_base64(plt):
with BytesIO() as output_bytes: with BytesIO() as output_bytes:
@ -61,19 +63,24 @@ def resize_and_crop(img, size, crop_type='center'):
return ImageOps.fit(img, size, centering=center) return ImageOps.fit(img, size, centering=center)
################## ##################
# AUDIO FILES # OUTPUT
################## ##################
def decode_base64_to_wav_file(encoding): def decode_base64_to_binary(encoding):
inp = encoding.split(';')[1].split(',')[1] inp = encoding.split(';')[1].split(',')[1]
wav_obj = base64.b64decode(inp) return base64.b64decode(inp)
def decode_base64_to_file(encoding):
file_obj = tempfile.NamedTemporaryFile() file_obj = tempfile.NamedTemporaryFile()
file_obj.close() file_obj.write(decode_base64_to_binary(encoding))
with open(file_obj.name, 'wb') as f:
f.write(wav_obj)
return file_obj return file_obj
##################
# AUDIO FILES
##################
def generate_mfcc_features_from_audio_file(wav_filename, def generate_mfcc_features_from_audio_file(wav_filename,
pre_emphasis=0.95, pre_emphasis=0.95,
frame_size= 0.025, frame_size= 0.025,


@ -0,0 +1,15 @@
.file_display {
height: 100%;
display: flex;
justify-content: center;
align-items: center;
flex-direction: column;
}
.file_name {
font-size: 24px;
font-weight: bold;
margin-bottom: 18px;
}
.file_size {
font-size: 18px;
}

View File

@ -0,0 +1,15 @@
.output_text {
width: 100%;
font-size: 18px;
outline: none;
background-color: white;
border: solid 1px lightgray;
border-radius: 2px;
box-sizing: border-box;
padding: 4px;
min-height: 30px;
font-family: monospace;
white-space: pre-wrap; /* CSS3 */
white-space: -moz-pre-wrap; /* Firefox */
word-wrap: break-word; /* IE */
}


@ -0,0 +1,22 @@
.highlight_legend {
margin-bottom: 4px;
}
.color_legend {
font-family: monospace;
padding: 4px;
border-radius: 2px;
display: flex;
justify-content: space-between;
background: linear-gradient(90deg, rgba(58,241,255,1) 0%, rgba(58,241,255,0) 49%, rgba(230,126,34,0) 50%, rgba(230,126,34,1) 100%);
margin-bottom: 4px;
}
.category-label {
display: inline-flex;
margin-right: 8px;
margin-bottom: 4px;
}
.category-label div {
width: 24px;
margin-right: 4px;
}


@ -0,0 +1,15 @@
.output_text {
width: 100%;
font-size: 18px;
outline: none;
background-color: white;
border: solid 1px lightgray;
border-radius: 2px;
box-sizing: border-box;
padding: 4px;
min-height: 30px;
font-family: monospace;
white-space: pre-wrap; /* CSS3 */
white-space: -moz-pre-wrap; /* Firefox */
word-wrap: break-word; /* IE */
}


@ -0,0 +1,15 @@
.output_text {
width: 100%;
font-size: 18px;
outline: none;
background-color: white;
border: solid 1px lightgray;
border-radius: 2px;
box-sizing: border-box;
padding: 4px;
min-height: 30px;
font-family: monospace;
white-space: pre-wrap; /* CSS3 */
white-space: -moz-pre-wrap; /* Firefox */
word-wrap: break-word; /* IE */
}

File diff suppressed because one or more lines are too long


@ -13,15 +13,23 @@
/* Styles for the container of the tree (e.g. fonts, margins etc.) */ /* Styles for the container of the tree (e.g. fonts, margins etc.) */
.jsontree_tree { .jsontree_tree {
margin-left: 30px;
font-family: 'PT Mono', monospace; font-family: 'PT Mono', monospace;
font-size: 14px; font-size: 14px;
padding-left: 0;
} }
.jsontree_tree ul {
padding-left: 20px;
}
.jsontree_tree li {
list-style: none;
}
/* Styles for a list of child nodes */ /* Styles for a list of child nodes */
.jsontree_child-nodes { .jsontree_child-nodes {
display: none; display: none;
margin-left: 35px;
margin-bottom: 5px; margin-bottom: 5px;
line-height: 2; line-height: 2;
} }

File diff suppressed because one or more lines are too long


@ -42,13 +42,22 @@ function gradio(config, fn, target) {
"checkboxgroup" : checkbox_group, "checkboxgroup" : checkbox_group,
"slider" : slider, "slider" : slider,
"dropdown" : dropdown, "dropdown" : dropdown,
"audio" : audio_input,
"file" : file_input,
"dataframe" : dataframe_input,
} }
let output_to_object_map = { let output_to_object_map = {
"csv" : {}, "csv" : {},
"image" : image_output, "image" : image_output,
"label" : label_output, "label" : label_output,
"keyvalues" : key_values, "keyvalues" : key_values,
"textbox" : textbox_output "textbox" : textbox_output,
"highlightedtext": highlighted_text,
"audio": audio_output,
"json": json_output,
"html": html_output,
"file" : file_output,
"dataframe" : dataframe_output,
} }
let id_to_interface_map = {} let id_to_interface_map = {}


@ -0,0 +1,99 @@
const audio_input = {
html: `
<div class="upload_zone">
<img class="not_recording" src="/static/img/mic.png" />
<div class="recording hidden volume_display">
<div class="volume volume_left">
<div class="volume_bar"></div>
</div>
<img src="/static/img/mic_recording.png" />
<div class="volume volume_right">
<div class="volume_bar"></div>
</div>
</div>
<div class="not_recording input_caption">Click to Record from Microphone</div>
<div class="recording hidden input_caption">Click to Stop Recording</div>
</div>
<div class="player hidden">
<div class="waveform"></div>
<button class="playpause primary">Play / Pause</button>
</div>
`,
state: "NO_AUDIO",
init: function(opts) {
var io = this;
this.wavesurfer = WaveSurfer.create({
container: io.target.find('.waveform')[0],
waveColor: '#888888',
progressColor: '#e67e22',
barWidth: 3,
hideScrollbar: true
});
this.target.find(".upload_zone").click(function() {
if (io.state == "NO_AUDIO") {
if (!has_audio_loaded) {
loadAudio();
io.mic = new p5.AudioIn();
}
io.recorder = new p5.SoundRecorder();
io.soundFile = new p5.SoundFile();
io.recorder.setInput(io.mic);
io.target.find(".recording").removeClass("hidden");
io.target.find(".not_recording").hide();
io.state = "RECORDING";
io.mic.start();
io.recorder.record(io.soundFile);
io.interval_id = window.setInterval(function () {
var volume = Math.floor(100 * io.mic.getLevel());
io.target.find(".volume_bar").width(`${(volume > 0 ? 10 : 0) + Math.round(2 * Math.sqrt(10 * volume))}px`)
}, 100)
}
});
this.target.find(".upload_zone").mousedown(function() {
if (io.state == "RECORDING" || io.state == "STOP_RECORDING") {
io.target.find(".upload_zone").hide();
io.recorder.stop();
var blob = io.soundFile.getBlob();
var reader = new window.FileReader();
reader.readAsDataURL(blob);
reader.onloadend = function() {
console.log(reader.result)
io.audio_data = reader.result;
io.target.find(".player").removeClass("hidden");
io.wavesurfer.load(io.audio_data);
if (io.state == "STOP_RECORDING") {
io.state = "RECORDED";
io.submit();
}
io.state = "RECORDED";
}
if (io.interval_id) {
window.clearInterval(io.interval_id);
}
}
})
this.target.find(".playpause").click(function () {
io.wavesurfer.playPause();
})
},
submit: function() {
if (this.state == "RECORDED") {
this.io_master.input(this.id, this.audio_data);
} else if (this.state == "RECORDING") {
this.state = "STOP_RECORDING";
this.target.find(".upload_zone").mousedown();
}
},
clear: function() {
this.audio_data = null;
this.state = "NO_AUDIO";
this.target.find(".not_recording").show();
this.target.find(".recording").addClass("hidden");
this.target.find(".player").addClass("hidden");
this.target.find(".upload_zone").show();
if (this.wavesurfer) {
this.wavesurfer.stop();
}
}
}


@ -0,0 +1,54 @@
const dataframe_input = {
html: `
<div class="dataframe">
</div>
`,
init: function(opts) {
let row_count = opts.row_count;
let col_count = opts.col_count;
this.datatype = opts.datatype;
let data = [];
for (let i = 0; i < row_count; i++) {
let row = []
for (let j = 0; j < col_count; j++) {
row.push(null);
}
data.push(row);
}
let config = {data: data};
if (opts.headers || opts.datatype) {
let column_config = [];
for (let i = 0; i < col_count; i++) {
let column = {};
if (opts.datatype) {
let datatype = typeof opts.datatype === "string" ? opts.datatype : opts.datatype[i];
let datatype_map = {"str": "text", "bool": "checkbox", "number": "numeric", "date": "calendar"}
column.type = datatype_map[datatype];
}
if (opts.headers) {
column.title = opts.headers[i];
}
column_config.push(column);
}
config.columns = column_config;
}
this.config = config;
this.table = this.target.find(".dataframe").jexcel(config);
},
submit: function() {
let data = this.table.getData();
if (this.datatype) {
for (let i = 0; i < data[0].length; i++) {
if (this.datatype == "number" || (i < this.datatype.length && this.datatype[i].type == "number")) {
for (let j = 0; j < data.length; j++) {
let val = data[j][i];
data[j][i] = val == "" ? 0 : parseFloat(val);
}
}
}
}
this.io_master.input(this.id, data);
},
clear: function() {
}
}


@ -0,0 +1,68 @@
const file_input = {
html: `
<div class="upload_zone drop_zone">
<div class="input_caption">Drop File Here<br>- or -<br>Click to Upload</div>
</div>
<div class="file_display hide">
<div class="file_name"></div>
<div class="file_size"></div>
</div>
<input class="hidden_upload" type="file" />`
,
init: function(opts) {
var io = this;
this.target.find(".upload_zone").click(function (e) {
io.target.find(".hidden_upload").click();
});
this.target.on('drag dragstart dragend dragover dragenter dragleave drop',
".drop_zone", function(e) {
e.preventDefault();
e.stopPropagation();
})
this.target.on('drop', '.drop_zone', function(e) {
      let files = e.originalEvent.dataTransfer.files;
io.load_preview_from_files(files)
});
this.target.find('.hidden_upload').on('change', function (e) {
if (this.files) {
io.load_preview_from_files(this.files);
}
})
},
submit: function() {
if (this.file_data) {
this.io_master.input(this.id, this.file_data);
}
},
load_preview_from_files: function(files) {
if (!files.length || !window.FileReader) {
return
}
var ReaderObj = new FileReader()
ReaderObj.readAsDataURL(files[0])
ReaderObj.io = this;
ReaderObj.onloadend = function() {
let io = this.io;
io.target.find(".upload_zone").hide();
io.target.find(".file_display").removeClass("hide");
io.target.find(".file_name").text(files[0].name);
let bytes = files[0].size;
let units = ["B", "KB", "MB", "GB", "PB"];
let i = 0;
while (bytes > 1024) {
bytes /= 1024;
i++;
}
let unit = units[i];
io.target.find(".file_size").text(bytes.toFixed(1) + " " + unit);
io.file_data = this.result;
}
},
clear: function() {
this.target.find(".upload_zone").show();
this.target.find(".file_display").addClass("hide");
this.target.find(".hidden_upload").prop("value", "")
this.file_data = null;
},
file_data: null,
}


@ -48,7 +48,6 @@ const image_input = {
}) })
this.target.find('.edit_image').click(function (e) { this.target.find('.edit_image').click(function (e) {
io.overlay_target.removeClass("hide"); io.overlay_target.removeClass("hide");
io.target.find(".saliency_holder").addClass("hide");
}) })
this.tui_editor = new tui.ImageEditor(this.overlay_target. this.tui_editor = new tui.ImageEditor(this.overlay_target.
find(".image_editor")[0], { find(".image_editor")[0], {
@ -92,18 +91,6 @@ const image_input = {
this.target.find(".hidden_upload").prop("value", "") this.target.find(".hidden_upload").prop("value", "")
this.state = "NO_IMAGE"; this.state = "NO_IMAGE";
this.image_data = null; this.image_data = null;
this.target.find(".saliency_holder").addClass("hide");
},
output: function(data) {
if (this.target.find(".image_preview").attr("src")) {
var image = this.target.find(".image_preview");
var width = image.width();
var height = image.height();
this.target.find(".saliency_holder").removeClass("hide").html(`
<canvas class="saliency" width=${width} height=${height}></canvas>`);
var ctx = this.target.find(".saliency")[0].getContext('2d');
paintSaliency(ctx, width, height);
}
}, },
state: "NO_IMAGE", state: "NO_IMAGE",
image_data: null, image_data: null,


@ -78,6 +78,7 @@ const microphone = {
if (this.state == "RECORDED") { if (this.state == "RECORDED") {
this.io_master.input(this.id, this.audio_data); this.io_master.input(this.id, this.audio_data);
} else if (this.state == "RECORDING") { } else if (this.state == "RECORDING") {
this.state = "STOP_RECORDING";
this.target.find(".upload_zone").mousedown(); this.target.find(".upload_zone").mousedown();
} }
}, },


@ -9,14 +9,6 @@ const slider = {
let io = this; let io = this;
this.minimum = opts.minimum; this.minimum = opts.minimum;
this.target.css("height", "auto"); this.target.css("height", "auto");
let difference = opts.maximum - opts.minimum;
if (difference <= 1) {
step = 0.01;
} else if (difference <= 10) {
step = 0.1;
} else {
step = 1;
}
var handle = this.target.find(".ui-slider-handle"); var handle = this.target.find(".ui-slider-handle");
this.slider = this.target.find(".slider").slider({ this.slider = this.target.find(".slider").slider({
create: function() { create: function() {
@ -27,7 +19,7 @@ const slider = {
}, },
min: opts.minimum, min: opts.minimum,
max: opts.maximum, max: opts.maximum,
step: step step: opts.step
}); });
}, },
submit: function() { submit: function() {


@ -0,0 +1,32 @@
const audio_output = {
html: `
<div class="player hidden">
<div class="waveform"></div>
<button class="playpause primary">Play / Pause</button>
</div>
`,
state: "NO_AUDIO",
init: function(opts) {
var io = this;
this.wavesurfer = WaveSurfer.create({
container: io.target.find('.waveform')[0],
waveColor: '#888888',
progressColor: '#e67e22',
barWidth: 3,
hideScrollbar: true
});
this.target.find(".playpause").click(function () {
io.wavesurfer.playPause();
})
},
output: function(data) {
io.target.find(".player").removeClass("hidden");
this.wavesurfer.load(data);
},
clear: function() {
this.target.find(".player").addClass("hidden");
if (this.wavesurfer) {
this.wavesurfer.stop();
}
}
}


@ -0,0 +1,25 @@
const dataframe_output = {
html: `
<div class="dataframe"></div>
`,
init: function(opts) {
},
output: function(data) {
let config = {data: data.data};
if (data.headers) {
let column_config = [];
for (let header of data.headers) {
column_config.push({title: header});
}
config.columns = column_config;
}
if (this.table) {
this.clear();
}
this.table = this.target.find(".dataframe").jexcel(config);
},
clear: function() {
jexcel.destroy(this.target.find(".dataframe")[0]);
this.table = null;
}
}


@ -0,0 +1,17 @@
const file_output = {
html: `
<div class="highlight_legend"></div>
<div class="output_text"></div>
`,
init: function(opts) {
this.target.css("height", "auto");
},
output: function(data) {
this.target.find(".output_text").text(data);
},
submit: function() {
},
clear: function() {
this.target.find(".output_text").empty();
}
}


@ -0,0 +1,92 @@
const highlighted_text = {
html: `
<div class="highlight_legend">
<div class="color_legend invisible">
<span>-1</span>
<span>0</span>
<span>+1</span>
</div>
<div class="category_legend invisible"></div>
</div>
<div class="output_text"></div>
`,
init: function(opts) {
this.target.css("height", "auto");
this.color_map = {};
if (opts.color_map) {
this.generate_category_legend(opts.color_map);
}
},
new_category_index: 0,
generate_category_legend: function(map) {
console.log(map)
let default_colors = ["pink", "lightblue", "gold", "plum", "lightskyblue", "greenyellow", "khaki", "cyan", "moccasin", "lightgray"]
for (let category in map) {
if (category in this.color_map) {
continue;
}
let color = map[category];
if (!color) {
if (this.new_category_index < default_colors.length) {
color = default_colors[this.new_category_index];
this.new_category_index++;
} else {
function randInt(min, max) {
return Math.floor(Math.random() * (max- min) + min);
}
color = "rgb(" + randInt(128, 240) + ", " + randInt(128, 240) + ", " + randInt(128, 240) + ")"
}
}
this.color_map[category] = color;
this.target.find(".category_legend").append(`
<div class="category-label">
<div style="background-color:${color}">&nbsp;</div>
${category}
</div>
`)
}
},
output: function(data) {
if (data.length == 0) {
return;
} else if (typeof(data[0][1]) == "string") {
this.target.find(".category_legend").removeClass("invisible");
let new_color_map = {};
for (let span of data) {
let category = span[1];
if (category != null) {
new_color_map[category] = null;
}
}
this.generate_category_legend(new_color_map);
let html = "";
for (let span of data) {
let category = span[1];
let color = category == null ? "white" : this.color_map[category];
html += `<span title="${category}" style="background-color: ${color}">${span[0]}</span>`
}
this.target.find(".output_text").html(html);
} else {
this.target.find(".color_legend").removeClass("invisible");
let html = "";
for (let span of data) {
let value = span[1];
let color = "";
if (value < 0) {
color = "8,241,255," + (-value);
} else {
color = "230,126,34," + value;
}
html += `<span title="${value}" style="background-color: rgba(${color})">${span[0]}</span>`
}
this.target.find(".output_text").html(html);
}
},
submit: function() {
},
clear: function() {
this.target.find(".output_text").empty();
this.target.find(".highlight_legend div").addClass("invisible");
}
}
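For the numeric branch above, the backend only needs to return (text, score) pairs with scores in [-1, 1]. A minimal sketch, assuming the HighlightedText output component accepts a no-argument constructor and passes such a list straight through to this renderer; the score_words function is illustrative only:

import gradio as gr

def score_words(sentence):
    # Toy scorer: longer words get a positive score, shorter ones a negative score.
    return [(word + " ", 0.8 if len(word) > 4 else -0.5) for word in sentence.split()]

gr.Interface(score_words, "text", gr.outputs.HighlightedText()).launch()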


@ -0,0 +1,12 @@
const html_output = {
html: ``,
init: function(opts) {
this.target.css("height", "auto");
},
output: function(data) {
this.target.html(data);
},
clear: function() {
this.target.empty();
}
}


@ -0,0 +1,14 @@
const json_output = {
html: `
`,
init: function(opts) {
this.target.css("height", "auto");
},
output: function(data) {
this.clear();
jsonTree.create(data, this.target[0]);
},
clear: function() {
this.target.empty();
}
}



@ -33,6 +33,8 @@
<link type="text/css" href="../static/css/vendor/tui-color-picker.css" rel="stylesheet"> <link type="text/css" href="../static/css/vendor/tui-color-picker.css" rel="stylesheet">
<link type="text/css" href="../static/css/vendor/tui-image-editor.css" rel="stylesheet"> <link type="text/css" href="../static/css/vendor/tui-image-editor.css" rel="stylesheet">
<link type="text/css" href="../static/css/vendor/jquery-ui.css" rel="stylesheet"> <link type="text/css" href="../static/css/vendor/jquery-ui.css" rel="stylesheet">
<link type="text/css" href="../static/css/vendor/jexcel.min.css" rel="stylesheet">
<link type="text/css" href="../static/css/vendor/jsuites.min.css" rel="stylesheet">
<link href="https://fonts.googleapis.com/css?family=Open+Sans" rel="stylesheet"> <link href="https://fonts.googleapis.com/css?family=Open+Sans" rel="stylesheet">
<link rel="stylesheet" href="../static/css/style.css"> <link rel="stylesheet" href="../static/css/style.css">
@ -47,10 +49,15 @@
<link rel="stylesheet" href="../static/css/interfaces/input/slider.css"> <link rel="stylesheet" href="../static/css/interfaces/input/slider.css">
<link rel="stylesheet" href="../static/css/interfaces/input/webcam.css"> <link rel="stylesheet" href="../static/css/interfaces/input/webcam.css">
<link rel="stylesheet" href="../static/css/interfaces/input/microphone.css"> <link rel="stylesheet" href="../static/css/interfaces/input/microphone.css">
<link rel="stylesheet" href="../static/css/interfaces/input/file.css">
<link rel="stylesheet" href="../static/css/interfaces/output/image.css"> <link rel="stylesheet" href="../static/css/interfaces/output/image.css">
<link rel="stylesheet" href="../static/css/interfaces/output/label.css"> <link rel="stylesheet" href="../static/css/interfaces/output/label.css">
<link rel="stylesheet" href="../static/css/interfaces/output/key_values.css"> <link rel="stylesheet" href="../static/css/interfaces/output/key_values.css">
<link rel="stylesheet" href="../static/css/interfaces/output/textbox.css"> <link rel="stylesheet" href="../static/css/interfaces/output/textbox.css">
<link rel="stylesheet" href="../static/css/interfaces/output/highlighted_text.css">
<link rel="stylesheet" href="../static/css/interfaces/output/audio.css">
<link rel="stylesheet" href="../static/css/interfaces/output/json.css">
<link rel="stylesheet" href="../static/css/interfaces/output/html.css">
<link rel="stylesheet" href="../static/css/loading.css"/> <link rel="stylesheet" href="../static/css/loading.css"/>
</head> </head>
@ -77,6 +84,7 @@
<script src="../static/js/vendor/html2canvas.min.js"></script> <script src="../static/js/vendor/html2canvas.min.js"></script>
<script src="../static/js/vendor/jquery-ui.min.js"></script> <script src="../static/js/vendor/jquery-ui.min.js"></script>
<script src="../static/js/vendor/jquery.ui.touch-punch.js"></script> <script src="../static/js/vendor/jquery.ui.touch-punch.js"></script>
<script src="../static/js/vendor/jsonTree.js"></script>
<script src="../static/js/vendor/fabric.js"></script> <script src="../static/js/vendor/fabric.js"></script>
<script src="../static/js/vendor/tui-code-snippet.min.js"></script> <script src="../static/js/vendor/tui-code-snippet.min.js"></script>
<script src="../static/js/vendor/FileSaver.min.js"></script> <script src="../static/js/vendor/FileSaver.min.js"></script>
@ -84,6 +92,12 @@
<script src="../static/js/vendor/tui-image-editor.js"></script> <script src="../static/js/vendor/tui-image-editor.js"></script>
<script src="../static/js/vendor/white-theme.js"></script> <script src="../static/js/vendor/white-theme.js"></script>
<script src="../static/js/vendor/black-theme.js"></script> <script src="../static/js/vendor/black-theme.js"></script>
<script src="../static/js/vendor/wavesurfer.min.js"></script>
<script src="../static/js/vendor/p5.min.js"></script>
<script src="../static/js/vendor/p5.sound.min.js"></script>
<script src="../static/js/vendor/p5.dom.min.js"></script>
<script src="../static/js/vendor/jexcel.min.js"></script>
<script src="../static/js/vendor/jsuites.min.js"></script>
<script src="../static/js/utils.js"></script> <script src="../static/js/utils.js"></script>
<script src="../static/js/all_io.js"></script> <script src="../static/js/all_io.js"></script>
@ -97,18 +111,22 @@
<script src="../static/js/interfaces/input/checkbox.js"></script> <script src="../static/js/interfaces/input/checkbox.js"></script>
<script src="../static/js/interfaces/input/dropdown.js"></script> <script src="../static/js/interfaces/input/dropdown.js"></script>
<script src="../static/js/interfaces/input/slider.js"></script> <script src="../static/js/interfaces/input/slider.js"></script>
<script src="../static/js/interfaces/input/csv.js"></script> <script src="../static/js/interfaces/input/dataframe.js"></script>
<script src="../static/js/interfaces/input/audio.js"></script>
<script src="../static/js/interfaces/input/file.js"></script>
<script src="../static/js/vendor/webcam.min.js"></script> <script src="../static/js/vendor/webcam.min.js"></script>
<script src="../static/js/interfaces/input/webcam.js"></script> <script src="../static/js/interfaces/input/webcam.js"></script>
<script src="../static/js/interfaces/input/microphone.js"></script> <script src="../static/js/interfaces/input/microphone.js"></script>
<script src="../static/js/vendor/wavesurfer.min.js"></script>
<script src="../static/js/vendor/p5.min.js"></script>
<script src="../static/js/vendor/p5.sound.min.js"></script>
<script src="../static/js/vendor/p5.dom.min.js"></script>
<script src="../static/js/interfaces/output/image.js"></script> <script src="../static/js/interfaces/output/image.js"></script>
<script src="../static/js/interfaces/output/label.js"></script> <script src="../static/js/interfaces/output/label.js"></script>
<script src="../static/js/interfaces/output/key_values.js"></script> <script src="../static/js/interfaces/output/key_values.js"></script>
<script src="../static/js/interfaces/output/textbox.js"></script> <script src="../static/js/interfaces/output/textbox.js"></script>
<script src="../static/js/interfaces/output/highlighted_text.js"></script>
<script src="../static/js/interfaces/output/audio.js"></script>
<script src="../static/js/interfaces/output/json.js"></script>
<script src="../static/js/interfaces/output/html.js"></script>
<script src="../static/js/interfaces/output/dataframe.js"></script>
<script src="../static/js/interfaces/output/file.js"></script>
<script src="../static/js/gradio.js"></script> <script src="../static/js/gradio.js"></script>
<script> <script>
$.getJSON("static/config.json", function(config) { $.getJSON("static/config.json", function(config) {

demo/files.py Normal file

@ -0,0 +1,9 @@
import gradio as gr
import random
def upload(file):
    print(file.name)
    with file:
        return file.name

gr.Interface(upload, "file", "text").launch()

demo/matrix.py Normal file

@ -0,0 +1,12 @@
import gradio as gr
import numpy as np
import random
def transpose(matrix):
    print(matrix)
    return matrix.T

gr.Interface(transpose,
             gr.inputs.Dataframe(type="numpy", datatype="number", row_count=5, col_count=3),
             "numpy"
).launch()

demo/multi1.py Normal file

@ -0,0 +1,32 @@
import gradio as gr
import numpy as np
import random
def answer_question(text, audio):
    return [
        ("The movie was ", "good"),
        ("unexpectedly, ", "great"),
        ("a fantastic experience ", "neutral"),
    ], {
        "address": "1 Main St.",
        "bedrooms": 5,
        "is_apt": False,
        "residents": [
            {"name": "Farhan", "age": 13},
            {"name": "Aziz", "age": 18},
            {"name": "Fozan", "age": None},
        ]
    }, "<div style='background-color: pink; padding: 2px;'>" + str(audio[1].shape) + "</div>", ""

gr.Interface(answer_question,
             [
                 gr.inputs.Dropdown(["cat", "dog", "bird"]),
                 gr.inputs.Microphone(),
             ],
             [
                 gr.outputs.HighlightedText(color_map={"good": "lightgreen", "bad": "pink"}),
                 gr.outputs.JSON(),
                 gr.outputs.HTML(),
                 gr.outputs.Audio(),
             ],
).launch()

demo/records.py Normal file

@ -0,0 +1,14 @@
import gradio as gr
import numpy as np
import random
def filter_records(records, gender):
    return records[records['gender'] == gender]

gr.Interface(filter_records,
             [
                 gr.inputs.Dataframe(headers=["name", "age", "gender"], datatype=["str", "number", "str"], row_count=5),
                 gr.inputs.Dropdown(["M", "F", "O"])
             ],
             "dataframe"
).launch()

demo/reverse_audio.py Normal file

@ -0,0 +1,9 @@
import gradio as gr
import numpy as np
import random
def reverse_audio(audio):
    sr, data = audio
    return (sr, np.flipud(data))

gr.Interface(reverse_audio, "microphone", "audio").launch()


@ -408,7 +408,7 @@ class Microphone(AbstractInput):
""" """
By default, no pre-processing is applied to a microphone input file By default, no pre-processing is applied to a microphone input file
""" """
file_obj = preprocessing_utils.decode_base64_to_wav_file(inp) file_obj = preprocessing_utils.decode_base64_to_file(inp)
if self.preprocessing == "mfcc": if self.preprocessing == "mfcc":
return preprocessing_utils.generate_mfcc_features_from_audio_file(file_obj.name) return preprocessing_utils.generate_mfcc_features_from_audio_file(file_obj.name)
_, signal = scipy.io.wavfile.read(file_obj.name) _, signal = scipy.io.wavfile.read(file_obj.name)


@ -2,6 +2,7 @@ MANIFEST.in
README.md README.md
setup.py setup.py
gradio/__init__.py gradio/__init__.py
gradio/component.py
gradio/generate_docs.py gradio/generate_docs.py
gradio/inputs.py gradio/inputs.py
gradio/interface.py gradio/interface.py
@ -22,6 +23,7 @@ gradio/static/css/loading.css
gradio/static/css/style.css gradio/static/css/style.css
gradio/static/css/interfaces/input/checkbox_group.css gradio/static/css/interfaces/input/checkbox_group.css
gradio/static/css/interfaces/input/dropdown.css gradio/static/css/interfaces/input/dropdown.css
gradio/static/css/interfaces/input/file.css
gradio/static/css/interfaces/input/image.css gradio/static/css/interfaces/input/image.css
gradio/static/css/interfaces/input/microphone.css gradio/static/css/interfaces/input/microphone.css
gradio/static/css/interfaces/input/radio.css gradio/static/css/interfaces/input/radio.css
@ -29,13 +31,19 @@ gradio/static/css/interfaces/input/sketchpad.css
gradio/static/css/interfaces/input/slider.css gradio/static/css/interfaces/input/slider.css
gradio/static/css/interfaces/input/textbox.css gradio/static/css/interfaces/input/textbox.css
gradio/static/css/interfaces/input/webcam.css gradio/static/css/interfaces/input/webcam.css
gradio/static/css/interfaces/output/audio.css
gradio/static/css/interfaces/output/highlighted_text.css
gradio/static/css/interfaces/output/html.css
gradio/static/css/interfaces/output/image.css gradio/static/css/interfaces/output/image.css
gradio/static/css/interfaces/output/json.css
gradio/static/css/interfaces/output/key_values.css gradio/static/css/interfaces/output/key_values.css
gradio/static/css/interfaces/output/label.css gradio/static/css/interfaces/output/label.css
gradio/static/css/interfaces/output/textbox.css gradio/static/css/interfaces/output/textbox.css
gradio/static/css/vendor/icons.svg gradio/static/css/vendor/icons.svg
gradio/static/css/vendor/jexcel.min.css
gradio/static/css/vendor/jquery-ui.css gradio/static/css/vendor/jquery-ui.css
gradio/static/css/vendor/jsonTree.css gradio/static/css/vendor/jsonTree.css
gradio/static/css/vendor/jsuites.min.css
gradio/static/css/vendor/tui-color-picker.css gradio/static/css/vendor/tui-color-picker.css
gradio/static/css/vendor/tui-image-editor.css gradio/static/css/vendor/tui-image-editor.css
gradio/static/css/vendor/images/ui-bg_flat_0_aaaaaa_40x100.png gradio/static/css/vendor/images/ui-bg_flat_0_aaaaaa_40x100.png
@ -62,9 +70,12 @@ gradio/static/img/vendor/icon-d.svg
gradio/static/js/all_io.js gradio/static/js/all_io.js
gradio/static/js/gradio.js gradio/static/js/gradio.js
gradio/static/js/utils.js gradio/static/js/utils.js
gradio/static/js/interfaces/input/audio.js
gradio/static/js/interfaces/input/checkbox.js gradio/static/js/interfaces/input/checkbox.js
gradio/static/js/interfaces/input/checkbox_group.js gradio/static/js/interfaces/input/checkbox_group.js
gradio/static/js/interfaces/input/dataframe.js
gradio/static/js/interfaces/input/dropdown.js gradio/static/js/interfaces/input/dropdown.js
gradio/static/js/interfaces/input/file.js
gradio/static/js/interfaces/input/image.js gradio/static/js/interfaces/input/image.js
gradio/static/js/interfaces/input/microphone.js gradio/static/js/interfaces/input/microphone.js
gradio/static/js/interfaces/input/radio.js gradio/static/js/interfaces/input/radio.js
@ -72,7 +83,13 @@ gradio/static/js/interfaces/input/sketchpad.js
gradio/static/js/interfaces/input/slider.js gradio/static/js/interfaces/input/slider.js
gradio/static/js/interfaces/input/textbox.js gradio/static/js/interfaces/input/textbox.js
gradio/static/js/interfaces/input/webcam.js gradio/static/js/interfaces/input/webcam.js
gradio/static/js/interfaces/output/audio.js
gradio/static/js/interfaces/output/dataframe.js
gradio/static/js/interfaces/output/file.js
gradio/static/js/interfaces/output/highlighted_text.js
gradio/static/js/interfaces/output/html.js
gradio/static/js/interfaces/output/image.js gradio/static/js/interfaces/output/image.js
gradio/static/js/interfaces/output/json.js
gradio/static/js/interfaces/output/key_values.js gradio/static/js/interfaces/output/key_values.js
gradio/static/js/interfaces/output/label.js gradio/static/js/interfaces/output/label.js
gradio/static/js/interfaces/output/textbox.js gradio/static/js/interfaces/output/textbox.js
@ -80,10 +97,12 @@ gradio/static/js/vendor/FileSaver.min.js
gradio/static/js/vendor/black-theme.js gradio/static/js/vendor/black-theme.js
gradio/static/js/vendor/fabric.js gradio/static/js/vendor/fabric.js
gradio/static/js/vendor/html2canvas.min.js gradio/static/js/vendor/html2canvas.min.js
gradio/static/js/vendor/jexcel.min.js
gradio/static/js/vendor/jquery-ui.min.js gradio/static/js/vendor/jquery-ui.min.js
gradio/static/js/vendor/jquery.min.js gradio/static/js/vendor/jquery.min.js
gradio/static/js/vendor/jquery.ui.touch-punch.js gradio/static/js/vendor/jquery.ui.touch-punch.js
gradio/static/js/vendor/jsonTree.js gradio/static/js/vendor/jsonTree.js
gradio/static/js/vendor/jsuites.min.js
gradio/static/js/vendor/p5.dom.min.js gradio/static/js/vendor/p5.dom.min.js
gradio/static/js/vendor/p5.min.js gradio/static/js/vendor/p5.min.js
gradio/static/js/vendor/p5.sound.min.js gradio/static/js/vendor/p5.sound.min.js

gradio/component.py Normal file

@ -0,0 +1,38 @@
class Component():
    """
    A class for defining the methods that all gradio input and output components should have.
    """

    def __init__(self, label):
        self.label = label

    def get_template_context(self):
        """
        :return: a dictionary with context variables for the javascript file associated with the context
        """
        return {"label": self.label}

    def preprocess(self, x):
        """
        Any preprocessing needed to be performed on function input.
        """
        return x

    def postprocess(self, y):
        """
        Any postprocessing needed to be performed on function output.
        """
        return y

    def process_example(self, example):
        """
        Preprocess example for UI
        """
        return example

    @classmethod
    def get_shortcut_implementations(cls):
        """
        Return dictionary of shortcut implementations
        """
        return {}


@ -1,6 +1,6 @@
import json import json
from gradio.inputs import AbstractInput from gradio.inputs import InputComponent
from gradio.outputs import AbstractOutput from gradio.outputs import OutputComponent
from gradio.interface import Interface from gradio.interface import Interface
import inspect import inspect
@ -44,8 +44,8 @@ def document(cls_set):
docset.append(inp) docset.append(inp)
return docset return docset
inputs = document(AbstractInput.__subclasses__()) inputs = document(InputComponent.__subclasses__())
outputs = document(AbstractOutput.__subclasses__()) outputs = document(OutputComponent.__subclasses__())
interface_params = get_params(Interface.__init__) interface_params = get_params(Interface.__init__)
interface = { interface = {
"doc": inspect.getdoc(Interface), "doc": inspect.getdoc(Interface),


@ -1,6 +1,6 @@
""" """
This module defines various classes that can serve as the `input` to an interface. Each class must inherit from This module defines various classes that can serve as the `input` to an interface. Each class must inherit from
`AbstractInput`, and each class must define a path to its template. All of the subclasses of `AbstractInput` are `InputComponent`, and each class must define a path to its template. All of the subclasses of `InputComponent` are
automatically added to a registry, which allows them to be easily referenced in other parts of the code. automatically added to a registry, which allows them to be easily referenced in other parts of the code.
""" """
@ -9,80 +9,55 @@ import json
import os import os
import time import time
import warnings import warnings
from abc import ABC, abstractmethod from gradio.component import Component
import numpy as np import numpy as np
import PIL.Image import PIL.Image
import PIL.ImageOps import PIL.ImageOps
import scipy.io.wavfile import scipy.io.wavfile
from gradio import preprocessing_utils, validation_data from gradio import preprocessing_utils, validation_data
import pandas as pd
import math
import tempfile
# Where to find the static resources associated with each template. class InputComponent(Component):
# BASE_INPUT_INTERFACE_TEMPLATE_PATH = 'static/js/interfaces/input/{}.js'
BASE_INPUT_INTERFACE_JS_PATH = 'static/js/interfaces/input/{}.js'
class AbstractInput(ABC):
""" """
An abstract class for defining the methods that all gradio inputs should have. Input Component. All input components subclass this.
When this is subclassed, it is automatically added to the registry
""" """
def __init__(self, label):
self.label = label
def get_validation_inputs(self):
"""
An interface can optionally implement a method that returns a list of examples inputs that it should be able to
accept and preprocess for validation purposes.
"""
return []
def get_template_context(self):
"""
:return: a dictionary with context variables for the javascript file associated with the context
"""
return {"label": self.label}
def preprocess(self, inp):
"""
By default, no pre-processing is applied to text.
"""
return inp
def process_example(self, example):
"""
        Preprocess example for UI
"""
return example
@classmethod @classmethod
def get_shortcut_implementations(cls): def get_all_shortcut_implementations(cls):
""" shortcuts = {}
Return dictionary of shortcut implementations for sub_cls in cls.__subclasses__():
""" for shortcut, parameters in sub_cls.get_shortcut_implementations().items():
return {} shortcuts[shortcut] = (sub_cls, parameters)
return shortcuts
class Textbox(AbstractInput): class Textbox(InputComponent):
""" """
    Component creates a textbox for user to enter input. Provides a string (or number if `is_numeric` is true) as an argument to the wrapped function.    Component creates a textbox for user to enter input. Provides a string (or number if `type` is "number") as an argument to the wrapped function.
Input type: str Input type: str
""" """
def __init__(self, lines=1, placeholder=None, default=None, numeric=False, label=None): def __init__(self, lines=1, placeholder=None, default=None, numeric=False, type="str", label=None):
''' '''
Parameters: Parameters:
lines (int): number of line rows to provide in textarea. lines (int): number of line rows to provide in textarea.
placeholder (str): placeholder hint to provide behind textarea. placeholder (str): placeholder hint to provide behind textarea.
default (str): default text to provide in textarea. default (str): default text to provide in textarea.
numeric (bool): whether the input should be parsed as a number instead of a string. numeric (bool): DEPRECATED. Whether the input should be parsed as a number instead of a string.
type (str): Type of value to be returned by component. "str" returns a string, "number" returns a float value.
label (str): component name in interface. label (str): component name in interface.
''' '''
self.lines = lines self.lines = lines
self.placeholder = placeholder self.placeholder = placeholder
self.default = default self.default = default
self.numeric = numeric if numeric:
warnings.warn("The 'numeric' parameter has been deprecated. Set parameter 'type' to 'number' instead.", DeprecationWarning)
self.type = "number"
else:
self.type = type
super().__init__(label) super().__init__(label)
def get_template_context(self): def get_template_context(self):
@ -98,20 +73,20 @@ class Textbox(AbstractInput):
return { return {
"text": {}, "text": {},
"textbox": {"lines": 7}, "textbox": {"lines": 7},
"number": {"numeric": True} "number": {"type": "number"}
} }
def preprocess(self, inp): def preprocess(self, x):
""" if self.type == "str":
Cast type of input return x
""" elif self.type == "number":
if self.numeric: return float(x)
return float(inp)
else: else:
return inp raise ValueError("Unknown type: " + self.type + ". Please choose from: 'str', 'number'.")
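A short usage sketch of the new type parameter: with type="number" the wrapped function receives a float rather than a string (the double function is illustrative only):

import gradio as gr

def double(x):
    return x * 2   # x arrives as a float because type="number"

gr.Interface(double, gr.inputs.Textbox(type="number"), "number").launch()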
class Slider(AbstractInput):
class Slider(InputComponent):
""" """
Component creates a slider that ranges from `minimum` to `maximum`. Provides a number as an argument to the wrapped function. Component creates a slider that ranges from `minimum` to `maximum`. Provides a number as an argument to the wrapped function.
Input type: float Input type: float
@ -128,6 +103,11 @@ class Slider(AbstractInput):
''' '''
self.minimum = minimum self.minimum = minimum
self.maximum = maximum self.maximum = maximum
if step is None:
difference = maximum - minimum
power = math.floor(math.log10(difference) - 1)
step = 10 ** power
self.step = step
self.default = minimum if default is None else default self.default = minimum if default is None else default
super().__init__(label) super().__init__(label)
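The default step works out to one order of magnitude below the size of the range. A small worked example of the same arithmetic, for two assumed ranges:

import math

# Range 0..100: difference = 100, floor(log10(100) - 1) = 1, so step = 10.
step_a = 10 ** math.floor(math.log10(100 - 0) - 1)

# Range 0..5: log10(5) is about 0.70, floor(0.70 - 1) = -1, so step = 0.1.
step_b = 10 ** math.floor(math.log10(5 - 0) - 1)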
@ -135,6 +115,7 @@ class Slider(AbstractInput):
return { return {
"minimum": self.minimum, "minimum": self.minimum,
"maximum": self.maximum, "maximum": self.maximum,
"step": self.step,
"default": self.default, "default": self.default,
**super().get_template_context() **super().get_template_context()
} }
@ -146,7 +127,7 @@ class Slider(AbstractInput):
} }
class Checkbox(AbstractInput): class Checkbox(InputComponent):
""" """
Component creates a checkbox that can be set to `True` or `False`. Provides a boolean as an argument to the wrapped function. Component creates a checkbox that can be set to `True` or `False`. Provides a boolean as an argument to the wrapped function.
Input type: bool Input type: bool
@ -166,19 +147,21 @@ class Checkbox(AbstractInput):
} }
class CheckboxGroup(AbstractInput): class CheckboxGroup(InputComponent):
""" """
Component creates a set of checkboxes of which a subset can be selected. Provides a list of strings representing the selected choices as an argument to the wrapped function. Component creates a set of checkboxes of which a subset can be selected. Provides a list of strings representing the selected choices as an argument to the wrapped function.
Input type: List[str] Input type: Union[List[str], List[int]]
""" """
    def __init__(self, choices, label=None): def __init__(self, choices, type="value", label=None):
''' '''
Parameters: Parameters:
choices (List[str]): list of options to select from. choices (List[str]): list of options to select from.
            type (str): Type of value to be returned by component. "value" returns the list of strings of the choices selected, "index" returns the list of indices of the choices selected.
label (str): component name in interface. label (str): component name in interface.
''' '''
self.choices = choices self.choices = choices
self.type = type
super().__init__(label) super().__init__(label)
def get_template_context(self): def get_template_context(self):
@ -187,20 +170,30 @@ class CheckboxGroup(AbstractInput):
**super().get_template_context() **super().get_template_context()
} }
def preprocess(self, x):
if self.type == "value":
return x
elif self.type == "index":
return [self.choices.index(choice) for choice in x]
else:
raise ValueError("Unknown type: " + self.type + ". Please choose from: 'value', 'index'.")
class Radio(AbstractInput):
class Radio(InputComponent):
""" """
Component creates a set of radio buttons of which only one can be selected. Provides string representing selected choice as an argument to the wrapped function. Component creates a set of radio buttons of which only one can be selected. Provides string representing selected choice as an argument to the wrapped function.
Input type: str Input type: Union[str, int]
""" """
def __init__(self, choices, label=None): def __init__(self, choices, type="value", label=None):
''' '''
Parameters: Parameters:
choices (List[str]): list of options to select from. choices (List[str]): list of options to select from.
type (str): Type of value to be returned by component. "value" returns the string of the choice selected, "index" returns the index of the choice selected.
label (str): component name in interface. label (str): component name in interface.
''' '''
self.choices = choices self.choices = choices
self.type = type
super().__init__(label) super().__init__(label)
def get_template_context(self): def get_template_context(self):
@ -209,20 +202,29 @@ class Radio(AbstractInput):
**super().get_template_context() **super().get_template_context()
} }
def preprocess(self, x):
if self.type == "value":
return x
elif self.type == "index":
return self.choices.index(x)
else:
raise ValueError("Unknown type: " + self.type + ". Please choose from: 'value', 'index'.")
class Dropdown(AbstractInput): class Dropdown(InputComponent):
""" """
Component creates a dropdown of which only one can be selected. Provides string representing selected choice as an argument to the wrapped function. Component creates a dropdown of which only one can be selected. Provides string representing selected choice as an argument to the wrapped function.
Input type: str Input type: str
""" """
def __init__(self, choices, label=None): def __init__(self, choices, type="value", label=None):
''' '''
Parameters: Parameters:
choices (List[str]): list of options to select from. choices (List[str]): list of options to select from.
type (str): Type of value to be returned by component. "value" returns the string of the choice selected, "index" returns the index of the choice selected.
label (str): component name in interface. label (str): component name in interface.
''' '''
self.choices = choices self.choices = choices
self.type = type
super().__init__(label) super().__init__(label)
def get_template_context(self): def get_template_context(self):
@ -231,69 +233,210 @@ class Dropdown(AbstractInput):
**super().get_template_context() **super().get_template_context()
} }
def preprocess(self, x):
if self.type == "value":
return x
elif self.type == "index":
return self.choices.index(x)
else:
raise ValueError("Unknown type: " + self.type + ". Please choose from: 'value', 'index'.")
class Image(AbstractInput):
class Image(InputComponent):
""" """
Component creates an image upload box with editing capabilities. Provides numpy array of shape `(width, height, 3)` if `image_mode` is "RGB" as an argument to the wrapped function. Provides numpy array of shape `(width, height)` if `image_mode` is "L" as an argument to the wrapped function. Component creates an image upload box with editing capabilities.
Input type: numpy.array Input type: Union[numpy.array, PIL.Image, str]
""" """
def __init__(self, shape=None, image_mode='RGB', label=None): def __init__(self, shape=None, image_mode='RGB', source="upload", tools=["brush", "crop", "rotate", "undo", "filter"], type="numpy", label=None):
''' '''
Parameters: Parameters:
shape (Tuple[int, int]): shape to crop and resize image to; if None, matches input image size. shape (Tuple[int, int]): shape to crop and resize image to; if None, matches input image size.
image_mode (str): "RGB" if color, or "L" if black and white. image_mode (str): "RGB" if color, or "L" if black and white.
source (str): Source of image. "upload" creates a box where user can drop an image file, "webcam" allows user to take snapshot from their webcam, "canvas" defaults to a white image that can be edited and drawn upon with tools.
tools (List[str]): Tools available to user to edit images. "brush" allows user to draw on image, "crop" allows user to select portion of image, "rotate" allows user to rotate or flip image, "undo" allows user to revert changes, "filter" allows user to apply filters on image.
type (str): Type of value to be returned by component. "numpy" returns a numpy array with shape (width, height, 3), "pil" returns a PIL image object, "file" returns a temporary file object whose path can be retrieved by file_obj.name.
label (str): component name in interface. label (str): component name in interface.
''' '''
if shape is None: self.shape = shape
self.image_width, self.image_height = None, None
else:
self.image_width = shape[0]
self.image_height = shape[1]
self.image_mode = image_mode self.image_mode = image_mode
self.source = source
self.tools = tools
self.type = type
super().__init__(label) super().__init__(label)
def get_validation_inputs(self):
return validation_data.BASE64_COLOR_IMAGES
@classmethod @classmethod
def get_shortcut_implementations(cls): def get_shortcut_implementations(cls):
return { return {
"image": {}, "image": {},
"webcam": {"source": "webcam"},
"sketchpad": {"image_mode": "L", "source": "canvas", "tools": ["brush"]},
"paint": {"source": "canvas", "tools": ["brush", "undo"]},
} }
def get_template_context(self): def get_template_context(self):
return { return {
"image_mode": self.image_mode,
"source": self.source,
"tools": self.tools,
**super().get_template_context() **super().get_template_context()
} }
def preprocess(self, inp): def preprocess(self, x):
""" im = preprocessing_utils.decode_base64_to_image(x)
Default preprocessing method for is to convert the picture to black and white and resize to be 48x48
"""
im = preprocessing_utils.decode_base64_to_image(inp)
with warnings.catch_warnings(): with warnings.catch_warnings():
warnings.simplefilter("ignore") warnings.simplefilter("ignore")
im = im.convert(self.image_mode) im = im.convert(self.image_mode)
image_width, image_height = self.image_width, self.image_height image_width, image_height = self.image_width, self.image_height
if image_width is None: if self.shape is not None:
image_width = im.size[0] im = preprocessing_utils.resize_and_crop(
if image_height is None: im, (self.shape[0], self.shape[1]))
image_height = im.size[1] if self.type == "pil":
im = preprocessing_utils.resize_and_crop( return im
im, (image_width, image_height)) elif self.type == "numpy":
return np.array(im) return np.array(im)
elif self.type == "file":
file_obj = tempfile.NamedTemporaryFile()
im.save(file_obj.name)
return file_obj
def process_example(self, example): def process_example(self, example):
if os.path.exists(example): if os.path.exists(example):
return preprocessing_utils.convert_file_to_base64(example) return preprocessing_utils.encode_file_to_base64(example)
else: else:
return example return example
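A usage sketch of the reworked Image component: with shape=(224, 224) and type="pil" the wrapped function receives a PIL image already cropped and resized (the image_size function is illustrative only):

import gradio as gr

def image_size(im):
    # im is a 224x224 PIL.Image because shape=(224, 224) and type="pil".
    return "{}x{}".format(*im.size)

gr.Interface(image_size, gr.inputs.Image(shape=(224, 224), type="pil"), "text").launch()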
class Sketchpad(AbstractInput): class Audio(InputComponent):
""" """
Component creates a sketchpad for black and white illustration. Provides numpy array of shape `(width, height)` as an argument to the wrapped function. Component accepts audio input files. Provides numpy array of shape `(samples, 2)` as an argument to the wrapped function.
Input type: Union[Tuple[int, numpy.array], str, numpy.array]
"""
def __init__(self, source="upload", type="numpy", label=None):
'''
Parameters:
source (str): Source of audio. "upload" creates a box where user can drop an audio file, "microphone" creates a microphone input.
            type (str): Type of value to be returned by component. "numpy" returns a 2-element tuple with an integer sample_rate and the data as a numpy.array of shape (samples, 2), "file" returns a temporary file object whose path can be retrieved by file_obj.name, "mfcc" returns the mfcc coefficients of the input audio.
label (str): component name in interface.
'''
self.source = source
self.type = type
super().__init__(label)
@classmethod
def get_shortcut_implementations(cls):
return {
"audio": {},
"microphone": {"source": "microphone"}
}
def preprocess(self, x):
"""
By default, no pre-processing is applied to a microphone input file
"""
file_obj = preprocessing_utils.decode_base64_to_file(x)
if self.type == "file":
return file_obj
elif self.type == "numpy":
return scipy.io.wavfile.read(file_obj.name)
elif self.type == "mfcc":
return preprocessing_utils.generate_mfcc_features_from_audio_file(file_obj.name)
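With type="file" the wrapped function gets a temporary file whose .name points at the decoded audio on disk, which suits libraries that expect a path. A hedged sketch (the duration function is illustrative only):

import gradio as gr
import scipy.io.wavfile

def duration(file_obj):
    sr, data = scipy.io.wavfile.read(file_obj.name)
    return str(len(data) / sr) + " seconds"

gr.Interface(duration, gr.inputs.Audio(type="file"), "text").launch()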
class File(InputComponent):
"""
Component accepts generic file uploads.
Input type: Union[str, bytes]
"""
def __init__(self, type="file", label=None):
'''
Parameters:
            type (str): Type of value to be returned by component. "file" returns a temporary file object whose path can be retrieved by file_obj.name, "bytes" returns a bytes object.
label (str): component name in interface.
'''
self.type = type
super().__init__(label)
@classmethod
def get_shortcut_implementations(cls):
return {
"file": {},
}
def preprocess(self, x):
if self.type == "file":
return preprocessing_utils.decode_base64_to_file(x)
elif self.type == "bytes":
return preprocessing_utils.decode_base64_to_binary(x)
else:
raise ValueError("Unknown type: " + self.type + ". Please choose from: 'file', 'bytes'.")
class Dataframe(InputComponent):
"""
Component accepts 2D input through a spreadsheet interface.
Input type: Union[pandas.DataFrame, numpy.array, List[Union[str, float]], List[List[Union[str, float]]]]
"""
def __init__(self, headers=None, row_count=3, col_count=3, datatype="str", type="pandas", label=None):
'''
Parameters:
headers (List[str]): Header names to dataframe.
row_count (int): Limit number of rows for input.
col_count (int): Limit number of columns for input. If equal to 1, return data will be one-dimensional. Ignored if `headers` is provided.
datatype (Union[str, List[str]]): Datatype of values in sheet. Can be provided per column as a list of strings, or for the entire sheet as a single string. Valid datatypes are "str", "number", "bool", and "date".
type (str): Type of value to be returned by component. "pandas" for pandas dataframe, "numpy" for numpy array, or "array" for a Python array.
label (str): component name in interface.
'''
self.headers = headers
self.datatype = datatype
self.row_count = row_count
self.col_count = len(headers) if headers else col_count
self.type = type
super().__init__(label)
def get_template_context(self):
return {
"headers": self.headers,
"datatype": self.datatype,
"row_count": self.row_count,
"col_count": self.col_count,
**super().get_template_context()
}
@classmethod
def get_shortcut_implementations(cls):
return {
"dataframe": {"type": "pandas"},
"numpy": {"type": "numpy"},
"matrix": {"type": "array"},
"list": {"type": "array", "col_count": 1},
}
def preprocess(self, x):
if self.type == "pandas":
if self.headers:
return pd.DataFrame(x, columns=self.headers)
else:
return pd.DataFrame(x)
if self.col_count == 1:
            x = [row[0] for row in x]
if self.type == "numpy":
return np.array(x)
elif self.type == "array":
return x
else:
raise ValueError("Unknown type: " + self.type + ". Please choose from: 'pandas', 'numpy', 'array'.")
# DEPRECATED COMPONENTS
class Sketchpad(InputComponent):
"""
DEPRECATED. Component creates a sketchpad for black and white illustration. Provides numpy array of shape `(width, height)` as an argument to the wrapped function.
Input type: numpy.array Input type: numpy.array
""" """
@ -306,23 +449,18 @@ class Sketchpad(AbstractInput):
flatten (bool): whether to reshape the numpy array to a single dimension. flatten (bool): whether to reshape the numpy array to a single dimension.
label (str): component name in interface. label (str): component name in interface.
''' '''
warnings.warn("Sketchpad has been deprecated. Please use 'Image' component to generate a sketchpad. The string shorcut 'sketchpad' has been moved to the Image component.", DeprecationWarning)
self.image_width = shape[0] self.image_width = shape[0]
self.image_height = shape[1] self.image_height = shape[1]
self.invert_colors = invert_colors self.invert_colors = invert_colors
self.flatten = flatten self.flatten = flatten
super().__init__(label) super().__init__(label)
@classmethod def preprocess(self, x):
def get_shortcut_implementations(cls):
return {
"sketchpad": {},
}
def preprocess(self, inp):
""" """
Default preprocessing method for the SketchPad is to convert the sketch to black and white and resize 28x28 Default preprocessing method for the SketchPad is to convert the sketch to black and white and resize 28x28
""" """
im_transparent = preprocessing_utils.decode_base64_to_image(inp) im_transparent = preprocessing_utils.decode_base64_to_image(x)
# Create a white background for the alpha channel # Create a white background for the alpha channel
im = PIL.Image.new("RGBA", im_transparent.size, "WHITE") im = PIL.Image.new("RGBA", im_transparent.size, "WHITE")
im.paste(im_transparent, (0, 0), im_transparent) im.paste(im_transparent, (0, 0), im_transparent)
@ -339,12 +477,12 @@ class Sketchpad(AbstractInput):
return array return array
def process_example(self, example): def process_example(self, example):
return preprocessing_utils.convert_file_to_base64(example) return preprocessing_utils.encode_file_to_base64(example)
class Webcam(AbstractInput): class Webcam(InputComponent):
""" """
Component creates a webcam for captured image input. Provides numpy array of shape `(width, height, 3)` as an argument to the wrapped function. DEPRECATED. Component creates a webcam for captured image input. Provides numpy array of shape `(width, height, 3)` as an argument to the wrapped function.
Input type: numpy.array Input type: numpy.array
""" """
@ -354,34 +492,26 @@ class Webcam(AbstractInput):
shape (Tuple[int, int]): shape to crop and resize image to. shape (Tuple[int, int]): shape to crop and resize image to.
label (str): component name in interface. label (str): component name in interface.
''' '''
warnings.warn("Webcam has been deprecated. Please use 'Image' component to generate a webcam. The string shorcut 'webcam' has been moved to the Image component.", DeprecationWarning)
self.image_width = shape[0] self.image_width = shape[0]
self.image_height = shape[1] self.image_height = shape[1]
self.num_channels = 3 self.num_channels = 3
super().__init__(label) super().__init__(label)
def get_validation_inputs(self): def preprocess(self, x):
return validation_data.BASE64_COLOR_IMAGES
@classmethod
def get_shortcut_implementations(cls):
return {
"webcam": {},
}
def preprocess(self, inp):
""" """
        Default preprocessing method is to convert the picture to RGB and resize it to the given shape        Default preprocessing method is to convert the picture to RGB and resize it to the given shape
""" """
im = preprocessing_utils.decode_base64_to_image(inp) im = preprocessing_utils.decode_base64_to_image(x)
im = im.convert('RGB') im = im.convert('RGB')
im = preprocessing_utils.resize_and_crop( im = preprocessing_utils.resize_and_crop(
im, (self.image_width, self.image_height)) im, (self.image_width, self.image_height))
return np.array(im) return np.array(im)
class Microphone(AbstractInput): class Microphone(InputComponent):
""" """
Component creates a microphone element for audio inputs. Provides numpy array of shape `(samples, 2)` as an argument to the wrapped function. DEPRECATED. Component creates a microphone element for audio inputs.
Input type: numpy.array Input type: numpy.array
""" """
@ -391,6 +521,7 @@ class Microphone(AbstractInput):
preprocessing (Union[str, Callable]): preprocessing to apply to input preprocessing (Union[str, Callable]): preprocessing to apply to input
label (str): component name in interface. label (str): component name in interface.
''' '''
warnings.warn("Microphone has been deprecated. Please use 'Audio' component to generate a microphone. The string shorcut 'microphone' has been moved to the Audio component.", DeprecationWarning)
super().__init__(label) super().__init__(label)
if preprocessing is None or preprocessing == "mfcc": if preprocessing is None or preprocessing == "mfcc":
self.preprocessing = preprocessing self.preprocessing = preprocessing
@ -398,25 +529,13 @@ class Microphone(AbstractInput):
raise ValueError( raise ValueError(
"unexpected value for preprocessing", preprocessing) "unexpected value for preprocessing", preprocessing)
@classmethod def preprocess(self, x):
def get_shortcut_implementations(cls):
return {
"microphone": {},
}
def preprocess(self, inp):
""" """
By default, no pre-processing is applied to a microphone input file By default, no pre-processing is applied to a microphone input file
""" """
file_obj = preprocessing_utils.decode_base64_to_wav_file(inp) file_obj = preprocessing_utils.decode_base64_to_file(x)
if self.preprocessing == "mfcc": if self.preprocessing == "mfcc":
return preprocessing_utils.generate_mfcc_features_from_audio_file(file_obj.name) return preprocessing_utils.generate_mfcc_features_from_audio_file(file_obj.name)
_, signal = scipy.io.wavfile.read(file_obj.name) _, signal = scipy.io.wavfile.read(file_obj.name)
return signal return signal
# Automatically adds all shortcut implementations in AbstractInput into a dictionary.
shortcuts = {}
for cls in AbstractInput.__subclasses__():
for shortcut, parameters in cls.get_shortcut_implementations().items():
shortcuts[shortcut] = cls(**parameters)


@ -7,8 +7,8 @@ import tempfile
import traceback import traceback
import webbrowser import webbrowser
import gradio.inputs from gradio.inputs import InputComponent
import gradio.outputs from gradio.outputs import OutputComponent
from gradio import networking, strings from gradio import networking, strings
from distutils.version import StrictVersion from distutils.version import StrictVersion
import pkg_resources import pkg_resources
@ -45,8 +45,8 @@ class Interface:
""" """
Parameters: Parameters:
fn (Callable): the function to wrap an interface around. fn (Callable): the function to wrap an interface around.
inputs (Union[str, List[Union[str, AbstractInput]]]): a single Gradio input component, or list of Gradio input components. Components can either be passed as instantiated objects, or referred to by their string shortcuts. The number of input components should match the number of parameters in fn. inputs (Union[str, List[Union[str, InputComponent]]]): a single Gradio input component, or list of Gradio input components. Components can either be passed as instantiated objects, or referred to by their string shortcuts. The number of input components should match the number of parameters in fn.
outputs (Union[str, List[Union[str, AbstractOutput]]]): a single Gradio output component, or list of Gradio output components. Components can either be passed as instantiated objects, or referred to by their string shortcuts. The number of output components should match the number of values returned by fn. outputs (Union[str, List[Union[str, OutputComponent]]]): a single Gradio output component, or list of Gradio output components. Components can either be passed as instantiated objects, or referred to by their string shortcuts. The number of output components should match the number of values returned by fn.
live (bool): whether the interface should automatically reload on change. live (bool): whether the interface should automatically reload on change.
capture_session (bool): if True, captures the default graph and session (needed for Tensorflow 1.x) capture_session (bool): if True, captures the default graph and session (needed for Tensorflow 1.x)
title (str): a title for the interface; if provided, appears above the input and output components. title (str): a title for the interface; if provided, appears above the input and output components.
@ -55,22 +55,24 @@ class Interface:
""" """
def get_input_instance(iface): def get_input_instance(iface):
if isinstance(iface, str): if isinstance(iface, str):
return gradio.inputs.shortcuts[iface.lower()] shortcut = InputComponent.get_all_shortcut_implementations()[iface]
elif isinstance(iface, gradio.inputs.AbstractInput): return shortcut[0](**shortcut[1])
elif isinstance(iface, InputComponent):
return iface return iface
else: else:
raise ValueError("Input interface must be of type `str` or " raise ValueError("Input interface must be of type `str` or "
"`AbstractInput`") "`InputComponent`")
def get_output_instance(iface): def get_output_instance(iface):
if isinstance(iface, str): if isinstance(iface, str):
return gradio.outputs.shortcuts[iface.lower()] shortcut = OutputComponent.get_all_shortcut_implementations()[iface]
elif isinstance(iface, gradio.outputs.AbstractOutput): return shortcut[0](**shortcut[1])
elif isinstance(iface, OutputComponent):
return iface return iface
else: else:
raise ValueError( raise ValueError(
"Output interface must be of type `str` or " "Output interface must be of type `str` or "
"`AbstractOutput`" "`OutputComponent`"
) )
if isinstance(inputs, list): if isinstance(inputs, list):
self.input_interfaces = [get_input_instance(i) for i in inputs] self.input_interfaces = [get_input_instance(i) for i in inputs]


@ -1,61 +1,47 @@
""" """
This module defines various classes that can serve as the `output` to an interface. Each class must inherit from This module defines various classes that can serve as the `output` to an interface. Each class must inherit from
`AbstractOutput`, and each class must define a path to its template. All of the subclasses of `AbstractOutput` are `OutputComponent`, and each class must define a path to its template. All of the subclasses of `OutputComponent` are
automatically added to a registry, which allows them to be easily referenced in other parts of the code. automatically added to a registry, which allows them to be easily referenced in other parts of the code.
""" """
from abc import ABC, abstractmethod from gradio.component import Component
import numpy as np import numpy as np
import json import json
from gradio import preprocessing_utils from gradio import preprocessing_utils
import datetime import datetime
import operator import operator
from numbers import Number from numbers import Number
import warnings
import tempfile
import scipy
# Where to find the static resources associated with each template. class OutputComponent(Component):
BASE_OUTPUT_INTERFACE_JS_PATH = 'static/js/interfaces/output/{}.js'
class AbstractOutput(ABC):
""" """
An abstract class for defining the methods that all gradio inputs should have. Output Component. All output components subclass this.
When this is subclassed, it is automatically added to the registry
""" """
def __init__(self, label):
self.label = label
def get_template_context(self):
"""
:return: a dictionary with context variables for the javascript file associated with the context
"""
return {"label": self.label}
def postprocess(self, prediction):
"""
Any postprocessing needed to be performed on function output.
"""
return prediction
@classmethod @classmethod
def get_shortcut_implementations(cls): def get_all_shortcut_implementations(cls):
""" shortcuts = {}
Return dictionary of shortcut implementations for sub_cls in cls.__subclasses__():
""" for shortcut, parameters in sub_cls.get_shortcut_implementations().items():
return {} shortcuts[shortcut] = (sub_cls, parameters)
return shortcuts
class Textbox(AbstractOutput): class Textbox(OutputComponent):
''' '''
Component creates a textbox to render output text or number. Component creates a textbox to render output text or number.
Output type: str Output type: Union[str, float, int]
''' '''
def __init__(self, label=None): def __init__(self, type="str", label=None):
''' '''
Parameters: Parameters:
type (str): Type of value to be passed to component. "str" expects a string, "number" expects a float value.
label (str): component name in interface. label (str): component name in interface.
''' '''
self.type = type
super().__init__(label) super().__init__(label)
def get_template_context(self): def get_template_context(self):
@ -66,20 +52,21 @@ class Textbox(AbstractOutput):
@classmethod @classmethod
def get_shortcut_implementations(cls): def get_shortcut_implementations(cls):
return { return {
"text": {}, "text": {"type": "str"},
"textbox": {}, "textbox": {"type": "str"},
"number": {}, "number": {"type": "number"},
} }
def postprocess(self, prediction): def postprocess(self, y):
if isinstance(prediction, str) or isinstance(prediction, int) or isinstance(prediction, float): if self.type == "str":
return str(prediction) return y
elif self.type == "number":
return str(y)
else: else:
raise ValueError("The `Textbox` output interface expects an output that is one of: a string, or" raise ValueError("Unknown type: " + self.type + ". Please choose from: 'str', 'number'")
"an int/float that can be converted to a string.")
class Label(AbstractOutput): class Label(OutputComponent):
''' '''
Component outputs a classification label, along with confidence scores of top categories if provided. Confidence scores are represented as a dictionary mapping labels to scores between 0 and 1. Component outputs a classification label, along with confidence scores of top categories if provided. Confidence scores are represented as a dictionary mapping labels to scores between 0 and 1.
Output type: Union[Dict[str, float], str, int, float] Output type: Union[Dict[str, float], str, int, float]
@ -98,12 +85,12 @@ class Label(AbstractOutput):
self.num_top_classes = num_top_classes self.num_top_classes = num_top_classes
super().__init__(label) super().__init__(label)
def postprocess(self, prediction): def postprocess(self, y):
if isinstance(prediction, str) or isinstance(prediction, Number): if isinstance(y, str) or isinstance(y, Number):
return {"label": str(prediction)} return {"label": str(y)}
elif isinstance(prediction, dict): elif isinstance(y, dict):
sorted_pred = sorted( sorted_pred = sorted(
prediction.items(), y.items(),
key=operator.itemgetter(1), key=operator.itemgetter(1),
reverse=True reverse=True
) )
@ -118,8 +105,8 @@ class Label(AbstractOutput):
} for pred in sorted_pred } for pred in sorted_pred
] ]
} }
elif isinstance(prediction, int) or isinstance(prediction, float): elif isinstance(y, int) or isinstance(y, float):
return {self.LABEL_KEY: str(prediction)} return {self.LABEL_KEY: str(y)}
else: else:
raise ValueError("The `Label` output interface expects one of: a string label, or an int label, a " raise ValueError("The `Label` output interface expects one of: a string label, or an int label, a "
"float label, or a dictionary whose keys are labels and values are confidences.") "float label, or a dictionary whose keys are labels and values are confidences.")
@ -131,60 +118,53 @@ class Label(AbstractOutput):
} }
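A sketch of how the Label component above handles a confidence dictionary (the keys of each confidence entry come from lines elided in this hunk, so treat the exact output shape as approximate):

    from gradio.outputs import Label

    label = Label(num_top_classes=2)
    label.postprocess({"cat": 0.70, "dog": 0.20, "bird": 0.10})
    # Entries are sorted by score and only the top num_top_classes are kept;
    # a bare string or number instead comes back as {"label": "..."}.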
class Image(AbstractOutput): class Image(OutputComponent):
''' '''
Component displays an image. Expects a numpy array of shape `(width, height, 3)` to be returned by the function, or a `matplotlib.pyplot` if `plot = True`. Component displays an output image.
Output type: numpy.array Output type: Union[numpy.array, PIL.Image, str, matplotlib.pyplot]
''' '''
def __init__(self, plot=False, label=None): def __init__(self, type="numpy", plot=False, label=None):
''' '''
Parameters: Parameters:
plot (bool): whether to expect a plot to be returned by the function. type (str): Type of value to be passed to component. "numpy" expects a numpy array with shape (width, height, 3), "pil" expects a PIL image object, "file" expects a file path to the saved image, "plot" expects a matplotlib.pyplot object.
plot (bool): DEPRECATED. Whether to expect a plot to be returned by the function.
label (str): component name in interface. label (str): component name in interface.
''' '''
self.plot = plot if plot:
warnings.warn("The 'plot' parameter has been deprecated. Set parameter 'type' to 'plot' instead.", DeprecationWarning)
self.type = "plot"
else:
self.type = type
super().__init__(label) super().__init__(label)
@classmethod @classmethod
def get_shortcut_implementations(cls): def get_shortcut_implementations(cls):
return { return {
"image": {}, "image": {},
"plot": {"plot": True} "plot": {"type": "plot"},
"pil": {"type": "pil"}
} }
def postprocess(self, prediction): def postprocess(self, y):
""" """
""" """
if self.plot: if self.type in ("numpy", "pil"):
try: if self.type == "pil":
return preprocessing_utils.encode_plot_to_base64(prediction) y = np.array(y)
except: return preprocessing_utils.encode_array_to_base64(y)
raise ValueError("The `Image` output interface expects a `matplotlib.pyplot` object" elif self.type == "file":
"if plt=True.") return preprocessing_utils.encode_file_to_base64(y)
elif self.type == "plot":
return preprocessing_utils.encode_plot_to_base64(y)
else: else:
try: raise ValueError("Unknown type: " + self.type + ". Please choose from: 'numpy', 'pil', 'file', 'plot'.")
return preprocessing_utils.encode_array_to_base64(prediction)
except:
raise ValueError(
"The `Image` output interface (with plt=False) expects a numpy array.")
def rebuild_flagged(self, dir, msg):
"""
Default rebuild method to decode a base64 image
"""
im = preprocessing_utils.decode_base64_to_image(msg)
timestamp = datetime.datetime.now()
filename = 'output_{}.png'.format(timestamp.
strftime("%Y-%m-%d-%H-%M-%S"))
im.save('{}/{}'.format(dir, filename), 'PNG')
return filename
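A sketch of the new Image output `type` handling (illustrative only; the exact data-URI prefix depends on preprocessing_utils.encode_array_to_base64):

    import PIL.Image
    from gradio.outputs import Image as ImageOutput

    out = ImageOutput(type="pil")
    data_uri = out.postprocess(PIL.Image.new("RGB", (64, 64), "red"))
    # The PIL image is converted to a numpy array, base64-encoded, and returned
    # as a "data:image/..." string the browser can render directly.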
class KeyValues(AbstractOutput): class KeyValues(OutputComponent):
''' '''
Component displays a table representing values for multiple fields. Component displays a table representing values for multiple fields.
Output type: List[Tuple[str, value]] Output type: Union[Dict, List[Tuple[str, Union[str, int, float]]]]
''' '''
def __init__(self, label=None): def __init__(self, label=None):
@ -194,9 +174,11 @@ class KeyValues(AbstractOutput):
''' '''
super().__init__(label) super().__init__(label)
def postprocess(self, prediction): def postprocess(self, y):
if isinstance(prediction, dict): if isinstance(y, dict):
return prediction return list(y.items())
elif isinstance(y, list):
return y
else: else:
raise ValueError("The `KeyValues` output interface expects an output that is a dictionary whose keys are " raise ValueError("The `KeyValues` output interface expects an output that is a dictionary whose keys are "
"labels and values are corresponding values.") "labels and values are corresponding values.")
@ -208,19 +190,51 @@ class KeyValues(AbstractOutput):
} }
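A one-line sketch of the normalization KeyValues.postprocess now performs:

    from gradio.outputs import KeyValues

    KeyValues().postprocess({"precision": 0.91, "recall": 0.87})
    # -> [("precision", 0.91), ("recall", 0.87)]; a list of tuples passes through unchanged.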
class HighlightedText(AbstractOutput): class HighlightedText(OutputComponent):
''' '''
Component creates text that contains spans that are highlighted by category or numerical value. Component creates text that contains spans that are highlighted by category or numerical value.
Output is represented as a list of Tuple pairs, where the first element represents the span of text represented by the tuple, and the second element represents the category or value of the text. Output is represented as a list of Tuple pairs, where the first element represents the span of text represented by the tuple, and the second element represents the category or value of the text.
Output type: List[Tuple[str, Union[float, str]]] Output type: List[Tuple[str, Union[float, str]]]
''' '''
def __init__(self, category_colors=None, label=None): def __init__(self, color_map=None, label=None):
''' '''
Parameters: Parameters:
category_colors (Dict[str, float]): color_map (Dict[str, str]): Map between category and respective colors
label (str): component name in interface. label (str): component name in interface.
''' '''
self.color_map = color_map
super().__init__(label)
def get_template_context(self):
return {
"color_map": self.color_map,
**super().get_template_context()
}
@classmethod
def get_shortcut_implementations(cls):
return {
"highlight": {},
}
def postprocess(self, y):
return y
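A sketch of the span format HighlightedText consumes (postprocess is a pass-through; the coloring happens client-side):

    from gradio.outputs import HighlightedText

    ht = HighlightedText(color_map={"PER": "pink", "LOC": "lightblue"})
    ht.postprocess([("Marie Curie", "PER"), (" worked in ", None), ("Paris", "LOC")])
    # Each (span, category) pair is rendered in the category's color;
    # numeric second elements are shaded on a +/- scale instead.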
class Audio(OutputComponent):
'''
Creates an audio player that plays the output audio.
Output type: Union[Tuple[int, numpy.array], str]
'''
def __init__(self, type="numpy", label=None):
'''
Parameters:
type (str): Type of value to be passed to component. "numpy" returns a two-element tuple of an integer sample_rate and a numpy.array of audio data with shape (samples, 2), "file" returns a temporary file path to the saved wav audio file.
label (str): component name in interface.
'''
self.type = type
super().__init__(label) super().__init__(label)
def get_template_context(self): def get_template_context(self):
@ -231,21 +245,24 @@ class HighlightedText(AbstractOutput):
@classmethod @classmethod
def get_shortcut_implementations(cls): def get_shortcut_implementations(cls):
return { return {
"highlight": {}, "audio": {},
} }
def postprocess(self, prediction): def postprocess(self, y):
if isinstance(prediction, str) or isinstance(prediction, int) or isinstance(prediction, float): if self.type in ("numpy", "file"):
return str(prediction) if self.type == "numpy":
file = tempfile.NamedTemporaryFile()
scipy.io.wavfile.write(file, y[0], y[1])
y = file.name
return preprocessing_utils.encode_file_to_base64(y, type="audio", ext="wav")
else: else:
raise ValueError("The `Textbox` output interface expects an output that is one of: a string, or" raise ValueError("Unknown type: " + self.type + ". Please choose from: 'numpy', 'file'.")
"an int/float that can be converted to a string.")
class JSON(AbstractOutput): class JSON(OutputComponent):
''' '''
Used for JSON output. Expects a JSON string or a Python dictionary or list that can be converted to JSON. Used for JSON output. Expects a JSON string or a Python object that is JSON serializable.
Output type: Union[str, Dict[str, Any], List[Any]] Output type: Union[str, Any]
''' '''
def __init__(self, label=None): def __init__(self, label=None):
@ -255,14 +272,12 @@ class JSON(AbstractOutput):
''' '''
super().__init__(label) super().__init__(label)
def postprocess(self, prediction): def postprocess(self, y):
if isinstance(prediction, dict) or isinstance(prediction, list): if isinstance(y, str):
return json.dumps(prediction) return json.loads(y)
elif isinstance(prediction, str):
return prediction
else: else:
raise ValueError("The `JSON` output interface expects an output that is a dictionary or list " return y
"or a preformatted JSON string.")
@classmethod @classmethod
def get_shortcut_implementations(cls): def get_shortcut_implementations(cls):
@ -271,9 +286,9 @@ class JSON(AbstractOutput):
} }
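A sketch of the resulting JSON behavior, with the string branch parsing rather than re-serializing:

    from gradio.outputs import JSON

    JSON().postprocess('{"a": 1}')   # -> {"a": 1}: parsed so the client-side tree gets an object
    JSON().postprocess({"a": 1})     # -> {"a": 1}: already serializable, passed through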
class HTML(AbstractOutput): class HTML(OutputComponent):
''' '''
Used for HTML output. Expects a JSON string or a Python dictionary or list that can be converted to JSON. Used for HTML output. Expects a valid HTML string.
Output type: str Output type: str
''' '''
@ -284,11 +299,6 @@ class HTML(AbstractOutput):
''' '''
super().__init__(label) super().__init__(label)
def postprocess(self, prediction):
if isinstance(prediction, str):
return prediction
else:
raise ValueError("The `HTML` output interface expects an output that is a str.")
@classmethod @classmethod
def get_shortcut_implementations(cls): def get_shortcut_implementations(cls):
@ -297,8 +307,69 @@ class HTML(AbstractOutput):
} }
# Automatically adds all shortcut implementations in AbstractInput into a dictionary. class File(OutputComponent):
shortcuts = {} '''
for cls in AbstractOutput.__subclasses__(): Used for file output. Expects a file path, a string, or a bytes object to be returned as a file, depending on `type`.
for shortcut, parameters in cls.get_shortcut_implementations().items(): Output type: Union[bytes, str]
shortcuts[shortcut] = cls(**parameters) '''
def __init__(self, type="file", label=None):
'''
Parameters:
type (str): Type of value to be passed to component. "file" expects a file path, "str" expects a string to be returned as a file, "binary" expects a bytes object to be returned as a file.
label (str): component name in interface.
'''
super().__init__(label)
@classmethod
def get_shortcut_implementations(cls):
return {
"file": {},
}
class Dataframe(OutputComponent):
"""
Component displays 2D output through a spreadsheet interface.
Output type: Union[pandas.DataFrame, numpy.array, List[Union[str, float]], List[List[Union[str, float]]]]
"""
def __init__(self, headers=None, type="pandas", label=None):
'''
Parameters:
headers (List[str]): Header names for the dataframe.
type (str): Type of value to be passed to component. "pandas" for pandas dataframe, "numpy" for numpy array, or "array" for Python array.
label (str): component name in interface.
'''
self.type = type
self.headers = headers
super().__init__(label)
def get_template_context(self):
return {
"headers": self.headers,
**super().get_template_context()
}
@classmethod
def get_shortcut_implementations(cls):
return {
"dataframe": {"type": "pandas"},
"numpy": {"type": "numpy"},
"matrix": {"type": "array"},
"list": {"type": "array"},
}
def postprocess(self, y):
if self.type == "pandas":
return {"headers": list(y.columns), "data": y.values.tolist()}
elif self.type in ("numpy", "array"):
if self.type == "numpy":
y = y.tolist()
if len(y) == 0 or not isinstance(y[0], list):
y = [y]
return {"data": y}
else:
raise ValueError("Unknown type: " + self.type + ". Please choose from: 'pandas', 'numpy', 'array'.")
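A sketch of Dataframe.postprocess for the documented `type` values:

    import numpy as np
    import pandas as pd
    from gradio.outputs import Dataframe

    Dataframe(type="pandas").postprocess(pd.DataFrame({"a": [1, 2], "b": [3, 4]}))
    # -> {"headers": ["a", "b"], "data": [[1, 3], [2, 4]]}

    Dataframe(type="numpy").postprocess(np.array([1, 2, 3]))
    # -> {"data": [[1, 2, 3]]}  (a flat array is wrapped into a single row)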

View File

@ -17,12 +17,14 @@ def decode_base64_to_image(encoding):
return Image.open(BytesIO(base64.b64decode(image_encoded))) return Image.open(BytesIO(base64.b64decode(image_encoded)))
def convert_file_to_base64(img): def encode_file_to_base64(f, type="image", ext=None):
with open(img, "rb") as image_file: with open(f, "rb") as file:
encoded_string = base64.b64encode(image_file.read()) encoded_string = base64.b64encode(file.read())
base64_str = str(encoded_string, 'utf-8') base64_str = str(encoded_string, 'utf-8')
type = img.split(".")[-1] if ext is None:
return "data:image/" + type + ";base64," + base64_str ext = f.split(".")[-1]
return "data:" + type + "/" + ext + ";base64," + base64_str
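The generalized encoder builds the MIME prefix from `type` and `ext`; a quick sketch of the resulting data URIs (file names are hypothetical):

    encode_file_to_base64("clip.wav", type="audio", ext="wav")
    # -> "data:audio/wav;base64,<encoded bytes>"

    encode_file_to_base64("photo.png")   # ext inferred from the file name
    # -> "data:image/png;base64,<encoded bytes>"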
def encode_plot_to_base64(plt): def encode_plot_to_base64(plt):
with BytesIO() as output_bytes: with BytesIO() as output_bytes:
@ -61,19 +63,24 @@ def resize_and_crop(img, size, crop_type='center'):
return ImageOps.fit(img, size, centering=center) return ImageOps.fit(img, size, centering=center)
################## ##################
# AUDIO FILES # OUTPUT
################## ##################
def decode_base64_to_wav_file(encoding): def decode_base64_to_binary(encoding):
inp = encoding.split(';')[1].split(',')[1] inp = encoding.split(';')[1].split(',')[1]
wav_obj = base64.b64decode(inp) return base64.b64decode(inp)
def decode_base64_to_file(encoding):
file_obj = tempfile.NamedTemporaryFile() file_obj = tempfile.NamedTemporaryFile()
file_obj.close() file_obj.write(decode_base64_to_binary(encoding))
with open(file_obj.name, 'wb') as f:
f.write(wav_obj)
return file_obj return file_obj
##################
# AUDIO FILES
##################
def generate_mfcc_features_from_audio_file(wav_filename, def generate_mfcc_features_from_audio_file(wav_filename,
pre_emphasis=0.95, pre_emphasis=0.95,
frame_size= 0.025, frame_size= 0.025,

View File

@ -0,0 +1,15 @@
.file_display {
height: 100%;
display: flex;
justify-content: center;
align-items: center;
flex-direction: column;
}
.file_name {
font-size: 24px;
font-weight: bold;
margin-bottom: 18px;
}
.file_size {
font-size: 18px;
}

View File

@ -0,0 +1,15 @@
.output_text {
width: 100%;
font-size: 18px;
outline: none;
background-color: white;
border: solid 1px lightgray;
border-radius: 2px;
box-sizing: border-box;
padding: 4px;
min-height: 30px;
font-family: monospace;
white-space: pre-wrap; /* CSS3 */
white-space: -moz-pre-wrap; /* Firefox */
word-wrap: break-word; /* IE */
}

View File

@ -0,0 +1,22 @@
.highlight_legend {
margin-bottom: 4px;
}
.color_legend {
font-family: monospace;
padding: 4px;
border-radius: 2px;
display: flex;
justify-content: space-between;
background: linear-gradient(90deg, rgba(58,241,255,1) 0%, rgba(58,241,255,0) 49%, rgba(230,126,34,0) 50%, rgba(230,126,34,1) 100%);
margin-bottom: 4px;
}
.category-label {
display: inline-flex;
margin-right: 8px;
margin-bottom: 4px;
}
.category-label div {
width: 24px;
margin-right: 4px;
}

View File

@ -0,0 +1,15 @@
.output_text {
width: 100%;
font-size: 18px;
outline: none;
background-color: white;
border: solid 1px lightgray;
border-radius: 2px;
box-sizing: border-box;
padding: 4px;
min-height: 30px;
font-family: monospace;
white-space: pre-wrap; /* CSS3 */
white-space: -moz-pre-wrap; /* Firefox */
word-wrap: break-word; /* IE */
}

View File

@ -0,0 +1,15 @@
.output_text {
width: 100%;
font-size: 18px;
outline: none;
background-color: white;
border: solid 1px lightgray;
border-radius: 2px;
box-sizing: border-box;
padding: 4px;
min-height: 30px;
font-family: monospace;
white-space: pre-wrap; /* CSS3 */
white-space: -moz-pre-wrap; /* Firefox */
word-wrap: break-word; /* IE */
}

File diff suppressed because one or more lines are too long

View File

@ -13,15 +13,23 @@
/* Styles for the container of the tree (e.g. fonts, margins etc.) */ /* Styles for the container of the tree (e.g. fonts, margins etc.) */
.jsontree_tree { .jsontree_tree {
margin-left: 30px;
font-family: 'PT Mono', monospace; font-family: 'PT Mono', monospace;
font-size: 14px; font-size: 14px;
padding-left: 0;
} }
.jsontree_tree ul {
padding-left: 20px;
}
.jsontree_tree li {
list-style: none;
}
/* Styles for a list of child nodes */ /* Styles for a list of child nodes */
.jsontree_child-nodes { .jsontree_child-nodes {
display: none; display: none;
margin-left: 35px;
margin-bottom: 5px; margin-bottom: 5px;
line-height: 2; line-height: 2;
} }

File diff suppressed because one or more lines are too long

View File

@ -42,13 +42,22 @@ function gradio(config, fn, target) {
"checkboxgroup" : checkbox_group, "checkboxgroup" : checkbox_group,
"slider" : slider, "slider" : slider,
"dropdown" : dropdown, "dropdown" : dropdown,
"audio" : audio_input,
"file" : file_input,
"dataframe" : dataframe_input,
} }
let output_to_object_map = { let output_to_object_map = {
"csv" : {}, "csv" : {},
"image" : image_output, "image" : image_output,
"label" : label_output, "label" : label_output,
"keyvalues" : key_values, "keyvalues" : key_values,
"textbox" : textbox_output "textbox" : textbox_output,
"highlightedtext": highlighted_text,
"audio": audio_output,
"json": json_output,
"html": html_output,
"file" : file_output,
"dataframe" : dataframe_output,
} }
let id_to_interface_map = {} let id_to_interface_map = {}

View File

@ -0,0 +1,99 @@
const audio_input = {
html: `
<div class="upload_zone">
<img class="not_recording" src="/static/img/mic.png" />
<div class="recording hidden volume_display">
<div class="volume volume_left">
<div class="volume_bar"></div>
</div>
<img src="/static/img/mic_recording.png" />
<div class="volume volume_right">
<div class="volume_bar"></div>
</div>
</div>
<div class="not_recording input_caption">Click to Record from Microphone</div>
<div class="recording hidden input_caption">Click to Stop Recording</div>
</div>
<div class="player hidden">
<div class="waveform"></div>
<button class="playpause primary">Play / Pause</button>
</div>
`,
state: "NO_AUDIO",
init: function(opts) {
var io = this;
this.wavesurfer = WaveSurfer.create({
container: io.target.find('.waveform')[0],
waveColor: '#888888',
progressColor: '#e67e22',
barWidth: 3,
hideScrollbar: true
});
this.target.find(".upload_zone").click(function() {
if (io.state == "NO_AUDIO") {
if (!has_audio_loaded) {
loadAudio();
io.mic = new p5.AudioIn();
}
io.recorder = new p5.SoundRecorder();
io.soundFile = new p5.SoundFile();
io.recorder.setInput(io.mic);
io.target.find(".recording").removeClass("hidden");
io.target.find(".not_recording").hide();
io.state = "RECORDING";
io.mic.start();
io.recorder.record(io.soundFile);
io.interval_id = window.setInterval(function () {
var volume = Math.floor(100 * io.mic.getLevel());
io.target.find(".volume_bar").width(`${(volume > 0 ? 10 : 0) + Math.round(2 * Math.sqrt(10 * volume))}px`)
}, 100)
}
});
this.target.find(".upload_zone").mousedown(function() {
if (io.state == "RECORDING" || io.state == "STOP_RECORDING") {
io.target.find(".upload_zone").hide();
io.recorder.stop();
var blob = io.soundFile.getBlob();
var reader = new window.FileReader();
reader.readAsDataURL(blob);
reader.onloadend = function() {
console.log(reader.result)
io.audio_data = reader.result;
io.target.find(".player").removeClass("hidden");
io.wavesurfer.load(io.audio_data);
if (io.state == "STOP_RECORDING") {
io.state = "RECORDED";
io.submit();
}
io.state = "RECORDED";
}
if (io.interval_id) {
window.clearInterval(io.interval_id);
}
}
})
this.target.find(".playpause").click(function () {
io.wavesurfer.playPause();
})
},
submit: function() {
if (this.state == "RECORDED") {
this.io_master.input(this.id, this.audio_data);
} else if (this.state == "RECORDING") {
this.state = "STOP_RECORDING";
this.target.find(".upload_zone").mousedown();
}
},
clear: function() {
this.audio_data = null;
this.state = "NO_AUDIO";
this.target.find(".not_recording").show();
this.target.find(".recording").addClass("hidden");
this.target.find(".player").addClass("hidden");
this.target.find(".upload_zone").show();
if (this.wavesurfer) {
this.wavesurfer.stop();
}
}
}

View File

@ -0,0 +1,54 @@
const dataframe_input = {
html: `
<div class="dataframe">
</div>
`,
init: function(opts) {
let row_count = opts.row_count;
let col_count = opts.col_count;
this.datatype = opts.datatype;
let data = [];
for (let i = 0; i < row_count; i++) {
let row = []
for (let j = 0; j < col_count; j++) {
row.push(null);
}
data.push(row);
}
let config = {data: data};
if (opts.headers || opts.datatype) {
let column_config = [];
for (let i = 0; i < col_count; i++) {
let column = {};
if (opts.datatype) {
let datatype = typeof opts.datatype === "string" ? opts.datatype : opts.datatype[i];
let datatype_map = {"str": "text", "bool": "checkbox", "number": "numeric", "date": "calendar"}
column.type = datatype_map[datatype];
}
if (opts.headers) {
column.title = opts.headers[i];
}
column_config.push(column);
}
config.columns = column_config;
}
this.config = config;
this.table = this.target.find(".dataframe").jexcel(config);
},
submit: function() {
let data = this.table.getData();
if (this.datatype) {
for (let i = 0; i < data[0].length; i++) {
if (this.datatype == "number" || (i < this.datatype.length && this.datatype[i] == "number")) {
for (let j = 0; j < data.length; j++) {
let val = data[j][i];
data[j][i] = val == "" ? 0 : parseFloat(val);
}
}
}
}
this.io_master.input(this.id, data);
},
clear: function() {
}
}

View File

@ -0,0 +1,68 @@
const file_input = {
html: `
<div class="upload_zone drop_zone">
<div class="input_caption">Drop File Here<br>- or -<br>Click to Upload</div>
</div>
<div class="file_display hide">
<div class="file_name"></div>
<div class="file_size"></div>
</div>
<input class="hidden_upload" type="file" />`
,
init: function(opts) {
var io = this;
this.target.find(".upload_zone").click(function (e) {
io.target.find(".hidden_upload").click();
});
this.target.on('drag dragstart dragend dragover dragenter dragleave drop',
".drop_zone", function(e) {
e.preventDefault();
e.stopPropagation();
})
this.target.on('drop', '.drop_zone', function(e) {
files = e.originalEvent.dataTransfer.files;
io.load_preview_from_files(files)
});
this.target.find('.hidden_upload').on('change', function (e) {
if (this.files) {
io.load_preview_from_files(this.files);
}
})
},
submit: function() {
if (this.file_data) {
this.io_master.input(this.id, this.file_data);
}
},
load_preview_from_files: function(files) {
if (!files.length || !window.FileReader) {
return
}
var ReaderObj = new FileReader()
ReaderObj.readAsDataURL(files[0])
ReaderObj.io = this;
ReaderObj.onloadend = function() {
let io = this.io;
io.target.find(".upload_zone").hide();
io.target.find(".file_display").removeClass("hide");
io.target.find(".file_name").text(files[0].name);
let bytes = files[0].size;
let units = ["B", "KB", "MB", "GB", "PB"];
let i = 0;
while (bytes > 1024) {
bytes /= 1024;
i++;
}
let unit = units[i];
io.target.find(".file_size").text(bytes.toFixed(1) + " " + unit);
io.file_data = this.result;
}
},
clear: function() {
this.target.find(".upload_zone").show();
this.target.find(".file_display").addClass("hide");
this.target.find(".hidden_upload").prop("value", "")
this.file_data = null;
},
file_data: null,
}
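The upload preview above reduces the byte count to a human-readable size; the same arithmetic as a Python sketch (with a bounds check the JS version omits):

    def human_size(n_bytes):
        units = ["B", "KB", "MB", "GB", "PB"]
        i = 0
        while n_bytes > 1024 and i < len(units) - 1:
            n_bytes /= 1024
            i += 1
        return "%.1f %s" % (n_bytes, units[i])

    human_size(3_250_000)   # -> "3.1 MB"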

View File

@ -48,7 +48,6 @@ const image_input = {
}) })
this.target.find('.edit_image').click(function (e) { this.target.find('.edit_image').click(function (e) {
io.overlay_target.removeClass("hide"); io.overlay_target.removeClass("hide");
io.target.find(".saliency_holder").addClass("hide");
}) })
this.tui_editor = new tui.ImageEditor(this.overlay_target. this.tui_editor = new tui.ImageEditor(this.overlay_target.
find(".image_editor")[0], { find(".image_editor")[0], {
@ -92,18 +91,6 @@ const image_input = {
this.target.find(".hidden_upload").prop("value", "") this.target.find(".hidden_upload").prop("value", "")
this.state = "NO_IMAGE"; this.state = "NO_IMAGE";
this.image_data = null; this.image_data = null;
this.target.find(".saliency_holder").addClass("hide");
},
output: function(data) {
if (this.target.find(".image_preview").attr("src")) {
var image = this.target.find(".image_preview");
var width = image.width();
var height = image.height();
this.target.find(".saliency_holder").removeClass("hide").html(`
<canvas class="saliency" width=${width} height=${height}></canvas>`);
var ctx = this.target.find(".saliency")[0].getContext('2d');
paintSaliency(ctx, width, height);
}
}, },
state: "NO_IMAGE", state: "NO_IMAGE",
image_data: null, image_data: null,

View File

@ -78,6 +78,7 @@ const microphone = {
if (this.state == "RECORDED") { if (this.state == "RECORDED") {
this.io_master.input(this.id, this.audio_data); this.io_master.input(this.id, this.audio_data);
} else if (this.state == "RECORDING") { } else if (this.state == "RECORDING") {
this.state = "STOP_RECORDING";
this.target.find(".upload_zone").mousedown(); this.target.find(".upload_zone").mousedown();
} }
}, },

View File

@ -9,14 +9,6 @@ const slider = {
let io = this; let io = this;
this.minimum = opts.minimum; this.minimum = opts.minimum;
this.target.css("height", "auto"); this.target.css("height", "auto");
let difference = opts.maximum - opts.minimum;
if (difference <= 1) {
step = 0.01;
} else if (difference <= 10) {
step = 0.1;
} else {
step = 1;
}
var handle = this.target.find(".ui-slider-handle"); var handle = this.target.find(".ui-slider-handle");
this.slider = this.target.find(".slider").slider({ this.slider = this.target.find(".slider").slider({
create: function() { create: function() {
@ -27,7 +19,7 @@ const slider = {
}, },
min: opts.minimum, min: opts.minimum,
max: opts.maximum, max: opts.maximum,
step: step step: opts.step
}); });
}, },
submit: function() { submit: function() {

View File

@ -0,0 +1,32 @@
const audio_output = {
html: `
<div class="player hidden">
<div class="waveform"></div>
<button class="playpause primary">Play / Pause</button>
</div>
`,
state: "NO_AUDIO",
init: function(opts) {
var io = this;
this.wavesurfer = WaveSurfer.create({
container: io.target.find('.waveform')[0],
waveColor: '#888888',
progressColor: '#e67e22',
barWidth: 3,
hideScrollbar: true
});
this.target.find(".playpause").click(function () {
io.wavesurfer.playPause();
})
},
output: function(data) {
this.target.find(".player").removeClass("hidden");
this.wavesurfer.load(data);
},
clear: function() {
this.target.find(".player").addClass("hidden");
if (this.wavesurfer) {
this.wavesurfer.stop();
}
}
}
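The player above loads the base64 wav that Audio.postprocess produces server-side; a sketch of a function returning the (sample_rate, data) tuple that Audio(type="numpy") expects (the stereo shape follows the Python docstring earlier in this commit):

    import numpy as np

    def tone(_):
        sr = 22050
        t = np.linspace(0, 1, sr, endpoint=False)
        mono = (0.5 * np.sin(2 * np.pi * 440 * t)).astype(np.float32)
        return sr, np.stack([mono, mono], axis=1)   # shape (samples, 2)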

View File

@ -0,0 +1,25 @@
const dataframe_output = {
html: `
<div class="dataframe"></div>
`,
init: function(opts) {
},
output: function(data) {
let config = {data: data.data};
if (data.headers) {
let column_config = [];
for (let header of data.headers) {
column_config.push({title: header});
}
config.columns = column_config;
}
if (this.table) {
this.clear();
}
this.table = this.target.find(".dataframe").jexcel(config);
},
clear: function() {
jexcel.destroy(this.target.find(".dataframe")[0]);
this.table = null;
}
}

View File

@ -0,0 +1,17 @@
const file_output = {
html: `
<div class="highlight_legend"></div>
<div class="output_text"></div>
`,
init: function(opts) {
this.target.css("height", "auto");
},
output: function(data) {
this.target.find(".output_text").text(data);
},
submit: function() {
},
clear: function() {
this.target.find(".output_text").empty();
}
}

View File

@ -0,0 +1,92 @@
const highlighted_text = {
html: `
<div class="highlight_legend">
<div class="color_legend invisible">
<span>-1</span>
<span>0</span>
<span>+1</span>
</div>
<div class="category_legend invisible"></div>
</div>
<div class="output_text"></div>
`,
init: function(opts) {
this.target.css("height", "auto");
this.color_map = {};
if (opts.color_map) {
this.generate_category_legend(opts.color_map);
}
},
new_category_index: 0,
generate_category_legend: function(map) {
console.log(map)
let default_colors = ["pink", "lightblue", "gold", "plum", "lightskyblue", "greenyellow", "khaki", "cyan", "moccasin", "lightgray"]
for (let category in map) {
if (category in this.color_map) {
continue;
}
let color = map[category];
if (!color) {
if (this.new_category_index < default_colors.length) {
color = default_colors[this.new_category_index];
this.new_category_index++;
} else {
function randInt(min, max) {
return Math.floor(Math.random() * (max- min) + min);
}
color = "rgb(" + randInt(128, 240) + ", " + randInt(128, 240) + ", " + randInt(128, 240) + ")"
}
}
this.color_map[category] = color;
this.target.find(".category_legend").append(`
<div class="category-label">
<div style="background-color:${color}">&nbsp;</div>
${category}
</div>
`)
}
},
output: function(data) {
if (data.length == 0) {
return;
} else if (typeof(data[0][1]) == "string") {
this.target.find(".category_legend").removeClass("invisible");
let new_color_map = {};
for (let span of data) {
let category = span[1];
if (category != null) {
new_color_map[category] = null;
}
}
this.generate_category_legend(new_color_map);
let html = "";
for (let span of data) {
let category = span[1];
let color = category == null ? "white" : this.color_map[category];
html += `<span title="${category}" style="background-color: ${color}">${span[0]}</span>`
}
this.target.find(".output_text").html(html);
} else {
this.target.find(".color_legend").removeClass("invisible");
let html = "";
for (let span of data) {
let value = span[1];
let color = "";
if (value < 0) {
color = "58,241,255," + (-value);
} else {
color = "230,126,34," + value;
}
html += `<span title="${value}" style="background-color: rgba(${color})">${span[0]}</span>`
}
this.target.find(".output_text").html(html);
}
},
submit: function() {
},
clear: function() {
this.target.find(".output_text").empty();
this.target.find(".highlight_legend div").addClass("invisible");
}
}
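For reference, the numeric span-coloring rule above restated as a Python sketch (channel values match the legend gradient in the CSS added earlier in this commit):

    def span_color(value):
        if value < 0:
            return "rgba(58, 241, 255, %s)" % (-value)   # blue end of the legend
        return "rgba(230, 126, 34, %s)" % value          # orange end of the legend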

View File

@ -0,0 +1,12 @@
const html_output = {
html: ``,
init: function(opts) {
this.target.css("height", "auto");
},
output: function(data) {
this.target.html(data);
},
clear: function() {
this.target.empty();
}
}

View File

@ -0,0 +1,14 @@
const json_output = {
html: `
`,
init: function(opts) {
this.target.css("height", "auto");
},
output: function(data) {
this.clear();
jsonTree.create(data, this.target[0]);
},
clear: function() {
this.target.empty();
}
}

gradio/static/js/vendor/jexcel.min.js vendored Normal file

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -33,6 +33,8 @@
<link type="text/css" href="../static/css/vendor/tui-color-picker.css" rel="stylesheet"> <link type="text/css" href="../static/css/vendor/tui-color-picker.css" rel="stylesheet">
<link type="text/css" href="../static/css/vendor/tui-image-editor.css" rel="stylesheet"> <link type="text/css" href="../static/css/vendor/tui-image-editor.css" rel="stylesheet">
<link type="text/css" href="../static/css/vendor/jquery-ui.css" rel="stylesheet"> <link type="text/css" href="../static/css/vendor/jquery-ui.css" rel="stylesheet">
<link type="text/css" href="../static/css/vendor/jexcel.min.css" rel="stylesheet">
<link type="text/css" href="../static/css/vendor/jsuites.min.css" rel="stylesheet">
<link href="https://fonts.googleapis.com/css?family=Open+Sans" rel="stylesheet"> <link href="https://fonts.googleapis.com/css?family=Open+Sans" rel="stylesheet">
<link rel="stylesheet" href="../static/css/style.css"> <link rel="stylesheet" href="../static/css/style.css">
@ -47,10 +49,15 @@
<link rel="stylesheet" href="../static/css/interfaces/input/slider.css"> <link rel="stylesheet" href="../static/css/interfaces/input/slider.css">
<link rel="stylesheet" href="../static/css/interfaces/input/webcam.css"> <link rel="stylesheet" href="../static/css/interfaces/input/webcam.css">
<link rel="stylesheet" href="../static/css/interfaces/input/microphone.css"> <link rel="stylesheet" href="../static/css/interfaces/input/microphone.css">
<link rel="stylesheet" href="../static/css/interfaces/input/file.css">
<link rel="stylesheet" href="../static/css/interfaces/output/image.css"> <link rel="stylesheet" href="../static/css/interfaces/output/image.css">
<link rel="stylesheet" href="../static/css/interfaces/output/label.css"> <link rel="stylesheet" href="../static/css/interfaces/output/label.css">
<link rel="stylesheet" href="../static/css/interfaces/output/key_values.css"> <link rel="stylesheet" href="../static/css/interfaces/output/key_values.css">
<link rel="stylesheet" href="../static/css/interfaces/output/textbox.css"> <link rel="stylesheet" href="../static/css/interfaces/output/textbox.css">
<link rel="stylesheet" href="../static/css/interfaces/output/highlighted_text.css">
<link rel="stylesheet" href="../static/css/interfaces/output/audio.css">
<link rel="stylesheet" href="../static/css/interfaces/output/json.css">
<link rel="stylesheet" href="../static/css/interfaces/output/html.css">
<link rel="stylesheet" href="../static/css/loading.css"/> <link rel="stylesheet" href="../static/css/loading.css"/>
</head> </head>
@ -77,6 +84,7 @@
<script src="../static/js/vendor/html2canvas.min.js"></script> <script src="../static/js/vendor/html2canvas.min.js"></script>
<script src="../static/js/vendor/jquery-ui.min.js"></script> <script src="../static/js/vendor/jquery-ui.min.js"></script>
<script src="../static/js/vendor/jquery.ui.touch-punch.js"></script> <script src="../static/js/vendor/jquery.ui.touch-punch.js"></script>
<script src="../static/js/vendor/jsonTree.js"></script>
<script src="../static/js/vendor/fabric.js"></script> <script src="../static/js/vendor/fabric.js"></script>
<script src="../static/js/vendor/tui-code-snippet.min.js"></script> <script src="../static/js/vendor/tui-code-snippet.min.js"></script>
<script src="../static/js/vendor/FileSaver.min.js"></script> <script src="../static/js/vendor/FileSaver.min.js"></script>
@ -84,6 +92,12 @@
<script src="../static/js/vendor/tui-image-editor.js"></script> <script src="../static/js/vendor/tui-image-editor.js"></script>
<script src="../static/js/vendor/white-theme.js"></script> <script src="../static/js/vendor/white-theme.js"></script>
<script src="../static/js/vendor/black-theme.js"></script> <script src="../static/js/vendor/black-theme.js"></script>
<script src="../static/js/vendor/wavesurfer.min.js"></script>
<script src="../static/js/vendor/p5.min.js"></script>
<script src="../static/js/vendor/p5.sound.min.js"></script>
<script src="../static/js/vendor/p5.dom.min.js"></script>
<script src="../static/js/vendor/jexcel.min.js"></script>
<script src="../static/js/vendor/jsuites.min.js"></script>
<script src="../static/js/utils.js"></script> <script src="../static/js/utils.js"></script>
<script src="../static/js/all_io.js"></script> <script src="../static/js/all_io.js"></script>
@ -97,18 +111,22 @@
<script src="../static/js/interfaces/input/checkbox.js"></script> <script src="../static/js/interfaces/input/checkbox.js"></script>
<script src="../static/js/interfaces/input/dropdown.js"></script> <script src="../static/js/interfaces/input/dropdown.js"></script>
<script src="../static/js/interfaces/input/slider.js"></script> <script src="../static/js/interfaces/input/slider.js"></script>
<script src="../static/js/interfaces/input/csv.js"></script> <script src="../static/js/interfaces/input/dataframe.js"></script>
<script src="../static/js/interfaces/input/audio.js"></script>
<script src="../static/js/interfaces/input/file.js"></script>
<script src="../static/js/vendor/webcam.min.js"></script> <script src="../static/js/vendor/webcam.min.js"></script>
<script src="../static/js/interfaces/input/webcam.js"></script> <script src="../static/js/interfaces/input/webcam.js"></script>
<script src="../static/js/interfaces/input/microphone.js"></script> <script src="../static/js/interfaces/input/microphone.js"></script>
<script src="../static/js/vendor/wavesurfer.min.js"></script>
<script src="../static/js/vendor/p5.min.js"></script>
<script src="../static/js/vendor/p5.sound.min.js"></script>
<script src="../static/js/vendor/p5.dom.min.js"></script>
<script src="../static/js/interfaces/output/image.js"></script> <script src="../static/js/interfaces/output/image.js"></script>
<script src="../static/js/interfaces/output/label.js"></script> <script src="../static/js/interfaces/output/label.js"></script>
<script src="../static/js/interfaces/output/key_values.js"></script> <script src="../static/js/interfaces/output/key_values.js"></script>
<script src="../static/js/interfaces/output/textbox.js"></script> <script src="../static/js/interfaces/output/textbox.js"></script>
<script src="../static/js/interfaces/output/highlighted_text.js"></script>
<script src="../static/js/interfaces/output/audio.js"></script>
<script src="../static/js/interfaces/output/json.js"></script>
<script src="../static/js/interfaces/output/html.js"></script>
<script src="../static/js/interfaces/output/dataframe.js"></script>
<script src="../static/js/interfaces/output/file.js"></script>
<script src="../static/js/gradio.js"></script> <script src="../static/js/gradio.js"></script>
<script> <script>
$.getJSON("static/config.json", function(config) { $.getJSON("static/config.json", function(config) {

View File

@ -7,11 +7,14 @@ BASE64_SKETCH = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAT4AAAE+CAYAAAAUO
RAND_STRING = "2wBDAAYEBQYFBAYGBQYHBwYIC" RAND_STRING = "2wBDAAYEBQYFBAYGBQYHBwYIC"
PACKAGE_NAME = 'gradio' PACKAGE_NAME = 'gradio'
# Where to find the static resources associated with each template.
BASE_INPUT_INTERFACE_JS_PATH = 'static/js/interfaces/input/{}.js'
class TestSketchpad(unittest.TestCase): class TestSketchpad(unittest.TestCase):
def test_path_exists(self): def test_path_exists(self):
inp = inputs.Sketchpad() inp = inputs.Sketchpad()
path = inputs.BASE_INPUT_INTERFACE_JS_PATH.format(inp.__class__.__name__.lower()) path = BASE_INPUT_INTERFACE_JS_PATH.format(inp.__class__.__name__.lower())
self.assertTrue(os.path.exists(os.path.join(PACKAGE_NAME, path))) self.assertTrue(os.path.exists(os.path.join(PACKAGE_NAME, path)))
def test_preprocessing(self): def test_preprocessing(self):
@ -23,7 +26,7 @@ class TestSketchpad(unittest.TestCase):
class TestWebcam(unittest.TestCase): class TestWebcam(unittest.TestCase):
def test_path_exists(self): def test_path_exists(self):
inp = inputs.Webcam() inp = inputs.Webcam()
path = inputs.BASE_INPUT_INTERFACE_JS_PATH.format(inp.__class__.__name__.lower()) path = BASE_INPUT_INTERFACE_JS_PATH.format(inp.__class__.__name__.lower())
self.assertTrue(os.path.exists(os.path.join(PACKAGE_NAME, path))) self.assertTrue(os.path.exists(os.path.join(PACKAGE_NAME, path)))
def test_preprocessing(self): def test_preprocessing(self):
@ -35,7 +38,7 @@ class TestWebcam(unittest.TestCase):
class TestTextbox(unittest.TestCase): class TestTextbox(unittest.TestCase):
def test_path_exists(self): def test_path_exists(self):
inp = inputs.Textbox() inp = inputs.Textbox()
path = inputs.BASE_INPUT_INTERFACE_JS_PATH.format( path = BASE_INPUT_INTERFACE_JS_PATH.format(
inp.__class__.__name__.lower()) inp.__class__.__name__.lower())
self.assertTrue(os.path.exists(os.path.join(PACKAGE_NAME, path))) self.assertTrue(os.path.exists(os.path.join(PACKAGE_NAME, path)))
@ -48,7 +51,7 @@ class TestTextbox(unittest.TestCase):
class TestImageUpload(unittest.TestCase): class TestImageUpload(unittest.TestCase):
def test_path_exists(self): def test_path_exists(self):
inp = inputs.Image() inp = inputs.Image()
path = inputs.BASE_INPUT_INTERFACE_JS_PATH.format(inp.__class__.__name__.lower()) path = BASE_INPUT_INTERFACE_JS_PATH.format(inp.__class__.__name__.lower())
self.assertTrue(os.path.exists(os.path.join(PACKAGE_NAME, path))) self.assertTrue(os.path.exists(os.path.join(PACKAGE_NAME, path)))
def test_preprocessing(self): def test_preprocessing(self):

File diff suppressed because one or more lines are too long