pre eject

Ali Abid 2021-05-24 08:31:44 -07:00
parent 12223660f1
commit c373b2754b
34 changed files with 1180 additions and 244 deletions

demo/calculator.py

@@ -21,7 +21,8 @@ iface = gr.Interface(calculator,
[-4, "multiply", 2.5],
[0, "subtract", 1.2],
],
-    title="test calculator"
+    title="test calculator",
+    description="Here's a sample toy calculator. Enjoy!"
)
if __name__ == "__main__":

demo/digit_classifier.py (new file, 33 lines)

@@ -0,0 +1,33 @@
# Demo: (Image) -> (Label)
import tensorflow as tf
import gradio as gr
from urllib.request import urlretrieve
urlretrieve("https://gr-models.s3-us-west-2.amazonaws.com/mnist-model.h5", "mnist-model.h5")
model = tf.keras.models.load_model("mnist-model.h5")
def recognize_digit(image):
image = image.reshape(1, -1)
prediction = model.predict(image).tolist()[0]
return {str(i): prediction[i] for i in range(10)}
im = gr.inputs.Image(shape=(28, 28), image_mode='L', invert_colors=False, source="canvas")
iface = gr.Interface(
recognize_digit,
im,
gr.outputs.Label(num_top_classes=3),
live=True,
interpretation="default",
capture_session=True,
)
iface.test_launch()
if __name__ == "__main__":
iface.launch()

demo/face_segment.py (new file, 39 lines)

@@ -0,0 +1,39 @@
import gradio as gr
import os, sys
file_folder = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(file_folder, "utils"))
from FCN8s_keras import FCN
from PIL import Image
import cv2
import tensorflow as tf
from drive import download_file_from_google_drive
import numpy as np
weights = os.path.join(file_folder, "face_seg_model_weights.h5")
if not os.path.exists(weights):
file_id = "1IerDF2DQqmJWqyvxYZOICJT1eThnG8WR"
download_file_from_google_drive(file_id, weights)
model1 = FCN()
model1.load_weights(weights)
def segment_face(inp):
im = Image.fromarray(np.uint8(inp))
im = im.resize((500, 500))
in_ = np.array(im, dtype=np.float32)
in_ = in_[:, :, ::-1]
in_ -= np.array((104.00698793,116.66876762,122.67891434))
in_ = in_[np.newaxis,:]
out = model1.predict(in_)
out_resized = cv2.resize(np.squeeze(out), (inp.shape[1], inp.shape[0]))
out_resized_clipped = np.clip(out_resized.argmax(axis=2), 0, 1).astype(np.float64)
result = (out_resized_clipped[:, :, np.newaxis] + 0.25)/1.25 * inp.astype(np.float64).astype(np.uint8)
return result / 255
iface = gr.Interface(segment_face, gr.inputs.Image(source="webcam", tool=None), "image", capture_session=True)
if __name__ == "__main__":
iface.launch()

demo/filter_records.py (new file, 23 lines)

@@ -0,0 +1,23 @@
# Demo: (Dataframe, Dropdown) -> (Dataframe)
import gradio as gr
def filter_records(records, gender):
return records[records['gender'] == gender]
iface = gr.Interface(filter_records,
[
gr.inputs.Dataframe(headers=["name", "age", "gender"], datatype=["str", "number", "str"], row_count=5),
gr.inputs.Dropdown(["M", "F", "O"])
],
"dataframe",
description="Enter gender as 'M', 'F', or 'O' for other."
)
iface.test_launch()
if __name__ == "__main__":
iface.launch()

demo/form_graph.py (new file, 39 lines)

@@ -0,0 +1,39 @@
import gradio as gr
import random
import matplotlib.pyplot as plt
import numpy as np
def plot_forecast(final_year, companies, noise, show_legend, point_style):
start_year = 2020
x = np.arange(start_year, final_year + 1)
year_count = x.shape[0]
plt_format = ({"cross": "X", "line": "-", "circle": "o--"})[point_style]
fig = plt.figure()
ax = fig.add_subplot(111)
for i, company in enumerate(companies):
series = np.arange(0, year_count, dtype=float)
series = series ** 2 * (i + 1)
series += np.random.rand(year_count) * noise
ax.plot(x, series, plt_format)
if show_legend:
plt.legend(companies)
plt.close()
return fig
iface = gr.Interface(plot_forecast,
[
gr.inputs.Radio([2025, 2030, 2035, 2040],
label="Project to:"),
gr.inputs.CheckboxGroup(
["Google", "Microsoft", "Gradio"], label="Company Selection"),
gr.inputs.Slider(1, 100, label="Noise Level"),
gr.inputs.Checkbox(label="Show Legend"),
gr.inputs.Dropdown(["cross", "line", "circle"], label="Style"),
],
gr.outputs.Image(plot=True, label="forecast")
)
if __name__ == "__main__":
iface.launch()

demo/gender_sentence_custom_interpretation.py (new file, 29 lines)

@@ -0,0 +1,29 @@
import gradio as gr
import re
male_words, female_words = ["he", "his", "him"], ["she", "her"]
def gender_of_sentence(sentence):
male_count = len([word for word in sentence.split() if word.lower() in male_words])
female_count = len([word for word in sentence.split() if word.lower() in female_words])
total = max(male_count + female_count, 1)
return {"male": male_count / total, "female": female_count / total}
def interpret_gender(sentence):
result = gender_of_sentence(sentence)
is_male = result["male"] > result["female"]
interpretation = []
for word in re.split('( )', sentence):
score = 0
token = word.lower()
if (is_male and token in male_words) or (not is_male and token in female_words):
score = 1
elif (is_male and token in female_words) or (not is_male and token in male_words):
score = -1
interpretation.append((word, score))
return interpretation
iface = gr.Interface(
fn=gender_of_sentence, inputs=gr.inputs.Textbox(default="She went to his house to get her keys."),
outputs="label", interpretation=interpret_gender)
if __name__ == "__main__":
iface.launch()

demo/gender_sentence_default_interpretation.py (new file, 15 lines)

@@ -0,0 +1,15 @@
import gradio as gr
import re
male_words, female_words = ["he", "his", "him"], ["she", "her"]
def gender_of_sentence(sentence):
male_count = len([word for word in sentence.split() if word.lower() in male_words])
female_count = len([word for word in sentence.split() if word.lower() in female_words])
total = max(male_count + female_count, 1)
return {"male": male_count / total, "female": female_count / total}
iface = gr.Interface(
fn=gender_of_sentence, inputs=gr.inputs.Textbox(default="She went to his house to get her keys."),
outputs="label", interpretation="default")
if __name__ == "__main__":
iface.launch()

demo/hello_world.py (new file, 8 lines)

@@ -0,0 +1,8 @@
import gradio as gr
def greet(name):
return "Hello " + name + "!"
iface = gr.Interface(fn=greet, inputs="text", outputs="text")
if __name__ == "__main__":
iface.launch()

demo/hello_world_2.py (new file, 11 lines)

@@ -0,0 +1,11 @@
import gradio as gr
def greet(name):
return "Hello " + name + "!"
iface = gr.Interface(
fn=greet,
inputs=gr.inputs.Textbox(lines=2, placeholder="Name Here..."),
outputs="text")
if __name__ == "__main__":
iface.launch()

demo/hello_world_3.py (new file, 15 lines)

@@ -0,0 +1,15 @@
import gradio as gr
def greet(name, is_morning, temperature):
salutation = "Good morning" if is_morning else "Good evening"
greeting = "%s %s. It is %s degrees today" % (
salutation, name, temperature)
celsius = (temperature - 32) * 5 / 9
return greeting, round(celsius, 2)
iface = gr.Interface(
fn=greet,
inputs=["text", "checkbox", gr.inputs.Slider(0, 100)],
outputs=["text", "number"])
if __name__ == "__main__":
iface.launch()

demo/longest_word.py (new file, 18 lines)

@@ -0,0 +1,18 @@
# Demo: (Textbox) -> (Label)
import gradio as gr
def longest_word(text):
words = text.split(" ")
lengths = [len(word) for word in words]
return max(lengths)
ex = "The quick brown fox jumped over the lazy dog."
iface = gr.Interface(longest_word, "textbox", "label",
interpretation="default", examples=[[ex]])
iface.test_launch()
if __name__ == "__main__":
iface.launch()

demo/matrix_transpose.py (new file, 26 lines)

@@ -0,0 +1,26 @@
# Demo: (Dataframe) -> (Dataframe)
import gradio as gr
import numpy as np
def transpose(matrix):
return matrix.T
iface = gr.Interface(
transpose,
gr.inputs.Dataframe(type="numpy", datatype="number", row_count=5, col_count=3),
"numpy",
examples=[
[np.zeros((3,3)).tolist()],
[np.ones((2,2)).tolist()],
[np.random.randint(0, 10, (3,10)).tolist()],
[np.random.randint(0, 10, (10,3)).tolist()],
[np.random.randint(0, 10, (10,10)).tolist()],
]
)
iface.test_launch()
if __name__ == "__main__":
iface.launch()

demo/outbreak_forecast.py (new file, 35 lines)

@@ -0,0 +1,35 @@
import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
from math import sqrt
def outbreak(r, month, countries, social_distancing):
months = ["January", "February", "March", "April", "May"]
m = months.index(month)
start_day = 30 * m
final_day = 30 * (m + 1)
x = np.arange(start_day, final_day+1)
day_count = x.shape[0]
pop_count = {"USA": 350, "Canada": 40, "Mexico": 300, "UK": 120}
r = sqrt(r)
if social_distancing:
r = sqrt(r)
for i, country in enumerate(countries):
series = x ** (r) * (i + 1)
plt.plot(x, series)
plt.title("Outbreak in " + month)
plt.ylabel("Cases")
plt.xlabel("Days since Day 0")
plt.legend(countries)
return plt
iface = gr.Interface(outbreak,
[
gr.inputs.Slider(1, 4, default=3.2, label="R"),
gr.inputs.Dropdown(["January", "February", "March", "April", "May"], label="Month"),
gr.inputs.CheckboxGroup(["USA", "Canada", "Mexico", "UK"], label="Countries"),
gr.inputs.Checkbox(label="Social Distancing?"),
],
"plot")
if __name__ == "__main__":
iface.launch()

demo/question_answer.py (new file, 18 lines)

@@ -0,0 +1,18 @@
import gradio as gr
import os, sys
file_folder = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(file_folder, "utils"))
from bert import QA
model = QA('bert-large-uncased-whole-word-masking-finetuned-squad')
def qa_func(paragraph, question):
return model.predict(paragraph, question)["answer"]
iface = gr.Interface(qa_func,
[
gr.inputs.Textbox(lines=7, label="Context", default="Victoria has a written constitution enacted in 1975, but based on the 1855 colonial constitution, passed by the United Kingdom Parliament as the Victoria Constitution Act 1855, which establishes the Parliament as the state's law-making body for matters coming under state responsibility. The Victorian Constitution can be amended by the Parliament of Victoria, except for certain 'entrenched' provisions that require either an absolute majority in both houses, a three-fifths majority in both houses, or the approval of the Victorian people in a referendum, depending on the provision."),
gr.inputs.Textbox(lines=1, label="Question", default="When did Victoria enact its constitution?"),
],
gr.outputs.Textbox(label="Answer"))
if __name__ == "__main__":
iface.launch()

demo/spectogram.py (new file, 22 lines)

@@ -0,0 +1,22 @@
# Demo: (Audio) -> (Image)
import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
from scipy import signal
def spectrogram(audio):
sr, data = audio
if len(data.shape) == 2:
data = np.mean(data, axis=1)  # average stereo channels (samples are axis 0) down to mono
frequencies, times, spectrogram_data = signal.spectrogram(data, sr, window="hamming")
plt.pcolormesh(times, frequencies, np.log10(spectrogram_data))
return plt
iface = gr.Interface(spectrogram, "audio", "plot")
iface.test_launch()
if __name__ == "__main__":
iface.launch()

demo/stock_forecast.py (new file, 38 lines)

@@ -0,0 +1,38 @@
# Demo: (Radio, CheckboxGroup, Slider, Checkbox, Dropdown) -> (Image)
import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
def stock_forecast(final_year, companies, noise, show_legend, point_style):
start_year = 2020
x = np.arange(start_year, final_year + 1)
year_count = x.shape[0]
plt_format = ({"cross": "X", "line": "-", "circle": "o--"})[point_style]
fig = plt.figure()
ax = fig.add_subplot(111)
for i, company in enumerate(companies):
series = np.arange(0, year_count, dtype=float)
series = series ** 2 * (i + 1)
series += np.random.rand(year_count) * noise
ax.plot(x, series, plt_format)
if show_legend:
plt.legend(companies)
plt.close()
return fig
iface = gr.Interface(
stock_forecast,
[
gr.inputs.Radio([2025, 2030, 2035, 2040], label="Project to:"),
gr.inputs.CheckboxGroup(["Google", "Microsoft", "Gradio"]),
gr.inputs.Slider(1, 100),
"checkbox",
gr.inputs.Dropdown(["cross", "line", "circle"], label="Style")],
gr.outputs.Image(plot=True, label="forecast"))
iface.test_launch()
if __name__ == "__main__":
iface.launch()

@@ -0,0 +1,22 @@
# Demo: (Image) -> (Image)
import gradio as gr
def detect(image):
    return (image, [("sign", 210, 80, 280, 150), ("train", 100, 100, 180, 150)])
iface = gr.Interface(detect,
    gr.inputs.Image(type="pil"),
    "segmented_image",
examples=[
["images/stop_1.jpg"],
["images/stop_2.jpg"],
])
iface.test_launch()
if __name__ == "__main__":
iface.launch()

demo/tax_calculator.py (new file, 48 lines)

@@ -0,0 +1,48 @@
# Demo: (Number, Radio, Dataframe) -> (Number)
import gradio as gr
def tax_calculator(income, marital_status, assets):
tax_brackets = [
(10, 0),
(25, 8),
(60, 12),
(120, 20),
(250, 30)
]
total_deductible = sum(assets[assets["Deduct"]]["Cost"])
taxable_income = income - total_deductible
total_tax = 0
for bracket, rate in tax_brackets:
if taxable_income > bracket:
total_tax += (taxable_income - bracket) * rate / 100
if marital_status == "Married":
total_tax *= 0.75
elif marital_status == "Divorced":
total_tax *= 0.8
return round(total_tax)
iface = gr.Interface(
tax_calculator,
[
"number",
gr.inputs.Radio(["Single", "Married", "Divorced"]),
gr.inputs.Dataframe(
headers=["Item", "Cost", "Deduct"],
datatype=["str", "number", "bool"],
label="Assets Purchased this Year"
)
],
"number",
interpretation="default",
examples=[
[10000, "Married", [["Car", 5000, False], ["Laptop", 800, True]]],
[80000, "Single", [["Suit", 800, True], ["Watch", 1800, False]]],
]
)
if __name__ == "__main__":
iface.launch()

demo/text_analysis.py (new file, 40 lines)

@@ -0,0 +1,40 @@
# Demo: (Textbox) -> (HighlightedText, KeyValues, HTML)
import spacy
from spacy import displacy
import gradio as gr
nlp = spacy.load("en_core_web_sm")
def text_analysis(text):
doc = nlp(text)
html = displacy.render(doc, style="dep", page=True)
html = "<div style='max-width:100%; max-height:360px; overflow:auto'>" + html + "</div>"
pos_count = {
"char_count": len(text),
"token_count": 0,
}
pos_tokens = []
for token in doc:
pos_tokens.extend([(token.text, token.pos_), (" ", None)])
return pos_tokens, pos_count, html
iface = gr.Interface(
text_analysis,
gr.inputs.Textbox(placeholder="Enter sentence here..."),
[
"highlight", "key_values", "html"
],
examples=[
["What a beautiful morning for a walk!"],
["It was the best of times, it was the worst of times."],
]
)
iface.test_launch()
if __name__ == "__main__":
iface.launch()

package-lock.json

@@ -19,6 +19,7 @@
"react-dom": "^17.0.2",
"react-json-tree": "^0.15.0",
"react-scripts": "4.0.3",
"react-sketch": "^0.5.1",
"react-webcam": "^5.2.3",
"sass": "^1.32.8",
"web-vitals": "^1.1.1"
@@ -15152,6 +15153,55 @@
"node": ">=0.10.0"
}
},
"node_modules/react-sketch": {
"version": "0.5.1",
"resolved": "https://registry.npmjs.org/react-sketch/-/react-sketch-0.5.1.tgz",
"integrity": "sha512-a0BPc6z/Ec/7KN4KL/HGLfAhDfLyaBN8GDvILfBZVX74PnhWvJocofc/cHSCvtNtEmGSdmJPjrewfAUXKo78pA==",
"dependencies": {
"prop-types": "^15.6.2",
"react": "^16.6.1",
"react-dom": "^16.6.1"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/react-sketch/node_modules/react": {
"version": "16.14.0",
"resolved": "https://registry.npmjs.org/react/-/react-16.14.0.tgz",
"integrity": "sha512-0X2CImDkJGApiAlcf0ODKIneSwBPhqJawOa5wCtKbu7ZECrmS26NvtSILynQ66cgkT/RJ4LidJOc3bUESwmU8g==",
"dependencies": {
"loose-envify": "^1.1.0",
"object-assign": "^4.1.1",
"prop-types": "^15.6.2"
},
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/react-sketch/node_modules/react-dom": {
"version": "16.14.0",
"resolved": "https://registry.npmjs.org/react-dom/-/react-dom-16.14.0.tgz",
"integrity": "sha512-1gCeQXDLoIqMgqD3IO2Ah9bnf0w9kzhwN5q4FGnHZ67hBm9yePzB5JJAIQCc8x3pFnNlwFq4RidZggNAAkzWWw==",
"dependencies": {
"loose-envify": "^1.1.0",
"object-assign": "^4.1.1",
"prop-types": "^15.6.2",
"scheduler": "^0.19.1"
},
"peerDependencies": {
"react": "^16.14.0"
}
},
"node_modules/react-sketch/node_modules/scheduler": {
"version": "0.19.1",
"resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.19.1.tgz",
"integrity": "sha512-n/zwRWRYSUj0/3g/otKDRPMh6qv2SYMWNq85IEa8iZyAv8od9zDYpGSnpBEjNgcMNq6Scbu5KfIPxNF72R/2EA==",
"dependencies": {
"loose-envify": "^1.1.0",
"object-assign": "^4.1.1"
}
},
"node_modules/react-webcam": {
"version": "5.2.3",
"resolved": "https://registry.npmjs.org/react-webcam/-/react-webcam-5.2.3.tgz",
@@ -32598,6 +32648,48 @@
}
}
},
"react-sketch": {
"version": "0.5.1",
"resolved": "https://registry.npmjs.org/react-sketch/-/react-sketch-0.5.1.tgz",
"integrity": "sha512-a0BPc6z/Ec/7KN4KL/HGLfAhDfLyaBN8GDvILfBZVX74PnhWvJocofc/cHSCvtNtEmGSdmJPjrewfAUXKo78pA==",
"requires": {
"prop-types": "^15.6.2",
"react": "^16.6.1",
"react-dom": "^16.6.1"
},
"dependencies": {
"react": {
"version": "16.14.0",
"resolved": "https://registry.npmjs.org/react/-/react-16.14.0.tgz",
"integrity": "sha512-0X2CImDkJGApiAlcf0ODKIneSwBPhqJawOa5wCtKbu7ZECrmS26NvtSILynQ66cgkT/RJ4LidJOc3bUESwmU8g==",
"requires": {
"loose-envify": "^1.1.0",
"object-assign": "^4.1.1",
"prop-types": "^15.6.2"
}
},
"react-dom": {
"version": "16.14.0",
"resolved": "https://registry.npmjs.org/react-dom/-/react-dom-16.14.0.tgz",
"integrity": "sha512-1gCeQXDLoIqMgqD3IO2Ah9bnf0w9kzhwN5q4FGnHZ67hBm9yePzB5JJAIQCc8x3pFnNlwFq4RidZggNAAkzWWw==",
"requires": {
"loose-envify": "^1.1.0",
"object-assign": "^4.1.1",
"prop-types": "^15.6.2",
"scheduler": "^0.19.1"
}
},
"scheduler": {
"version": "0.19.1",
"resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.19.1.tgz",
"integrity": "sha512-n/zwRWRYSUj0/3g/otKDRPMh6qv2SYMWNq85IEa8iZyAv8od9zDYpGSnpBEjNgcMNq6Scbu5KfIPxNF72R/2EA==",
"requires": {
"loose-envify": "^1.1.0",
"object-assign": "^4.1.1"
}
}
}
},
"react-webcam": {
"version": "5.2.3",
"resolved": "https://registry.npmjs.org/react-webcam/-/react-webcam-5.2.3.tgz",

package.json

@@ -14,6 +14,7 @@
"react-dom": "^17.0.2",
"react-json-tree": "^0.15.0",
"react-scripts": "4.0.3",
"react-sketch": "^0.5.1",
"react-webcam": "^5.2.3",
"sass": "^1.32.8",
"web-vitals": "^1.1.1"

@@ -53,7 +53,7 @@ let output_component_map = {
"html": [HTMLOutput, HTMLOutputExample],
"image": [ImageOutput, ImageOutputExample],
"json": [JSONOutput, JSONOutputExample],
"key_values": [KeyValuesOutput, KeyValuesOutputExample],
"keyvalues": [KeyValuesOutput, KeyValuesOutputExample],
"label": [LabelOutput, LabelOutputExample],
"textbox": [TextboxOutput, TextboxOutputExample],
"video": [VideoOutput, VideoOutputExample],
@@ -64,6 +64,7 @@ export class GradioInterface extends React.Component {
super(props);
this.clear = this.clear.bind(this);
this.submit = this.submit.bind(this);
+    this.flag = this.flag.bind(this);
this.handleExampleChange = this.handleExampleChange.bind(this);
this.state = this.get_default_state();
this.state["examples_page"] = 0;
@@ -84,8 +85,10 @@
for (let [i, component] of this.props.output_components.entries()) {
state[index_start + i] = component.default !== undefined ? component.default : null;
}
state["predicting"] = false;
state["submitting"] = false;
state["error"] = false;
state["complete"] = false;
state["just_flagged"] = false;
state["has_changed"] = false;
state["example_id"] = null;
return state;
@@ -107,12 +110,35 @@
for (let [i, value] of output["data"].entries()) {
this.setState({ [index_start + i]: value });
}
this.setState({ "submitting": false });
this.setState({ "submitting": false, "complete": true });
if (this.state.has_changed) {
this.submit();
}
}).catch(e => {
console.error(e);
this.setState({
"error": true,
"submitting": false
});
});
}
flag() {
if (!this.state.complete) {
return;
}
let component_state = { "input_data": [], "output_data": [] };
for (let i = 0; i < this.props.input_components.length; i++) {
component_state["input_data"].push(this.state[i]);
}
for (let i = 0; i < this.props.output_components.length; i++) {
component_state["output_data"].push(this.state[this.props.input_components.length + i]);
}
this.setState({ "just_flagged": true });
window.setTimeout(() => {
this.setState({ "just_flagged": false });
}, 1000)
this.props.fn(component_state, "flag");
}
handleChange(_id, value) {
let state_change = { [_id]: value, "has_changed": true };
if (this.props.live && !(this.state.submitting)) {
@@ -123,7 +149,7 @@
}
handleExampleChange(example_id) {
let state_change = {};
this.setState({"example_id": example_id});
this.setState({ "example_id": example_id });
for (let [i, item] of this.props.examples[example_id].entries()) {
let ExampleComponent = i < this.props.input_components.length ? input_component_map[this.props.input_components[i].name][1] : output_component_map[this.props.output_components[i - this.props.input_components.length].name][1]
state_change[i] = ExampleComponent.preprocess(item, this.examples_dir).then((data) => {
@@ -138,7 +164,7 @@
let status = false;
if (this.state.submitting) {
status = (<div className="loading">
<img className="h-4" alt="loading" src={logo_loading} />
<img alt="loading" src={logo_loading} />
</div>)
} else if (this.state.error) {
status = (<div className="loading">
@@ -166,9 +192,9 @@
})}
</div>
<div className="panel_buttons">
<button className="panel_button" onClick={this.clear.bind(this)}>Clear</button>
<button className="panel_button" onClick={this.clear}>Clear</button>
{this.props.live ? false :
<button className="panel_button submit" onClick={this.submit.bind(this)}>Submit</button>
<button className="panel_button submit" onClick={this.submit}>Submit</button>
}
</div>
</div>
@@ -205,25 +231,15 @@
</div>
: false}
{this.props.allow_flagging ?
<button className="panel_button">
Flag
{/* <div className="dropcontent"></div> */}
<button className={classNames("panel_button", { "disabled": this.state.complete === false })}
onClick={this.flag}>
{this.state.just_flagged ? "Flagged" : "Flag"}
{/* <div className="dropcontent"></div> */}
</button>
: false}
</div>
</div>
</div>
-      {this.state.show_interpretation ?
-        <div className="interpretation_explained">
-          <h4>Interpretation Legend <span className='close_explain'>&#10006;</span></h4>
-          <div className='interpretation_legend'>
-            <div>&larr; Decreased output score / confidence</div>
-            <div>Increased output score / confidence &rarr;</div>
-          </div>
-          <p>When you click Interpret, you can see how different parts of the input contributed to the final output. The legend above will highlight each of the input components as follows:</p>
-          <ul></ul>
-        </div>
-        : false}
{this.props.examples ? <GradioInterfaceExamples examples={this.props.examples} examples_dir={this.examples_dir} example_id={this.state.example_id} input_components={this.props.input_components} output_components={this.props.output_components} handleExampleChange={this.handleExampleChange} /> : false}
{article}
</div>
@@ -246,7 +262,7 @@ class GradioInterfaceExamples extends React.Component {
</button>
</div>
<div className="pages hidden">Page:</div>
-          <table>
+          <table className="examples_table">
<thead>
<tr>
{this.props.input_components.map((component, i) => {

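Note on the change above: the new flag() handler collects the current input and output values from component state and sends them back with a "flag" action, which the backend persists via flag_data in networking.py (changed later in this commit). Nothing new is needed on the Python side; a minimal sketch of a flaggable interface, assuming the allow_flagging and flagging_dir options that gr.Interface already exposes at this commit:

import gradio as gr

def greet(name):
    return "Hello " + name + "!"

# allow_flagging renders the Flag button; each click appends the current
# input/output pair as a CSV row under flagging_dir.
iface = gr.Interface(fn=greet, inputs="text", outputs="text",
                     allow_flagging=True, flagging_dir="flagged")

if __name__ == "__main__":
    iface.launch()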
@@ -10,6 +10,13 @@ class AudioInput extends React.Component {
}
this.src = null;
this.key = 0; // needed to prevent audio caching
this.uploader = React.createRef();
this.openFileUpload = this.openFileUpload.bind(this);
this.load_preview_from_files = this.load_preview_from_files.bind(this);
this.load_preview_from_upload = this.load_preview_from_upload.bind(this);
this.load_preview_from_drop = this.load_preview_from_drop.bind(this);
}
start = () => {
this.setState({
@@ -26,6 +33,9 @@
reader.onload = (function (e) { this.props.handleChange(e.target.result) }).bind(this);
reader.readAsDataURL(audioData.blob);
}
openFileUpload() {
this.uploader.current.click();
}
render() {
if (this.props.value !== null) {
if (this.props.value !== this.src) {
@@ -38,13 +48,44 @@
</audio>
</div>);
} else {
return (<div className="input_audio">
<AudioReactRecorder state={this.state.recordState} onStop={this.onStop} />
{this.state.recordState === RecordState.STOP ?
<button className="start" onClick={this.start}>Record</button> :
<button className="stop" onClick={this.stop}>Recording...</button>
if (this.props.source === "microphone") {
return (<div className="input_audio">
<AudioReactRecorder state={this.state.recordState} onStop={this.onStop} />
{this.state.recordState === RecordState.STOP ?
<button className="start" onClick={this.start}>Record</button> :
<button className="stop" onClick={this.stop}>Recording...</button>
}
</div>);
} else if (this.props.source === "upload") {
let no_action = (evt) => {
evt.preventDefault();
evt.stopPropagation();
}
</div>);
return (
<div className="input_image" onDrag={no_action} onDragStart={no_action} onDragEnd={no_action} onDragOver={no_action} onDragEnter={no_action} onDragLeave={no_action} onDrop={no_action} >
<div className="upload_zone" onClick={this.openFileUpload} onDrop={this.load_preview_from_drop}>
Drop Audio Here<br />- or -<br />Click to Upload
</div>
<input className="hidden_upload" type="file" ref={this.uploader} onChange={this.load_preview_from_upload} accept="audio/*" style={{ display: "none" }} />
</div>);
}
}
}
load_preview_from_drop(evt) {
this.load_preview_from_files(evt.dataTransfer.files)
}
load_preview_from_upload(evt) {
this.load_preview_from_files(evt.target.files);
}
load_preview_from_files(files) {
if (!files.length || !window.FileReader) {
return;
}
var component = this;
var ReaderObj = new FileReader()
ReaderObj.readAsDataURL(files[0])
ReaderObj.onloadend = function () {
component.props.handleChange(this.result);
}
}
}

@@ -1,13 +1,13 @@
import React from 'react';
import ComponentExample from '../component_example';
import jspreadsheet from "jspreadsheet-ce";
import "../../../node_modules/jspreadsheet-ce/dist/jspreadsheet.css"
class DataframeInput extends React.Component {
constructor(props) {
super(props);
this.wrapper = React.createRef();
this.handleChange = this.handleChange.bind(this);
this.getEmptyArray = this.getEmptyArray.bind(this);
this.getConfig = this.getConfig.bind(this);
}
componentDidMount = function () {
@@ -24,13 +24,13 @@
let column_config = [];
for (let i = 0; i < col_count; i++) {
let column = {};
-      if (this.opts.datatype) {
-        let datatype = typeof this.opts.datatype === "string" ? this.opts.datatype : this.opts.datatype[i];
+      if (this.props.datatype) {
+        let datatype = typeof this.props.datatype === "string" ? this.props.datatype : this.props.datatype[i];
let datatype_map = { "str": "text", "bool": "checkbox", "number": "numeric", "date": "calendar" }
column.type = datatype_map[datatype];
}
-      if (this.opts.headers) {
-        column.title = this.opts.headers[i];
+      if (this.props.headers) {
+        column.title = this.props.headers[i];
}
column_config.push(column);
}
@@ -39,9 +39,25 @@
config.data = this.props.value;
return config;
}
resetData(new_data) {
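    // resize the grid so its row and column counts match new_data before loading it with setData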
let [new_rows, new_cols] = [new_data.length, new_data[0].length];
let current_data = this.el.getData();
let [cur_rows, cur_cols] = [current_data.length, current_data[0].length];
if (cur_rows < new_rows) {
this.el.insertRow(new_rows - cur_rows);
} else if (cur_rows > new_rows) {
this.el.deleteRow(0, cur_rows - new_rows);
}
if (cur_cols < new_cols) {
this.el.insertColumn(new_cols - cur_cols);
} else if (cur_cols > new_cols) {
this.el.deleteColumn(0, cur_cols - new_cols);
}
this.el.setData(new_data);
}
render() {
-    if (JSON.stringify(this.props.value) !== JSON.stringify(this.data)) {
-      this.el.setData(this.props.value);
+    if (JSON.stringify(this.props.value) !== JSON.stringify(this.data) && this.el) {
+      this.resetData(this.props.value);
this.data = this.props.value;
}
return (

@@ -54,7 +54,7 @@ class FileInput extends React.Component {
<div className="upload_zone" onClick={this.openFileUpload} onDrop={this.load_preview_from_drop}>
Drop File Here<br />- or -<br />Click to Upload
</div>
<input className="hidden_upload" type="file" multiple={this.props.file_count === "multiple"} webkitdirectory={this.props.file_count === "directory"} mozdirectory={this.props.file_count === "directory"} ref={this.uploader} onChange={this.load_preview_from_upload} accept="image/x-png,image/gif,image/jpeg" style={{display: "none"}} />
<input className="hidden_upload" type="file" multiple={this.props.file_count === "multiple"} webkitdirectory={this.props.file_count === "directory"} mozdirectory={this.props.file_count === "directory"} ref={this.uploader} onChange={this.load_preview_from_upload} style={{display: "none"}} />
</div>)
}
}

@@ -1,5 +1,7 @@
import React from 'react';
-import {DataURLComponentExample} from '../component_example';
+import { DataURLComponentExample } from '../component_example';
+import Webcam from "react-webcam";
+import { SketchField, Tools } from 'react-sketch';
class ImageInput extends React.Component {
constructor(props) {
@@ -10,33 +12,74 @@
this.load_preview_from_files = this.load_preview_from_files.bind(this);
this.load_preview_from_upload = this.load_preview_from_upload.bind(this);
this.load_preview_from_drop = this.load_preview_from_drop.bind(this);
this.snapshot = this.snapshot.bind(this);
this.getSketch = this.getSketch.bind(this);
this.webcamRef = React.createRef();
this.sketchRef = React.createRef();
this.sketchKey = 0;
}
-  handleChange(evt) {
-    this.props.handleChange(evt.target.value);
+  handleChange(data) {
+    this.props.handleChange(data);
}
openFileUpload() {
this.uploader.current.click();
}
snapshot() {
let imageSrc = this.webcamRef.current.getScreenshot();
this.handleChange(imageSrc);
}
getSketch() {
let imageSrc = this.sketchRef.current.toDataURL();
this.handleChange(imageSrc);
}
render() {
let no_action = (evt) => {
evt.preventDefault();
      evt.stopPropagation();
}
-    if (this.props.value != null) {
+    if (this.props.value !== null && this.props.source !== "canvas") {
return (
<div className="input_image">
<div className="image_preview_holder">
<img className="image_preview" alt="" src={this.props.value}/>
<img className="image_preview" alt="" src={this.props.value} />
</div>
</div>)
} else {
-      return (
-        <div className="input_image" onDrag={no_action} onDragStart={no_action} onDragEnd={no_action} onDragOver={no_action} onDragEnter={no_action} onDragLeave={no_action} onDrop={no_action} >
-          <div className="upload_zone" onClick={this.openFileUpload} onDrop={this.load_preview_from_drop}>
-            Drop Image Here<br />- or -<br />Click to Upload
-          </div>
-          <input className="hidden_upload" type="file" ref={this.uploader} onChange={this.load_preview_from_upload} accept="image/x-png,image/gif,image/jpeg" style={{display: "none"}} />
-        </div>)
if (this.props.source === "upload") {
return (
<div className="input_image" onDrag={no_action} onDragStart={no_action} onDragEnd={no_action} onDragOver={no_action} onDragEnter={no_action} onDragLeave={no_action} onDrop={no_action} >
<div className="upload_zone" onClick={this.openFileUpload} onDrop={this.load_preview_from_drop}>
Drop Image Here<br />- or -<br />Click to Upload
</div>
<input className="hidden_upload" type="file" ref={this.uploader} onChange={this.load_preview_from_upload} accept="image/x-png,image/gif,image/jpeg" style={{ display: "none" }} />
</div>);
} else if (this.props.source === "webcam") {
return (<div className="input_image">
<div className="image_preview_holder">
<Webcam ref={this.webcamRef} />
<button class="snapshot" onClick={this.snapshot}>- Click to Take Snapshot -</button>
</div>
</div>);
} else if (this.props.source === "canvas") {
if (this.props.value === null && this.sketchRef && this.sketchRef.current) {
this.sketchKey += 1;
}
return (<div className="input_image">
<div className="image_preview_holder sketch">
<SketchField
ref={this.sketchRef}
key={this.sketchKey}
width='320px'
height='100%'
tool={Tools.Pencil}
lineColor='black'
lineWidth={20}
backgroundColor="white"
onChange={this.getSketch}
/>
</div>
</div>);
}
}
}
load_preview_from_drop(evt) {
@@ -64,4 +107,4 @@ class ImageInputExample extends DataURLComponentExample {
}
}
export { ImageInput, ImageInputExample };

@@ -1,6 +1,8 @@
import React from 'react';
import ComponentExample from '../component_example';
import jspreadsheet from "jspreadsheet-ce";
import "../../../node_modules/jspreadsheet-ce/dist/jspreadsheet.css"
import classNames from 'classnames';
class DataframeOutput extends React.Component {
constructor(props) {
@@ -8,20 +10,35 @@
this.wrapper = React.createRef();
}
componentDidMount = function() {
-    this.el = jspreadsheet(this.wrapper.current);
+    this.el = jspreadsheet(this.wrapper.current, {minDimensions: [1, 1]});
}
resetData(new_data) {
let [new_rows, new_cols] = [new_data.length, new_data[0].length];
let current_data = this.el.getData();
let [cur_rows, cur_cols] = [current_data.length, current_data[0].length];
if (cur_rows < new_rows) {
this.el.insertRow(new_rows - cur_rows);
} else if (cur_rows > new_rows) {
this.el.deleteRow(0, cur_rows - new_rows);
}
if (cur_cols < new_cols) {
this.el.insertColumn(new_cols - cur_cols);
} else if (cur_cols > new_cols) {
this.el.deleteColumn(0, cur_cols - new_cols);
}
this.el.setData(new_data);
}
render() {
-    if (this.props.value == null) {
-      return false;
-    }
-    if (this.props.value.headers) {
+    if (this.props.value && this.props.value.headers && this.el) {
      for (let [i, header] of this.props.value.headers.entries())
        this.el.setHeader(i, header);
    }
-    this.el.setData(this.props.value.data);
+    if (this.props.value && this.el) {
+      this.resetData(this.props.value.data)
+    }
    return (
-      <div className="output_dataframe">
-        <div ref={this.wrapper} />
+      <div className={classNames("output_dataframe", {"hidden": this.props.value === null})}>
+        <div ref={this.wrapper}/>
      </div>)
}
}

@@ -1,12 +1,13 @@
import React from 'react';
import ComponentExample from '../component_example';
+import {prettyBytes} from '../utils';
class FileOutput extends React.Component {
render() {
return this.props.value ? <div className="output_file">
<a className="file_display" href={"data:;base64," + this.props.value.data} download={this.props.value.name}>
<div className="file_name">{this.props.value.name}</div>
<div className="file_size">{this.props.value.size}</div>
<div className="file_size">{this.props.value.size === null ? "" : prettyBytes(this.props.value.size)}</div>
</a>
</div> : false;
}

@@ -4,7 +4,7 @@ import ComponentExample from '../component_example';
class HighlightedTextOutput extends React.Component {
constructor(props) {
super(props);
-    this.color_map = this.props.color_map;
+    this.color_map = this.props.color_map || {};
this.new_category_index = 0;
}
generate_category_legend(category_map) {

@@ -5,7 +5,24 @@ import JSONTree from 'react-json-tree'
class JSONOutput extends React.Component {
render() {
return this.props.value ? <div className="output_json">
-      <JSONTree data={this.props.value }/>
+      <JSONTree data={this.props.value} theme={{
base00: '#111',
base01: '#222',
base02: '#333',
base03: '#444',
base04: '#555',
base05: '#666',
base06: '#777',
base07: '#888',
base08: '#999',
base09: '#AAA',
base0A: '#BBB',
base0B: '#CCC',
base0C: '#DDD',
base0D: '#EEE',
base0E: '#EFEFEF',
base0F: '#FFF'
}} invertTheme={true} />
</div> : false;
}
}
@@ -14,10 +31,10 @@ class JSONOutputExample extends ComponentExample {
render() {
let output_string = JSON.stringify(this.props.value);
if (output_string.length > 20) {
      output_string = output_string.substring(0, 20) + "...";
}
return <div className="output_json_example">{output_string}</div>
}
}
export { JSONOutput, JSONOutputExample };

@@ -5,11 +5,17 @@
.gradio_interface[theme="default"] {
@apply container mx-auto my-4;
.title {
-    @apply text-center;
+    @apply text-center p-4 text-4xl;
}
.description {
@apply pb-4;
}
.loading {
@apply absolute right-1;
}
.loading img {
@apply h-5;
}
.panels {
@apply flex flex-wrap justify-center gap-4;
}
@@ -25,6 +31,9 @@
.panel_button {
@apply flex-grow p-3 rounded bg-gray-100 hover:bg-gray-200 transition font-semibold focus:outline-none;
}
.panel_button.disabled {
@apply text-gray-400 cursor-not-allowed;
}
.panel_button.submit {
@apply bg-yellow-500 hover:bg-yellow-400 text-white;
}
@@ -32,7 +41,7 @@
@apply flex gap-4 my-4;
}
.screenshot_set {
-      @apply flex flex-grow;
+      @apply hidden flex-grow;
}
.panel_button.left_panel_button {
@apply rounded-tr-none rounded-br-none;
@@ -45,7 +54,7 @@
@apply text-lg font-semibold my-2;
}
.examples_control {
-      @apply flex gap-2;
+      @apply hidden gap-2;
}
.examples_control_left {
@apply flex gap-2;
@@ -56,7 +65,7 @@
.examples_control button {
@apply bg-gray-100 hover:bg-gray-200 p-2;
}
-    table {
+    .examples_table {
@apply table-auto p-2 bg-gray-100 mt-4 rounded;
tbody tr {
@apply cursor-pointer;
@@ -91,11 +100,17 @@
@apply border-8 border-gray-300 border-dashed w-full h-full flex justify-center items-center text-3xl text-gray-400 text-center cursor-pointer leading-10;
}
.image_preview_holder {
-      @apply w-full h-full flex justify-center items-center bg-gray-300;
+      @apply w-full h-full flex justify-center items-center bg-gray-300 relative inline-block;
}
-    .image_preview {
+    .sketch > div {
+      @apply bg-white;
+    }
+    .image_preview, video {
@apply w-full h-full object-contain;
}
.snapshot {
@apply absolute bottom-0 w-full bg-white bg-opacity-90 py-3 font-bold text-lg text-center;
}
}
.input_image_example {
@apply h-24;
@@ -236,6 +251,21 @@
@apply w-full h-full object-contain;
}
}
.input_file {
@apply w-full h-80;
.upload_zone {
@apply border-8 border-gray-300 border-dashed w-full h-full flex justify-center items-center text-3xl text-gray-400 text-center cursor-pointer leading-10;
}
.file_preview_holder {
@apply w-full h-full flex flex-col justify-center items-center relative inline-block;
}
.file_name {
@apply text-6xl p-6;
}
.file_size {
@apply text-2xl p-2;
}
}
/* Output Components */
.output_text {
@apply w-full bg-white border-gray-400 rounded box-border p-1;
@@ -282,10 +312,33 @@
}
}
}
.output_keyvalues {
table {
@apply bg-white;
thead {
@apply font-bold;
}
td, th {
@apply p-2;
}
}
}
.output_video {
@apply w-full h-80 object-contain flex justify-center;
video {
@apply h-full;
}
}
.output_file {
@apply w-full h-80;
.file_display {
@apply w-full h-full flex flex-col justify-center items-center relative inline-block;
}
.file_name {
@apply text-6xl p-6;
}
.file_size {
@apply text-2xl p-2;
}
}
}

gradio/inputs.py

@@ -22,6 +22,7 @@ from ffmpy import FFmpeg
import math
import tempfile
from pandas.api.types import is_bool_dtype, is_numeric_dtype, is_string_dtype
+from pathlib import Path
class InputComponent(Component):
@@ -910,48 +911,62 @@ class Audio(InputComponent):
class File(InputComponent):
"""
Component accepts generic file uploads.
-    Input type: Union[file-object, bytes]
+    Input type: Union[file-object, bytes, List[Union[file-object, bytes]]]
"""
def __init__(self, type="file", label=None):
def __init__(self, file_count="single", type="file", label=None, keep_filename=True):
'''
        Parameters:
+       file_count (str): if "single", allows user to upload one file. If "multiple", user uploads multiple files. If "directory", user uploads all files in the selected directory. Return type will be a list for each file in case of "multiple" or "directory".
        type (str): Type of value to be returned by component. "file" returns a temporary file object whose path can be retrieved by file_obj.name, "bytes" returns a bytes object.
+       keep_filename (bool): whether to keep the original filename in the f.name field upon upload. If True, the original filename's stem plus an underscore is placed before the unique temporary safe filename string and extension.
        label (str): component name in interface.
        '''
+       self.file_count = file_count
        self.type = type
        self.test_input = None
+       self.keep_filename = keep_filename
super().__init__(label)
+   def get_template_context(self):
+       return {
+           "file_count": self.file_count,
+           **super().get_template_context()
+       }
    @classmethod
    def get_shortcut_implementations(cls):
        return {
            "file": {},
+           "files": {"file_count": "multiple"},
        }
    def preprocess(self, x):
-       name, data, is_local_example = x["name"], x["data"], x["is_local_example"]
-       if self.type == "file":
-           if is_local_example:
-               return open(name)
-           else:
-               return processing_utils.decode_base64_to_file(data)
-       elif self.type == "bytes":
-           if is_local_example:
-               with open(name, "rb") as file_data:
-                   return file_data.read()
-           return processing_utils.decode_base64_to_binary(data)[0]
-       else:
-           raise ValueError("Unknown type: " + str(self.type) + ". Please choose from: 'file', 'bytes'.")
+       def process_single_file(f):
+           name, data, is_local_example = f["name"], f["data"], f["is_local_example"]
+           if self.type == "file":
+               if is_local_example:
+                   return open(name)
+               else:
+                   if self.keep_filename:
+                       filename_prefix = Path(name).stem + '_'
+                   else:
+                       filename_prefix = ""
+                   return processing_utils.decode_base64_to_file(data, filename_prefix=filename_prefix)
+           elif self.type == "bytes":
+               if is_local_example:
+                   with open(name, "rb") as file_data:
+                       return file_data.read()
+               return processing_utils.decode_base64_to_binary(data)[0]
+           raise ValueError("Unknown type: " + str(self.type) + ". Please choose from: 'file', 'bytes'.")
+       if self.file_count == "single":
+           if isinstance(x, list):
+               return process_single_file(x[0])
+           else:
+               return process_single_file(x)
+       else:
+           return [process_single_file(f) for f in x]
    def embed(self, x):
        raise NotImplementedError("File doesn't currently support embeddings")
    def save_flagged(self, dir, label, data):
        """
        Returns: (str) path to file
        """
        return self.save_flagged_file(dir, label, data["data"])
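A minimal sketch of how the extended File input can be driven after this change (the function and its body are illustrative, not part of the commit; the "files" shortcut registered above maps to file_count="multiple"):

import os
import gradio as gr

def total_size(files):
    # with file_count="multiple", preprocess hands the function a list of
    # tempfile objects; file_obj.name is the path of each file on disk
    return sum(os.path.getsize(file_obj.name) for file_obj in files)

iface = gr.Interface(
    total_size,
    gr.inputs.File(file_count="multiple", type="file", keep_filename=True),
    "number")

if __name__ == "__main__":
    iface.launch()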
class Dataframe(InputComponent):
@@ -960,7 +975,7 @@ class Dataframe(InputComponent):
Input type: Union[pandas.DataFrame, numpy.array, List[Union[str, float]], List[List[Union[str, float]]]]
"""
-    def __init__(self, headers=None, row_count=3, col_count=3, datatype="str", type="pandas", label=None):
+    def __init__(self, headers=None, row_count=3, col_count=3, datatype="str", default=None, type="pandas", label=None):
"""
Parameters:
headers (List[str]): Header names to dataframe.
@@ -1058,143 +1073,3 @@ class Dataframe(InputComponent):
def restore_flagged(self, data):
return json.loads(data)
#######################
# DEPRECATED COMPONENTS (the classes below are removed by this commit)
#######################
class Sketchpad(InputComponent):
"""
DEPRECATED. Component creates a sketchpad for black and white illustration. Provides numpy array of shape `(width, height)` as an argument to the wrapped function.
Input type: numpy.array
"""
def __init__(self, shape=(28, 28), invert_colors=True,
flatten=False, label=None):
'''
Parameters:
shape (Tuple[int, int]): shape to crop and resize image to.
invert_colors (bool): whether to represent black as 1 and white as 0 in the numpy array.
flatten (bool): whether to reshape the numpy array to a single dimension.
label (str): component name in interface.
'''
warnings.warn("Sketchpad has been deprecated. Please use 'Image' component to generate a sketchpad. The string shorcut 'sketchpad' has been moved to the Image component.", DeprecationWarning)
self.image_width = shape[0]
self.image_height = shape[1]
self.invert_colors = invert_colors
self.flatten = flatten
super().__init__(label)
def preprocess(self, x):
"""
Default preprocessing method for the Sketchpad is to convert the sketch to black and white and resize it to the configured shape (28x28 by default)
"""
im_transparent = processing_utils.decode_base64_to_image(x)
# Create a white background for the alpha channel
im = PIL.Image.new("RGBA", im_transparent.size, "WHITE")
im.paste(im_transparent, (0, 0), im_transparent)
im = im.convert('L')
if self.invert_colors:
im = PIL.ImageOps.invert(im)
im = im.resize((self.image_width, self.image_height))
if self.flatten:
array = np.array(im).flatten().reshape(
1, self.image_width * self.image_height)
else:
array = np.array(im).flatten().reshape(
1, self.image_width, self.image_height)
return array
def process_example(self, example):
return processing_utils.encode_file_to_base64(example)
def save_flagged(self, dir, label, data):
"""
Default rebuild method to decode a base64 image
"""
im = processing_utils.decode_base64_to_image(data)
timestamp = datetime.datetime.now()
filename = f'input_{timestamp.strftime("%Y-%m-%d-%H-%M-%S")}.png'
im.save(f'{dir}/{filename}', 'PNG')
return filename
class Webcam(InputComponent):
"""
DEPRECATED. Component creates a webcam for captured image input. Provides numpy array of shape `(width, height, 3)` as an argument to the wrapped function.
Input type: numpy.array
"""
def __init__(self, shape=(224, 224), label=None):
'''
Parameters:
shape (Tuple[int, int]): shape to crop and resize image to.
label (str): component name in interface.
'''
warnings.warn("Webcam has been deprecated. Please use 'Image' component to generate a webcam. The string shorcut 'webcam' has been moved to the Image component.", DeprecationWarning)
self.image_width = shape[0]
self.image_height = shape[1]
self.num_channels = 3
super().__init__(label)
def preprocess(self, x):
"""
Default preprocessing method is to convert the picture to RGB and resize it to the configured shape (224x224 by default)
"""
im = processing_utils.decode_base64_to_image(x)
im = im.convert('RGB')
im = processing_utils.resize_and_crop(
im, (self.image_width, self.image_height))
return np.array(im)
def save_flagged(self, dir, label, data):
"""
Default rebuild method to decode a base64 image
"""
im = processing_utils.decode_base64_to_image(data)
timestamp = datetime.datetime.now()
filename = f'input_{timestamp.strftime("%Y-%m-%d-%H-%M-%S")}.png'
im.save('{}/{}'.format(dir, filename), 'PNG')
return filename
class Microphone(InputComponent):
"""
DEPRECATED. Component creates a microphone element for audio inputs.
Input type: numpy.array
"""
def __init__(self, preprocessing=None, label=None):
'''
Parameters:
preprocessing (Union[str, Callable]): preprocessing to apply to input
label (str): component name in interface.
'''
warnings.warn("Microphone has been deprecated. Please use 'Audio' component to generate a microphone. The string shorcut 'microphone' has been moved to the Audio component.", DeprecationWarning)
super().__init__(label)
if preprocessing is None or preprocessing == "mfcc":
self.preprocessing = preprocessing
else:
raise ValueError(
"unexpected value for preprocessing", preprocessing)
def preprocess(self, x):
"""
By default, no pre-processing is applied to a microphone input file
"""
file_obj = processing_utils.decode_base64_to_file(x)
if self.preprocessing == "mfcc":
return processing_utils.generate_mfcc_features_from_audio_file(file_obj.name)
_, signal = scipy.io.wavfile.read(file_obj.name)
return signal
def save_flagged(self, dir, label, data):
inp = data.split(';')[1].split(',')[1]
wav_obj = base64.b64decode(inp)
timestamp = datetime.datetime.now()
filename = f'input_{timestamp.strftime("%Y-%m-%d-%H-%M-%S")}.wav'
with open("{}/{}".format(dir, filename), "wb+") as f:
f.write(wav_obj)
return filename
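With the deprecated classes removed, the replacements are the unified Image and Audio components parameterized by source, as the new demos in this commit already show (digit_classifier.py draws on a canvas, face_segment.py reads the webcam). A sketch of the one-line equivalents, assuming the constructor signatures used in those demos:

import gradio as gr

# Sketchpad -> Image with a drawing canvas (cf. demo/digit_classifier.py)
sketchpad = gr.inputs.Image(shape=(28, 28), image_mode='L', invert_colors=True, source="canvas")

# Webcam -> Image sourced from the webcam (cf. demo/face_segment.py)
webcam = gr.inputs.Image(shape=(224, 224), source="webcam")

# Microphone -> Audio sourced from the microphone
microphone = gr.inputs.Audio(source="microphone")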

gradio/networking.py

@@ -229,9 +229,9 @@ def flag_data(input_data, output_data, flag_option=None):
flag_path = os.path.join(app.cwd, app.interface.flagging_dir)
csv_data = []
for i, interface in enumerate(app.interface.input_components):
csv_data.append(interface.save_flagged(flag_path, app.interface.config["input_components"][i][1]["label"], input_data[i]))
csv_data.append(interface.save_flagged(flag_path, app.interface.config["input_components"][i]["label"], input_data[i]))
for i, interface in enumerate(app.interface.output_components):
csv_data.append(interface.save_flagged(flag_path, app.interface.config["output_components"][i][1]["label"], output_data[i]))
csv_data.append(interface.save_flagged(flag_path, app.interface.config["output_components"][i]["label"], output_data[i]))
if flag_option:
csv_data.append(flag_option)

gradio/processing_utils.py

@@ -5,8 +5,7 @@ import tempfile
import scipy.io.wavfile
from scipy.fftpack import dct
import numpy as np
-import skimage
+from gradio import encryptor
#########################
# IMAGE PRE-PROCESSING
@@ -37,7 +36,7 @@ def encode_plot_to_base64(plt):
def encode_array_to_base64(image_array):
with BytesIO() as output_bytes:
-        PIL_image = Image.fromarray(skimage.img_as_ubyte(image_array))
+        PIL_image = Image.fromarray(_convert(image_array, np.uint8, force_copy=False))
PIL_image.save(output_bytes, 'PNG')
bytes_data = output_bytes.getvalue()
base64_str = str(base64.b64encode(bytes_data), 'utf-8')
@@ -80,16 +79,303 @@ def decode_base64_to_binary(encoding):
data = encoding
return base64.b64decode(data), extension
-def decode_base64_to_file(encoding):
+def decode_base64_to_file(encoding, encryption_key=None, filename_prefix=""):
    data, extension = decode_base64_to_binary(encoding)
    if extension is None:
-        file_obj = tempfile.NamedTemporaryFile(delete=False)
+        file_obj = tempfile.NamedTemporaryFile(delete=False, prefix=filename_prefix)
    else:
-        file_obj = tempfile.NamedTemporaryFile(delete=False, suffix="."+extension)
+        file_obj = tempfile.NamedTemporaryFile(delete=False, prefix=filename_prefix, suffix="."+extension)
+    if encryption_key is not None:
+        data = encryptor.encrypt(encryption_key, data)
    file_obj.write(data)
    file_obj.flush()
    return file_obj
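A small sketch of the new keyword arguments (the payload below is illustrative): filename_prefix is passed straight to tempfile.NamedTemporaryFile, which is how File(keep_filename=True) preserves the original filename's stem, and encryption_key, when given, encrypts the bytes via gradio.encryptor before they are written.

import base64
from gradio import processing_utils

# illustrative base64 data URL wrapping raw bytes
encoding = "data:text/plain;base64," + base64.b64encode(b"hello").decode()
f = processing_utils.decode_base64_to_file(encoding, filename_prefix="greeting_")
print(f.name)  # e.g. /tmp/greeting_k2j4x8... (the prefix survives in the temp filename)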
def _convert(image, dtype, force_copy=False, uniform=False):
"""
Adapted from: https://github.com/scikit-image/scikit-image/blob/main/skimage/util/dtype.py#L510-L531
Convert an image to the requested data-type.
Warnings are issued in case of precision loss, or when negative values
are clipped during conversion to unsigned integer types (sign loss).
Floating point values are expected to be normalized and will be clipped
to the range [0.0, 1.0] or [-1.0, 1.0] when converting to unsigned or
signed integers respectively.
Numbers are not shifted to the negative side when converting from
unsigned to signed integer types. Negative values will be clipped when
converting to unsigned integers.
Parameters
----------
image : ndarray
Input image.
dtype : dtype
Target data-type.
force_copy : bool, optional
Force a copy of the data, irrespective of its current dtype.
uniform : bool, optional
Uniformly quantize the floating point range to the integer range.
By default (uniform=False) floating point values are scaled and
rounded to the nearest integers, which minimizes back and forth
conversion errors.
.. versionchanged :: 0.15
``_convert`` no longer warns about possible precision or sign
information loss. See discussions on these warnings at:
https://github.com/scikit-image/scikit-image/issues/2602
https://github.com/scikit-image/scikit-image/issues/543#issuecomment-208202228
https://github.com/scikit-image/scikit-image/pull/3575
References
----------
.. [1] DirectX data conversion rules.
https://msdn.microsoft.com/en-us/library/windows/desktop/dd607323%28v=vs.85%29.aspx
.. [2] Data Conversions. In "OpenGL ES 2.0 Specification v2.0.25",
pp 7-8. Khronos Group, 2010.
.. [3] Proper treatment of pixels as integers. A.W. Paeth.
In "Graphics Gems I", pp 249-256. Morgan Kaufmann, 1990.
.. [4] Dirty Pixels. J. Blinn. In "Jim Blinn's corner: Dirty Pixels",
pp 47-57. Morgan Kaufmann, 1998.
"""
dtype_range = {bool: (False, True),
np.bool_: (False, True),
np.bool8: (False, True),
float: (-1, 1),
np.float_: (-1, 1),
np.float16: (-1, 1),
np.float32: (-1, 1),
np.float64: (-1, 1)}
def _dtype_itemsize(itemsize, *dtypes):
"""Return first of `dtypes` with itemsize greater than `itemsize`
Parameters
----------
itemsize: int
The data type object element size.
Other Parameters
----------------
*dtypes:
Any Object accepted by `np.dtype` to be converted to a data
type object
Returns
-------
dtype: data type object
First of `dtypes` with itemsize greater than `itemsize`.
"""
return next(dt for dt in dtypes if np.dtype(dt).itemsize >= itemsize)
def _dtype_bits(kind, bits, itemsize=1):
"""Return dtype of `kind` that can store a `bits` wide unsigned int
Parameters:
kind: str
Data type kind.
bits: int
Desired number of bits.
itemsize: int
The data type object element size.
Returns
-------
dtype: data type object
Data type of `kind` that can store a `bits` wide unsigned int
"""
s = next(i for i in (itemsize, ) + (2, 4, 8) if
bits < (i * 8) or (bits == (i * 8) and kind == 'u'))
return np.dtype(kind + str(s))
def _scale(a, n, m, copy=True):
"""Scale an array of unsigned/positive integers from `n` to `m` bits.
Numbers can be represented exactly only if `m` is a multiple of `n`.
Parameters
----------
a : ndarray
Input image array.
n : int
Number of bits currently used to encode the values in `a`.
m : int
Desired number of bits to encode the values in `out`.
copy : bool, optional
If True, allocates and returns new array. Otherwise, modifies
`a` in place.
Returns
-------
out : array
Output image array. Has the same kind as `a`.
"""
kind = a.dtype.kind
if n > m and a.max() < 2 ** m:
mnew = int(np.ceil(m / 2) * 2)
if mnew > m:
dtype = "int{}".format(mnew)
else:
dtype = "uint{}".format(mnew)
n = int(np.ceil(n / 2) * 2)
return a.astype(_dtype_bits(kind, m))
elif n == m:
return a.copy() if copy else a
elif n > m:
# downscale with precision loss
if copy:
b = np.empty(a.shape, _dtype_bits(kind, m))
np.floor_divide(a, 2**(n - m), out=b, dtype=a.dtype,
casting='unsafe')
return b
else:
a //= 2**(n - m)
return a
elif m % n == 0:
# exact upscale to a multiple of `n` bits
if copy:
b = np.empty(a.shape, _dtype_bits(kind, m))
np.multiply(a, (2**m - 1) // (2**n - 1), out=b, dtype=b.dtype)
return b
else:
a = a.astype(_dtype_bits(kind, m, a.dtype.itemsize), copy=False)
a *= (2**m - 1) // (2**n - 1)
return a
else:
# upscale to a multiple of `n` bits,
# then downscale with precision loss
o = (m // n + 1) * n
if copy:
b = np.empty(a.shape, _dtype_bits(kind, o))
np.multiply(a, (2**o - 1) // (2**n - 1), out=b, dtype=b.dtype)
b //= 2**(o - m)
return b
else:
a = a.astype(_dtype_bits(kind, o, a.dtype.itemsize), copy=False)
a *= (2**o - 1) // (2**n - 1)
a //= 2**(o - m)
return a
image = np.asarray(image)
dtypeobj_in = image.dtype
if dtype is np.floating:
dtypeobj_out = np.dtype('float64')
else:
dtypeobj_out = np.dtype(dtype)
dtype_in = dtypeobj_in.type
dtype_out = dtypeobj_out.type
kind_in = dtypeobj_in.kind
kind_out = dtypeobj_out.kind
itemsize_in = dtypeobj_in.itemsize
itemsize_out = dtypeobj_out.itemsize
# Below, we do an `issubdtype` check. Its purpose is to find out
# whether we can get away without doing any image conversion. This happens
# when:
#
# - the output and input dtypes are the same or
# - when the output is specified as a type, and the input dtype
# is a subclass of that type (e.g. `np.floating` will allow
# `float32` and `float64` arrays through)
if np.issubdtype(dtype_in, np.obj2sctype(dtype)):
if force_copy:
image = image.copy()
return image
if kind_in in 'ui':
imin_in = np.iinfo(dtype_in).min
imax_in = np.iinfo(dtype_in).max
if kind_out in 'ui':
imin_out = np.iinfo(dtype_out).min
imax_out = np.iinfo(dtype_out).max
# any -> binary
if kind_out == 'b':
return image > dtype_in(dtype_range[dtype_in][1] / 2)
# binary -> any
if kind_in == 'b':
result = image.astype(dtype_out)
if kind_out != 'f':
result *= dtype_out(dtype_range[dtype_out][1])
return result
# float -> any
if kind_in == 'f':
if kind_out == 'f':
# float -> float
return image.astype(dtype_out)
if np.min(image) < -1.0 or np.max(image) > 1.0:
raise ValueError("Images of type float must be between -1 and 1.")
# floating point -> integer
# use float type that can represent output integer type
computation_type = _dtype_itemsize(itemsize_out, dtype_in,
np.float32, np.float64)
if not uniform:
if kind_out == 'u':
image_out = np.multiply(image, imax_out,
dtype=computation_type)
else:
image_out = np.multiply(image, (imax_out - imin_out) / 2,
dtype=computation_type)
image_out -= 1.0 / 2.
np.rint(image_out, out=image_out)
np.clip(image_out, imin_out, imax_out, out=image_out)
elif kind_out == 'u':
image_out = np.multiply(image, imax_out + 1,
dtype=computation_type)
np.clip(image_out, 0, imax_out, out=image_out)
else:
image_out = np.multiply(image, (imax_out - imin_out + 1.0) / 2.0,
dtype=computation_type)
np.floor(image_out, out=image_out)
np.clip(image_out, imin_out, imax_out, out=image_out)
return image_out.astype(dtype_out)
# signed/unsigned int -> float
if kind_out == 'f':
# use float type that can exactly represent input integers
computation_type = _dtype_itemsize(itemsize_in, dtype_out,
np.float32, np.float64)
if kind_in == 'u':
# using np.divide or np.multiply doesn't copy the data
# until the computation time
image = np.multiply(image, 1. / imax_in,
dtype=computation_type)
# DirectX uses this conversion also for signed ints
# if imin_in:
# np.maximum(image, -1.0, out=image)
else:
image = np.add(image, 0.5, dtype=computation_type)
image *= 2 / (imax_in - imin_in)
return np.asarray(image, dtype_out)
# unsigned int -> signed/unsigned int
if kind_in == 'u':
if kind_out == 'i':
# unsigned int -> signed int
image = _scale(image, 8 * itemsize_in, 8 * itemsize_out - 1)
return image.view(dtype_out)
else:
# unsigned int -> unsigned int
return _scale(image, 8 * itemsize_in, 8 * itemsize_out)
# signed int -> unsigned int
if kind_out == 'u':
image = _scale(image, 8 * itemsize_in - 1, 8 * itemsize_out)
result = np.empty(image.shape, dtype_out)
np.maximum(image, 0, out=result, dtype=image.dtype, casting='unsafe')
return result
# signed int -> signed int
if itemsize_in > itemsize_out:
return _scale(image, 8 * itemsize_in - 1, 8 * itemsize_out - 1)
image = image.astype(_dtype_bits('i', itemsize_out * 8))
image -= imin_in
image = _scale(image, 8 * itemsize_in, 8 * itemsize_out, copy=False)
image += imin_out
return image.astype(dtype_out)
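_convert is vendored so that encode_array_to_base64 above no longer needs scikit-image: _convert(image_array, np.uint8) stands in for skimage.img_as_ubyte, scaling normalized floats to the 0..255 range and passing uint8 input through untouched. A quick sketch of the behavior, assuming _convert is imported from this module:

import numpy as np
from gradio.processing_utils import _convert

gray = np.linspace(0, 1, 5)            # floats in [0, 1]
print(_convert(gray, np.uint8))        # [  0  64 128 191 255]

img = np.zeros((2, 2), dtype=np.uint8)
print(_convert(img, np.uint8) is img)  # True: same-dtype input is returned as-is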
##################
# AUDIO FILES
##################
@@ -178,6 +464,4 @@ def generate_mfcc_features_from_audio_file(wav_filename=None,
filter_banks -= (np.mean(filter_banks, axis=0) + 1e-8)
mfcc -= (np.mean(mfcc, axis=0) + 1e-8)
    return mfcc[np.newaxis, :, :]  # Create a batch dimension.