Merge branch 'master' into port-issue

Abubakar Abid 2021-12-21 10:43:09 -06:00
commit d69ab86247
17 changed files with 428 additions and 135 deletions

View File

@@ -18,7 +18,7 @@ jobs:
. venv/bin/activate
pip install --upgrade pip
pip install -r gradio.egg-info/requires.txt
pip install shap IPython comet_ml wandb mlflow
pip install shap IPython comet_ml wandb mlflow tensorflow transformers
pip install selenium==4.0.0a6.post2 coverage scikit-image
- run:
command: |

View File

@@ -59,19 +59,19 @@ iface = gr.Interface(
gr.inputs.Timeseries(x="time", y="value", optional=True),
],
outputs=[
gr.outputs.Textbox(),
gr.outputs.Label(),
gr.outputs.Audio(),
gr.outputs.Image(),
gr.outputs.Video(),
gr.outputs.KeyValues(),
gr.outputs.HighlightedText(),
gr.outputs.JSON(),
gr.outputs.HTML(),
gr.outputs.File(),
gr.outputs.Dataframe(),
gr.outputs.Carousel("image"),
gr.outputs.Timeseries(x="time", y="value")
gr.outputs.Textbox(label="Textbox"),
gr.outputs.Label(label="Label"),
gr.outputs.Audio(label="Audio"),
gr.outputs.Image(label="Image"),
gr.outputs.Video(label="Video"),
gr.outputs.KeyValues(label="KeyValues"),
gr.outputs.HighlightedText(label="HighlightedText"),
gr.outputs.JSON(label="JSON"),
gr.outputs.HTML(label="HTML"),
gr.outputs.File(label="File"),
gr.outputs.Dataframe(label="Dataframe"),
gr.outputs.Carousel("image", label="Carousel"),
gr.outputs.Timeseries(x="time", y="value", label="Timeseries")
],
theme="huggingface",
title="Kitchen Sink",

View File

@@ -1,11 +1,14 @@
Metadata-Version: 1.0
Metadata-Version: 2.1
Name: gradio
Version: 2.5.2
Version: 2.5.3
Summary: Python library for easily interacting with trained machine learning models
Home-page: https://github.com/gradio-app/gradio-UI
Author: Abubakar Abid
Author-email: a12d@stanford.edu
License: Apache License 2.0
Description: UNKNOWN
Keywords: machine learning,visualization,reproducibility
Platform: UNKNOWN
License-File: LICENSE
UNKNOWN

View File

@@ -1,15 +1,15 @@
Flask-Cors>=3.0.8
Flask-Login
Flask>=1.1.1
analytics-python
ffmpy
flask-cachebuster
markdown2
matplotlib
numpy
pandas
paramiko
pillow
pycryptodome
pydub
matplotlib
pandas
pillow
ffmpy
markdown2
pycryptodome
requests
paramiko
analytics-python
Flask>=1.1.1
Flask-Cors>=3.0.8
flask-cachebuster
Flask-Login

View File

@@ -28,6 +28,51 @@ def get_huggingface_interface(model_name, api_key, alias):
return fp.name
pipelines = {
'audio-classification': {
# example model: https://hf.co/ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition
'inputs': inputs.Audio(label="Input", source="upload",
type="filepath"),
'outputs': outputs.Label(label="Class", type="confidences"),
'preprocess': lambda i: base64.b64decode(i['data'].split(",")[1]), # convert the base64 representation to binary
'postprocess': lambda r: {i["label"].split(", ")[0]: i["score"] for i in r.json()}
},
'automatic-speech-recognition': {
# example model: https://hf.co/jonatasgrosman/wav2vec2-large-xlsr-53-english
'inputs': inputs.Audio(label="Input", source="upload",
type="filepath"),
'outputs': outputs.Textbox(label="Output"),
'preprocess': lambda i: base64.b64decode(i['data'].split(",")[1]), # convert the base64 representation to binary
'postprocess': lambda r: r.json()["text"]
},
'feature-extraction': {
# example model: hf.co/julien-c/distilbert-feature-extraction
'inputs': inputs.Textbox(label="Input"),
'outputs': outputs.Dataframe(label="Output"),
'preprocess': lambda x: {"inputs": x},
'postprocess': lambda r: r.json()[0],
},
'fill-mask': {
'inputs': inputs.Textbox(label="Input"),
'outputs': outputs.Label(label="Classification", type="confidences"),
'preprocess': lambda x: {"inputs": x},
'postprocess': lambda r: {i["token_str"]: i["score"] for i in r.json()}
},
'image-classification': {
# Example: https://huggingface.co/google/vit-base-patch16-224
'inputs': inputs.Image(label="Input Image", type="filepath"),
'outputs': outputs.Label(label="Classification", type="confidences"),
'preprocess': lambda i: base64.b64decode(i.split(",")[1]), # convert the base64 representation to binary
'postprocess': lambda r: {i["label"].split(", ")[0]: i["score"] for i in r.json()}
},
# TODO: support image segmentation pipeline -- should we add a new output component type?
# 'image-segmentation': {
# # Example: https://hf.co/facebook/detr-resnet-50-panoptic
# 'inputs': inputs.Image(label="Input Image", type="filepath"),
# 'outputs': outputs.Image(label="Segmentation"),
# 'preprocess': lambda i: base64.b64decode(i.split(",")[1]), # convert the base64 representation to binary
# 'postprocess': lambda x: base64.b64encode(x.json()[0]["mask"]).decode('utf-8'),
# },
# TODO: also: support NER pipeline, object detection, table question answering
'question-answering': {
'inputs': [inputs.Textbox(label="Context", lines=7), inputs.Textbox(label="Question")],
'outputs': [outputs.Textbox(label="Answer"), outputs.Label(label="Score")],
@@ -35,24 +80,23 @@ def get_huggingface_interface(model_name, api_key, alias):
'postprocess': lambda r: (r.json()["answer"], r.json()["score"]),
# 'examples': [['My name is Sarah and I live in London', 'Where do I live?']]
},
'text-generation': {
'inputs': inputs.Textbox(label="Input"),
'outputs': outputs.Textbox(label="Output"),
'preprocess': lambda x: {"inputs": x},
'postprocess': lambda r: r.json()[0]["generated_text"],
# 'examples': [['My name is Clara and I am']]
},
'summarization': {
'inputs': inputs.Textbox(label="Input"),
'outputs': outputs.Textbox(label="Summary"),
'preprocess': lambda x: {"inputs": x},
'postprocess': lambda r: r.json()[0]["summary_text"]
},
'translation': {
'text-classification': {
'inputs': inputs.Textbox(label="Input"),
'outputs': outputs.Textbox(label="Translation"),
'outputs': outputs.Label(label="Classification", type="confidences"),
'preprocess': lambda x: {"inputs": x},
'postprocess': lambda r: r.json()[0]["translation_text"]
'postprocess': lambda r: {i["label"].split(", ")[0]: i["score"] for i in r.json()[0]}
},
'text-generation': {
'inputs': inputs.Textbox(label="Input"),
'outputs': outputs.Textbox(label="Output"),
'preprocess': lambda x: {"inputs": x},
'postprocess': lambda r: r.json()[0]["generated_text"],
},
'text2text-generation': {
'inputs': inputs.Textbox(label="Input"),
@@ -60,18 +104,11 @@ def get_huggingface_interface(model_name, api_key, alias):
'preprocess': lambda x: {"inputs": x},
'postprocess': lambda r: r.json()[0]["generated_text"]
},
'text-classification': {
'translation': {
'inputs': inputs.Textbox(label="Input"),
'outputs': outputs.Label(label="Classification", type="confidences"),
'outputs': outputs.Textbox(label="Translation"),
'preprocess': lambda x: {"inputs": x},
'postprocess': lambda r: {'Negative': r.json()[0][0]["score"],
'Positive': r.json()[0][1]["score"]}
},
'fill-mask': {
'inputs': inputs.Textbox(label="Input"),
'outputs': outputs.Label(label="Classification", type="confidences"),
'preprocess': lambda x: {"inputs": x},
'postprocess': lambda r: {i["token_str"]: i["score"] for i in r.json()}
'postprocess': lambda r: r.json()[0]["translation_text"]
},
'zero-shot-classification': {
'inputs': [inputs.Textbox(label="Input"),
@@ -84,26 +121,7 @@ def get_huggingface_interface(model_name, api_key, alias):
'postprocess': lambda r: {r.json()["labels"][i]: r.json()["scores"][i] for i in
range(len(r.json()["labels"]))}
},
'automatic-speech-recognition': {
'inputs': inputs.Audio(label="Input", source="upload",
type="filepath"),
'outputs': outputs.Textbox(label="Output"),
'preprocess': lambda i: {"inputs": i},
'postprocess': lambda r: r.json()["text"]
},
'image-classification': {
'inputs': inputs.Image(label="Input Image", type="filepath"),
'outputs': outputs.Label(label="Classification", type="confidences"),
'preprocess': lambda i: base64.b64decode(i.split(",")[1]), # convert the base64 representation to binary
'postprocess': lambda r: {i["label"].split(", ")[0]: i["score"] for i in r.json()}
},
'feature-extraction': {
# example model: hf.co/julien-c/distilbert-feature-extraction
'inputs': inputs.Textbox(label="Input"),
'outputs': outputs.Dataframe(label="Output"),
'preprocess': lambda x: {"inputs": x},
'postprocess': lambda r: r.json()[0],
},
# Non-HF pipelines
'sentence-similarity': {
# example model: hf.co/sentence-transformers/distilbert-base-nli-stsb-mean-tokens
'inputs': [
@@ -134,7 +152,7 @@ def get_huggingface_interface(model_name, api_key, alias):
}
if p is None or not(p in pipelines):
print("Warning: no interface information found")
raise ValueError("Unsupported pipeline type: {}".format(type(p)))
pipeline = pipelines[p]
@@ -221,3 +239,138 @@ repos = {
"spaces": get_spaces_interface,
}
def load_from_pipeline(pipeline):
"""
Gets the appropriate Interface kwargs for a given Hugging Face transformers.Pipeline.
pipeline (transformers.Pipeline): the transformers.Pipeline from which to create an interface
Returns:
(dict): a dictionary of kwargs that can be used to construct an Interface object
"""
try:
import transformers
except ImportError:
raise ImportError("transformers not installed. Please try `pip install transformers`")
if not isinstance(pipeline, transformers.Pipeline):
raise ValueError("pipeline must be a transformers.Pipeline")
# Handle the different pipelines. The hasattr() checks make sure the pipeline class exists in the
# version of the transformers library that the user has installed.
if hasattr(transformers, 'AudioClassificationPipeline') and isinstance(pipeline, transformers.AudioClassificationPipeline):
pipeline_info = {
'inputs': inputs.Audio(label="Input", source="microphone",
type="filepath"),
'outputs': outputs.Label(label="Class", type="confidences"),
'preprocess': lambda i: {"inputs": i},
'postprocess': lambda r: {i["label"].split(", ")[0]: i["score"] for i in r}
}
elif hasattr(transformers, 'AutomaticSpeechRecognitionPipeline') and isinstance(pipeline, transformers.AutomaticSpeechRecognitionPipeline):
pipeline_info = {
'inputs': inputs.Audio(label="Input", source="microphone",
type="filepath"),
'outputs': outputs.Textbox(label="Output"),
'preprocess': lambda i: {"inputs": i},
'postprocess': lambda r: r["text"]
}
elif hasattr(transformers, 'FeatureExtractionPipeline') and isinstance(pipeline, transformers.FeatureExtractionPipeline):
pipeline_info = {
'inputs': inputs.Textbox(label="Input"),
'outputs': outputs.Dataframe(label="Output"),
'preprocess': lambda x: {"inputs": x},
'postprocess': lambda r: r[0],
}
elif hasattr(transformers, 'FillMaskPipeline') and isinstance(pipeline, transformers.FillMaskPipeline):
pipeline_info = {
'inputs': inputs.Textbox(label="Input"),
'outputs': outputs.Label(label="Classification", type="confidences"),
'preprocess': lambda x: {"inputs": x},
'postprocess': lambda r: {i["token_str"]: i["score"] for i in r}
}
elif hasattr(transformers, 'ImageClassificationPipeline') and isinstance(pipeline, transformers.ImageClassificationPipeline):
pipeline_info = {
'inputs': inputs.Image(label="Input Image", type="filepath"),
'outputs': outputs.Label(label="Classification", type="confidences"),
'preprocess': lambda i: {"images": i},
'postprocess': lambda r: {i["label"].split(", ")[0]: i["score"] for i in r}
}
elif hasattr(transformers, 'QuestionAnsweringPipeline') and isinstance(pipeline, transformers.QuestionAnsweringPipeline):
pipeline_info = {
'inputs': [inputs.Textbox(label="Context", lines=7), inputs.Textbox(label="Question")],
'outputs': [outputs.Textbox(label="Answer"), outputs.Label(label="Score")],
'preprocess': lambda c, q: {"context": c, "question": q},
'postprocess': lambda r: (r["answer"], r["score"]),
}
elif hasattr(transformers, 'SummarizationPipeline') and isinstance(pipeline, transformers.SummarizationPipeline):
pipeline_info = {
'inputs': inputs.Textbox(label="Input", lines=7),
'outputs': outputs.Textbox(label="Summary"),
'preprocess': lambda x: {"inputs": x},
'postprocess': lambda r: r[0]["summary_text"]
}
elif hasattr(transformers, 'TextClassificationPipeline') and isinstance(pipeline, transformers.TextClassificationPipeline):
pipeline_info = {
'inputs': inputs.Textbox(label="Input"),
'outputs': outputs.Label(label="Classification", type="confidences"),
'preprocess': lambda x: [x],
'postprocess': lambda r: {i["label"].split(", ")[0]: i["score"] for i in r}
}
elif hasattr(transformers, 'TextGenerationPipeline') and isinstance(pipeline, transformers.TextGenerationPipeline):
pipeline_info = {
'inputs': inputs.Textbox(label="Input"),
'outputs': outputs.Textbox(label="Output"),
'preprocess': lambda x: {"text_inputs": x},
'postprocess': lambda r: r[0]["generated_text"],
}
elif hasattr(transformers, 'TranslationPipeline') and isinstance(pipeline, transformers.TranslationPipeline):
pipeline_info = {
'inputs': inputs.Textbox(label="Input"),
'outputs': outputs.Textbox(label="Translation"),
'preprocess': lambda x: [x],
'postprocess': lambda r: r[0]["translation_text"]
}
elif hasattr(transformers, 'Text2TextGenerationPipeline') and isinstance(pipeline, transformers.Text2TextGenerationPipeline):
pipeline_info = {
'inputs': inputs.Textbox(label="Input"),
'outputs': outputs.Textbox(label="Generated Text"),
'preprocess': lambda x: [x],
'postprocess': lambda r: r[0]["generated_text"]
}
elif hasattr(transformers, 'ZeroShotClassificationPipeline') and isinstance(pipeline, transformers.ZeroShotClassificationPipeline):
pipeline_info = {
'inputs': [inputs.Textbox(label="Input"),
inputs.Textbox(label="Possible class names ("
"comma-separated)"),
inputs.Checkbox(label="Allow multiple true classes")],
'outputs': outputs.Label(label="Classification", type="confidences"),
'preprocess': lambda i, c, m: {"sequences": i,
"candidate_labels": c, "multi_label": m},
'postprocess': lambda r: {r["labels"][i]: r["scores"][i] for i in
range(len(r["labels"]))}
}
else:
raise ValueError("Unsupported pipeline type: {}".format(type(pipeline)))
# define the function that will be called by the Interface
def fn(*params):
data = pipeline_info["preprocess"](*params)
# special cases that need to be handled differently
if isinstance(pipeline, (transformers.TextClassificationPipeline,
transformers.Text2TextGenerationPipeline,
transformers.TranslationPipeline)):
data = pipeline(*data)
else:
data = pipeline(**data)
# print("Before postprocessing", data)
output = pipeline_info["postprocess"](data)
return output
interface_info = pipeline_info.copy()
interface_info["fn"] = fn
del interface_info["preprocess"]
del interface_info["postprocess"]
# use the model class name as the title of the Interface
interface_info["title"] = pipeline.model.__class__.__name__
return interface_info
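For context, a minimal usage sketch of the two loading paths this file supports, mirroring the tests later in this diff (the local text-classification pipeline is an illustrative assumption, not part of the commit):
import gradio as gr
import transformers
# Path 1: a hosted model served over the Hugging Face Inference API;
# load_interface() resolves it through the `pipelines` dict above and
# returns plain Interface kwargs.
interface_info = gr.external.load_interface(
    "models/distilbert-base-uncased-finetuned-sst-2-english")
# Path 2: a locally instantiated transformers.Pipeline (illustrative task);
# the new load_from_pipeline() above also returns plain Interface kwargs.
pipeline_info = gr.external.load_from_pipeline(
    transformers.pipeline("text-classification"))
gr.Interface(**pipeline_info).launch()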

View File

@@ -1021,6 +1021,8 @@ class Audio(InputComponent):
return processing_utils.encode_file_to_base64(x, type="audio")
def serialize(self, x, called_directly):
if x is None:
return None
if self.type == "filepath" or called_directly:
name = x
elif self.type == "file":

View File

@@ -22,7 +22,7 @@ from gradio import networking, strings, utils, encryptor, queue
from gradio.inputs import get_input_instance
from gradio.outputs import get_output_instance
from gradio.interpretation import quantify_difference_in_label, get_regression_or_classification_value
from gradio.external import load_interface
from gradio.external import load_interface, load_from_pipeline
class Interface:
@@ -59,6 +59,19 @@ class Interface:
interface.api_mode = True # set api mode to true so that the interface will not preprocess/postprocess
return interface
@classmethod
def from_pipeline(cls, pipeline, **kwargs):
"""
Class method to construct an Interface from a Hugging Face transformers.Pipeline.
pipeline (transformers.Pipeline): the pipeline from which to construct the Interface
Returns:
(gradio.Interface): a Gradio Interface object from the given Pipeline
"""
interface_info = load_from_pipeline(pipeline)
kwargs = dict(interface_info, **kwargs)
interface = cls(**kwargs)
return interface
def __init__(self, fn, inputs=None, outputs=None, verbose=None, examples=None,
examples_per_page=10, live=False, layout="unaligned", show_input=True, show_output=True,
capture_session=None, interpretation=None, num_shap=2.0, theme=None, repeat_outputs_per_model=True,
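A short, hedged sketch of the new classmethod in use, mirroring the test added later in this diff; the title kwarg is an illustrative assumption showing that user kwargs override the pipeline-derived ones via dict(interface_info, **kwargs):
import gradio as gr
import transformers
# from_pipeline() derives fn/inputs/outputs from the pipeline type;
# the title below (illustrative) overrides the auto-generated one.
p = transformers.pipeline("question-answering")
io = gr.Interface.from_pipeline(p, title="Question Answering")
io.launch()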

View File

@@ -18,7 +18,7 @@ en = {
"COLAB_DEBUG_TRUE": "Colab notebook detected. This cell will run indefinitely so that you can see errors and logs. "
"To turn off, set debug=False in launch().",
"COLAB_DEBUG_FALSE": "Colab notebook detected. To show errors in colab notebook, set debug=True in launch()",
"SHARE_LINK_MESSAGE": "\nThis share link will expire in 72 hours. To get longer links, send an email to: support@gradio.app",
"SHARE_LINK_MESSAGE": "\nThis share link expires in 72 hours. For free permanent hosting, check out Spaces (https://huggingface.co/spaces)",
"PRIVATE_LINK_MESSAGE": "Since this is a private endpoint, this share link will never expire.",
"INLINE_DISPLAY_BELOW": "Interface loading below...",
"MEDIA_PERMISSIONS_IN_COLAB": "Your interface requires microphone or webcam permissions - this may cause issues in Colab. Use the External URL in case of issues.",

View File

@@ -1,18 +1,18 @@
{
"files": {
"main.css": "/static/css/main.6ccf3d75.css",
"main.css": "/static/css/main.1d47367f.css",
"main.js": "/static/bundle.js",
"index.html": "/index.html",
"static/media/arrow-left.e497f657.svg": "/static/media/arrow-left.e497f657.svg",
"static/media/arrow-right.ea6059fd.svg": "/static/media/arrow-right.ea6059fd.svg",
"static/media/clear.33f9b5f3.svg": "/static/media/clear.33f9b5f3.svg",
"static/media/edit.44bd4fe1.svg": "/static/media/edit.44bd4fe1.svg",
"static/media/logo.411acfd1.svg": "/static/media/logo.411acfd1.svg",
"static/media/arrow-left.794a4706.svg": "/static/media/arrow-left.794a4706.svg",
"static/media/arrow-right.5a7d4ada.svg": "/static/media/arrow-right.5a7d4ada.svg",
"static/media/clear.85cf6de8.svg": "/static/media/clear.85cf6de8.svg",
"static/media/edit.c6b7d6f7.svg": "/static/media/edit.c6b7d6f7.svg",
"static/media/logo.36a8f455.svg": "/static/media/logo.36a8f455.svg",
"static/media/logo_loading.e93acd82.jpg": "/static/media/logo_loading.e93acd82.jpg"
},
"entrypoints": [
"static/bundle.css",
"static/css/main.6ccf3d75.css",
"static/css/main.1d47367f.css",
"static/bundle.js"
]
}

View File

@@ -8,4 +8,4 @@
window.config = {{ config|tojson }};
} catch (e) {
window.config = {};
}</script><script src="https://cdnjs.cloudflare.com/ajax/libs/iframe-resizer/4.3.1/iframeResizer.contentWindow.min.js"></script><title>Gradio</title><link href="static/bundle.css" rel="stylesheet"><link href="static/css/main.6ccf3d75.css" rel="stylesheet"></head><body style="height:100%"><div id="root" style="height:100%"></div><script src="static/bundle.js"></script></body></html>
}</script><script src="https://cdnjs.cloudflare.com/ajax/libs/iframe-resizer/4.3.1/iframeResizer.contentWindow.min.js"></script><title>Gradio</title><link href="static/bundle.css" rel="stylesheet"><link href="static/css/main.1d47367f.css" rel="stylesheet"></head><body style="height:100%"><div id="root" style="height:100%"></div><script src="static/bundle.js"></script></body></html>

View File

@@ -5,7 +5,7 @@ except ImportError:
setup(
name='gradio',
version='2.5.2',
version='2.5.3',
include_package_data=True,
description='Python library for easily interacting with trained machine learning models',
author='Abubakar Abid',

View File

@@ -2,6 +2,8 @@ import unittest
import pathlib
import gradio as gr
import os
import transformers
"""
WARNING: These tests have an external dependency: namely that Hugging Face's Hub and Spaces APIs do not change, and that they keep their most famous models up. So if, e.g., Spaces is down, then these tests will not pass.
@@ -153,7 +155,7 @@ class TestLoadInterface(unittest.TestCase):
io = gr.Interface(**interface_info)
io.api_mode = True
output = io("I am happy, I love you.")
self.assertGreater(output['Positive'], 0.5)
self.assertGreater(output['POSITIVE'], 0.5)
def test_image_classification_model(self):
interface_info = gr.external.load_interface("models/google/vit-base-patch16-224")
@@ -176,6 +178,13 @@ class TestLoadInterface(unittest.TestCase):
output = io("male", 77, 10)
self.assertLess(output['Survives'], 0.5)
def test_speech_recognition_model(self):
interface_info = gr.external.load_interface("models/jonatasgrosman/wav2vec2-large-xlsr-53-english")
io = gr.Interface(**interface_info)
io.api_mode = True
output = io("test/test_data/test_audio.wav")
self.assertIsNotNone(output)
def test_image_to_image_space(self):
def assertIsFile(path):
if not pathlib.Path(path).resolve().is_file():
@@ -187,5 +196,13 @@ class TestLoadInterface(unittest.TestCase):
output = io("test/test_data/lion.jpg")
assertIsFile(output)
class TestLoadFromPipeline(unittest.TestCase):
def test_question_answering(self):
p = transformers.pipeline("question-answering")
io = gr.Interface.from_pipeline(p)
output = io("My name is Sylvain and I work at Hugging Face in Brooklyn", "Where do I work?")
self.assertIsNotNone(output)
if __name__ == '__main__':
unittest.main()

View File

@@ -101,7 +101,7 @@ class TestInterface(unittest.TestCase):
def test_interface_load(self):
io = Interface.load("models/distilbert-base-uncased-finetuned-sst-2-english", alias="sentiment_classifier")
output = io("I am happy, I love you.")
self.assertGreater(output['Positive'], 0.5)
self.assertGreater(output['POSITIVE'], 0.5)
def test_interface_none_interp(self):
interface = Interface(lambda x: x, "textbox", "label", interpretation=[None])

View File

@@ -14,11 +14,12 @@ sys.path.insert(0, GRADIO_DEMO_DIR)
with open("demos.json") as demos_file:
demo_port_sets = json.load(demos_file)
def launch_demo(demo_file):
subprocess.call(f"python {demo_file}", shell=True)
def launch_demo(demo_folder):
subprocess.call(f"cd {demo_folder} && python run.py", shell=True)
for demo_name, port in demo_port_sets:
demo_file = os.path.join(GRADIO_DEMO_DIR, demo_name, "run.py")
demo_folder = os.path.join(GRADIO_DEMO_DIR, demo_name)
demo_file = os.path.join(demo_folder, "run.py")
with open(demo_file, 'r') as file:
filedata = file.read()
filedata = filedata.replace(
@@ -26,7 +27,7 @@ for demo_name, port in demo_port_sets:
f'if __name__ == "__main__":\n iface.server_port={port}')
with open(demo_file, 'w') as file:
file.write(filedata)
demo_thread = threading.Thread(target=launch_demo, args=(demo_file,))
demo_thread = threading.Thread(target=launch_demo, args=(demo_folder,))
demo_thread.start()
start_time = time.time()

View File

@@ -0,0 +1,124 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Gradio</title>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/tailwindcss/2.2.19/tailwind.min.css"/>
<link href="/gradio_static/bundle.css" rel="stylesheet">
<style></style>
</head>
<body>
<div class="bg-gray-100 p-2">
<div class="max-w-5xl mx-auto">
<div class="mb-2 text-center text-gray-800 font-semibold">THEME</div>
<div class="theme-set mb-2 flex flex-wrap gap-x-2 gap-y-0.5 justify-center"></div>
<div class="mb-2 text-center text-gray-800 font-semibold">
INPUTS (<a class="underline hover:text-blue-600" href="javascript:show_all_inputs();">all</a> / <a class="underline hover:text-blue-600" href="javascript:hide_all_inputs();">none</a>)
</div>
<div class="input-components mb-2 flex flex-wrap gap-x-2 gap-y-0.5 justify-center"></div>
<div class="mb-2 text-center text-gray-800 font-semibold">
OUTPUTS (<a class="underline hover:text-blue-600" href="javascript:show_all_outputs();">all</a> / <a class="underline hover:text-blue-600" href="javascript:hide_all_outputs();">none</a>)
</div>
<div class="output-components mb-2 flex flex-wrap gap-x-2 gap-y-0.5 justify-center"></div>
</div>
</div>
<div id="target"></div>
<script>
var set_theme = theme => {
if (theme.startsWith("dark")) {
document.querySelector("#target").classList.add("dark");
theme = theme.substring(4);
} else {
document.querySelector("#target").classList.remove("dark");
}
document.querySelector(".gradio_bg").setAttribute("theme", theme);
}
var hidden_inputs = new Set();
var hidden_outputs = new Set();
var reset_component_visibility = () => {
let style_element = document.querySelector("style");
let style_html = "";
if (hidden_inputs.size > 0) {
let inputs_selected = Array(...hidden_inputs).map(i => `.panel:nth-child(1) .component:nth-child(${i + 1})`).join(", ");
style_html += `${inputs_selected} { display: none !important }`;
}
if (hidden_outputs.size > 0) {
let outputs_selected = Array(...hidden_outputs).map(i => `.panel:nth-child(2) .component:nth-child(${i + 2})`).join(", ");
style_html += `${outputs_selected} { display: none !important }`;
}
style_element.innerHTML = style_html;
}
var toggle_input = index => {
if (hidden_inputs.has(index)) {
hidden_inputs.delete(index)
} else {
hidden_inputs.add(index)
}
reset_component_visibility();
}
var toggle_output = index => {
if (hidden_outputs.has(index)) {
hidden_outputs.delete(index)
} else {
hidden_outputs.add(index)
}
reset_component_visibility();
}
var show_all_inputs = () => {
document.querySelectorAll(".input-components input").forEach(n => n.checked = true);
hidden_inputs.clear();
reset_component_visibility();
}
var hide_all_inputs = () => {
document.querySelectorAll(".input-components input").forEach(n => n.checked = false);
window.config.input_components.forEach((_, i) => hidden_inputs.add(i));
reset_component_visibility();
}
var show_all_outputs = () => {
document.querySelectorAll(".output-components input").forEach(n => n.checked = true);
hidden_outputs.clear();
reset_component_visibility();
}
var hide_all_outputs = () => {
document.querySelectorAll(".output-components input").forEach(n => n.checked = false);
window.config.output_components.forEach((_, i) => hidden_outputs.add(i));
reset_component_visibility();
}
</script>
<script>
var demo_endpoint = "/demo";
window.config = [];
let themes = ["default", "huggingface", "grass", "peach", "darkdefault", "darkhuggingface", "darkgrass", "darkpeach"]
let theme_html = "";
for (let theme of themes) {
theme_html += `<label><input type="radio" name="theme" onclick="set_theme('${theme}')"> ${theme}</label>`;
}
document.querySelector(".theme-set").innerHTML = theme_html;
fetch('/demo/kitchen_sink/config')
.then(response => response.json())
.then(demo => {
demo.target = "target";
demo.endpoint = demo_endpoint + "/kitchen_sink/api/";
window.config = demo;
let input_html = "";
for (let [i, input_component] of demo["input_components"].entries()) {
input_html += `<label><input type="checkbox" checked name="input" onchange="toggle_input(${i})"> ${input_component["label"]}</label>`;
}
document.querySelector(".input-components").innerHTML = input_html;
let output_html = "";
for (let [i, output_component] of demo["output_components"].entries()) {
output_html += `<label><input type="checkbox" checked name="output" onchange="toggle_output(${i})"> ${output_component["label"]}</label>`;
}
document.querySelector(".output-components").innerHTML = output_html;
});
</script>
<script async src="/gradio_static/bundle.js"></script>
</body>
</html>

View File

@@ -1,48 +0,0 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Gradio</title>
<link href="/gradio_static/bundle.css" rel="stylesheet">
</head>
<body>
<div style="font-family:Verdana, Geneva, Tahoma, sans-serif; display: flex; justify-content: center; gap: 2rem; background-color: lightgray; padding: 2rem">
<label><input type="radio" name="theme" value="default"> default</label>
<label><input type="radio" name="theme" value="huggingface"> huggingface</label>
<label><input type="radio" name="theme" value="grass"> grass</label>
<label><input type="radio" name="theme" value="peach"> peach</label>
<label><input type="radio" name="theme" value="darkdefault"> darkdefault</label>
<label><input type="radio" name="theme" value="darkhuggingface"> darkhuggingface</label>
<label><input type="radio" name="theme" value="darkgrass"> darkgrass</label>
<label><input type="radio" name="theme" value="darkpeach"> darkpeach</label>
</div>
<div id="target"></div>
<script>
var demo_endpoint = "/demo";
window.config = [];
fetch('/demo/kitchen_sink/config')
.then(response => response.json())
.then(demo => {
demo.target = "target";
demo.endpoint = demo_endpoint + "/" + demo_name + "/api/";
window.config = demo;
});
</script>
<script async src="/gradio_static/bundle.js"></script>
<script>
var set_theme = theme => {
if (theme.startswith("dark")) {
document.querySelector("#target").classList.add("dark");
theme = theme.substring(5);
}
document.querySelector(".gradio_bg").setAttribute(theme);
}
</script>
</body>
</html>

website/nginx.conf (new file, 28 lines)
View File

@@ -0,0 +1,28 @@
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 256;
multi_accept on;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
client_max_body_size 200M;
client_body_buffer_size 20M;
server {
listen 80;
location / {
proxy_pass http://localhost:8080/;
}
location /demo/ {
proxy_pass http://localhost:8070/demo/;
}
}
}
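A quick, hedged sanity check of the routing above, assuming nginx and both upstream servers are running locally (the /demo/kitchen_sink/config path appears in the demo page earlier in this diff):
import requests
# nginx listens on port 80: / is proxied to the website server (8080),
# /demo/ to the demo server (8070); 200s mean both routes resolve.
print(requests.get("http://localhost/").status_code)
print(requests.get("http://localhost/demo/kitchen_sink/config").status_code)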