merge chagnes
1
.gitignore
vendored
@ -38,6 +38,7 @@ demo/files/*.mp4
|
||||
*.bak
|
||||
workspace.code-workspace
|
||||
*.h5
|
||||
.vscode/
|
||||
|
||||
# log files
|
||||
.pnpm-debug.log
|
@ -1,6 +1,6 @@
|
||||
Metadata-Version: 1.0
|
||||
Name: gradio
|
||||
Version: 2.8.9
|
||||
Version: 2.8.14
|
||||
Summary: Python library for easily interacting with trained machine learning models
|
||||
Home-page: https://github.com/gradio-app/gradio-UI
|
||||
Author: Abubakar Abid, Ali Abid, Ali Abdalla, Dawood Khan, Ahsen Khaliq
|
||||
|
@ -1,3 +1,4 @@
|
||||
import os
|
||||
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple
|
||||
|
||||
from gradio import utils
|
||||
@ -5,10 +6,14 @@ from gradio.context import Context
|
||||
from gradio.launchable import Launchable
|
||||
from gradio.routes import PredictBody
|
||||
|
||||
if TYPE_CHECKING: # Only import for type checking (is False at runtime).
|
||||
from gradio.component import Component
|
||||
|
||||
|
||||
class Block:
|
||||
def __init__(self):
|
||||
def __init__(self, css=None):
|
||||
self._id = Context.id
|
||||
self.css = css
|
||||
Context.id += 1
|
||||
if Context.block is not None:
|
||||
Context.block.children.append(self)
|
||||
@ -31,6 +36,66 @@ class Block:
|
||||
}
|
||||
)
|
||||
|
||||
def change(
|
||||
self, fn: str, inputs: List["Component"], outputs: List["Component"]
|
||||
) -> None:
|
||||
"""
|
||||
Adds change event to the component's dependencies.
|
||||
|
||||
Whenever the component changes the function is triggered.
|
||||
|
||||
Parameters:
|
||||
fn: function name
|
||||
inputs: input list
|
||||
outputs: output list
|
||||
|
||||
Returns: None
|
||||
|
||||
"""
|
||||
if not isinstance(inputs, list):
|
||||
inputs = [inputs]
|
||||
if not isinstance(outputs, list):
|
||||
outputs = [outputs]
|
||||
Context.root_block.fns.append(fn)
|
||||
Context.root_block.dependencies.append(
|
||||
{
|
||||
"targets": [self._id],
|
||||
"trigger": "change",
|
||||
"inputs": [block._id for block in inputs],
|
||||
"outputs": [block._id for block in outputs],
|
||||
}
|
||||
)
|
||||
|
||||
def save(
|
||||
self, fn: str, inputs: List["Component"], outputs: List["Component"]
|
||||
) -> None:
|
||||
"""
|
||||
Adds save event to the component's dependencies.
|
||||
|
||||
Whenever the component is saved the function is triggered.
|
||||
|
||||
Parameters:
|
||||
fn: function name
|
||||
inputs: input list
|
||||
outputs: output list
|
||||
|
||||
Returns: None
|
||||
|
||||
"""
|
||||
if not isinstance(inputs, list):
|
||||
inputs = [inputs]
|
||||
if not isinstance(outputs, list):
|
||||
outputs = [outputs]
|
||||
Context.root_block.fns.append(fn)
|
||||
Context.root_block.dependencies.append(
|
||||
{
|
||||
"targets": [self._id],
|
||||
"trigger": "save",
|
||||
"inputs": [block._id for block in inputs],
|
||||
"outputs": [block._id for block in outputs],
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
class BlockContext(Block):
|
||||
def __init__(self):
|
||||
@ -74,6 +139,7 @@ class Blocks(Launchable, BlockContext):
|
||||
self.theme = theme
|
||||
self.requires_permissions = False # TODO: needs to be implemented
|
||||
self.enable_queue = False
|
||||
self.is_space = True if os.getenv("SYSTEM") == "spaces" else False
|
||||
self.stateful = False # TODO: implement state
|
||||
|
||||
super().__init__()
|
||||
|
@ -4,7 +4,7 @@ import re
|
||||
|
||||
import requests
|
||||
|
||||
from gradio import inputs, outputs
|
||||
from gradio import inputs, outputs, utils
|
||||
|
||||
|
||||
def get_huggingface_interface(model_name, api_key, alias):
|
||||
@ -203,6 +203,13 @@ def get_huggingface_interface(model_name, api_key, alias):
|
||||
"preprocess": lambda x: {"inputs": x},
|
||||
"postprocess": encode_to_base64,
|
||||
},
|
||||
"token-classification": {
|
||||
# example model: hf.co/huggingface-course/bert-finetuned-ner
|
||||
"inputs": inputs.Textbox(label="Input"),
|
||||
"outputs": outputs.HighlightedText(label="Output"),
|
||||
"preprocess": lambda x: {"inputs": x},
|
||||
"postprocess": lambda r: r, # Handled as a special case in query_huggingface_api()
|
||||
},
|
||||
}
|
||||
|
||||
if p is None or not (p in pipelines):
|
||||
@ -225,6 +232,12 @@ def get_huggingface_interface(model_name, api_key, alias):
|
||||
response.status_code
|
||||
)
|
||||
)
|
||||
if (
|
||||
p == "token-classification"
|
||||
): # Handle as a special case since HF API only returns the named entities and we need the input as well
|
||||
ner_groups = response.json()
|
||||
input_string = params[0]
|
||||
response = utils.format_ner_list(input_string, ner_groups)
|
||||
output = pipeline["postprocess"](response)
|
||||
return output
|
||||
|
||||
@ -288,8 +301,8 @@ def interface_params_from_config(config_dict):
|
||||
def get_spaces_interface(model_name, api_key, alias):
|
||||
space_url = "https://huggingface.co/spaces/{}".format(model_name)
|
||||
print("Fetching interface from: {}".format(space_url))
|
||||
iframe_url = "https://huggingface.co/gradioiframe/{}/+".format(model_name)
|
||||
api_url = "https://huggingface.co/gradioiframe/{}/api/predict/".format(model_name)
|
||||
iframe_url = "https://hf.space/embed/{}/+".format(model_name)
|
||||
api_url = "https://hf.space/embed/{}/api/predict/".format(model_name)
|
||||
headers = {"Content-Type": "application/json"}
|
||||
|
||||
r = requests.get(iframe_url)
|
||||
|
@ -9,7 +9,7 @@ from abc import ABC, abstractmethod
|
||||
from typing import Any, List, Optional
|
||||
|
||||
import gradio as gr
|
||||
from gradio import encryptor
|
||||
from gradio import encryptor, utils
|
||||
|
||||
|
||||
class FlaggingCallback(ABC):
|
||||
@ -98,7 +98,7 @@ class SimpleCSVLogger(FlaggingCallback):
|
||||
)
|
||||
|
||||
with open(log_filepath, "a", newline="") as csvfile:
|
||||
writer = csv.writer(csvfile)
|
||||
writer = csv.writer(csvfile, quoting=csv.QUOTE_NONNUMERIC, quotechar="'")
|
||||
writer.writerow(csv_data)
|
||||
|
||||
with open(log_filepath, "r") as csvfile:
|
||||
@ -185,7 +185,7 @@ class CSVLogger(FlaggingCallback):
|
||||
flag_col_index = header.index("flag")
|
||||
content[flag_index][flag_col_index] = flag_option
|
||||
output = io.StringIO()
|
||||
writer = csv.writer(output)
|
||||
writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC, quotechar="'")
|
||||
writer.writerows(content)
|
||||
return output.getvalue()
|
||||
|
||||
@ -201,7 +201,7 @@ class CSVLogger(FlaggingCallback):
|
||||
if flag_index is not None:
|
||||
file_content = replace_flag_at_index(file_content)
|
||||
output.write(file_content)
|
||||
writer = csv.writer(output)
|
||||
writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC, quotechar="'")
|
||||
if flag_index is None:
|
||||
if is_new:
|
||||
writer.writerow(headers)
|
||||
@ -215,7 +215,9 @@ class CSVLogger(FlaggingCallback):
|
||||
else:
|
||||
if flag_index is None:
|
||||
with open(log_fp, "a", newline="") as csvfile:
|
||||
writer = csv.writer(csvfile)
|
||||
writer = csv.writer(
|
||||
csvfile, quoting=csv.QUOTE_NONNUMERIC, quotechar="'"
|
||||
)
|
||||
if is_new:
|
||||
writer.writerow(headers)
|
||||
writer.writerow(csv_data)
|
||||
@ -328,7 +330,7 @@ class HuggingFaceDatasetSaver(FlaggingCallback):
|
||||
for i, component in enumerate(interface.input_components):
|
||||
component_label = interface.config["input_components"][i][
|
||||
"label"
|
||||
] or "Input_{}".format(i)
|
||||
] or "input_{}".format(i)
|
||||
headers.append(component_label)
|
||||
infos["flagged"]["features"][component_label] = {
|
||||
"dtype": "string",
|
||||
@ -346,7 +348,7 @@ class HuggingFaceDatasetSaver(FlaggingCallback):
|
||||
for i, component in enumerate(interface.output_components):
|
||||
component_label = interface.config["output_components"][i][
|
||||
"label"
|
||||
] or "Output_{}".format(i)
|
||||
] or "output_{}".format(i)
|
||||
headers.append(component_label)
|
||||
infos["flagged"]["features"][component_label] = {
|
||||
"dtype": "string",
|
||||
@ -375,7 +377,7 @@ class HuggingFaceDatasetSaver(FlaggingCallback):
|
||||
for i, component in enumerate(interface.input_components):
|
||||
label = interface.config["input_components"][i][
|
||||
"label"
|
||||
] or "Input_{}".format(i)
|
||||
] or "input_{}".format(i)
|
||||
filepath = component.save_flagged(
|
||||
self.dataset_dir, label, input_data[i], None
|
||||
)
|
||||
@ -387,7 +389,7 @@ class HuggingFaceDatasetSaver(FlaggingCallback):
|
||||
for i, component in enumerate(interface.output_components):
|
||||
label = interface.config["output_components"][i][
|
||||
"label"
|
||||
] or "Output_{}".format(i)
|
||||
] or "output_{}".format(i)
|
||||
filepath = (
|
||||
component.save_flagged(
|
||||
self.dataset_dir, label, output_data[i], None
|
||||
|
@ -31,7 +31,6 @@ from gradio.process_examples import load_from_cache, process_example
|
||||
from gradio.routes import PredictBody
|
||||
|
||||
if TYPE_CHECKING: # Only import for type checking (is False at runtime).
|
||||
import flask
|
||||
import transformers
|
||||
|
||||
|
||||
@ -580,19 +579,24 @@ class Interface(Launchable):
|
||||
flag_option="" if self.flagging_options else None,
|
||||
username=username,
|
||||
)
|
||||
if self.stateful:
|
||||
updated_state = prediction[self.state_return_index]
|
||||
prediction[self.state_return_index] = None
|
||||
else:
|
||||
updated_state = None
|
||||
if self.stateful:
|
||||
updated_state = prediction[self.state_return_index]
|
||||
prediction[self.state_return_index] = None
|
||||
else:
|
||||
updated_state = None
|
||||
|
||||
return {
|
||||
durations = durations
|
||||
avg_durations = self.config.get("avg_durations")
|
||||
response = {
|
||||
"data": prediction,
|
||||
"durations": durations,
|
||||
"avg_durations": self.config.get("avg_durations"),
|
||||
"flag_index": flag_index,
|
||||
"updated_state": updated_state,
|
||||
}
|
||||
if durations is not None:
|
||||
response["durations"] = durations
|
||||
if avg_durations is not None:
|
||||
response["avg_durations"] = avg_durations
|
||||
return response
|
||||
|
||||
def process(self, raw_input: List[Any]) -> Tuple[List[Any], List[float]]:
|
||||
"""
|
||||
|
@ -11,7 +11,7 @@ from gradio import encryptor, networking, queueing, strings, utils # type: igno
|
||||
from gradio.process_examples import cache_interface_examples
|
||||
|
||||
if TYPE_CHECKING: # Only import for type checking (is False at runtime).
|
||||
import flask
|
||||
import fastapi
|
||||
|
||||
|
||||
class Launchable:
|
||||
@ -42,7 +42,7 @@ class Launchable:
|
||||
ssl_keyfile: Optional[str] = None,
|
||||
ssl_certfile: Optional[str] = None,
|
||||
ssl_keyfile_password: Optional[str] = None,
|
||||
) -> Tuple[flask.Flask, str, str]:
|
||||
) -> Tuple[fastapi.FastAPI, str, str]:
|
||||
"""
|
||||
Launches the webserver that serves the UI for the interface.
|
||||
Parameters:
|
||||
@ -68,7 +68,7 @@ class Launchable:
|
||||
ssl_certfile (str): If a path to a file is provided, will use this as the signed certificate for https. Needs to be provided if ssl_keyfile is provided.
|
||||
ssl_keyfile_password (str): If a password is provided, will use this with the ssl certificate for https.
|
||||
Returns:
|
||||
app (flask.Flask): Flask app object
|
||||
app (fastapi.FastAPI): FastAPI app object
|
||||
path_to_local_server (str): Locally accessible link
|
||||
share_url (str): Publicly accessible link (if share=True)
|
||||
"""
|
||||
|
@ -54,7 +54,7 @@ def cache_interface_examples(interface: Interface) -> None:
|
||||
def load_from_cache(interface: Interface, example_id: int) -> List[Any]:
|
||||
"""Loads a particular cached example for the interface."""
|
||||
with open(CACHE_FILE) as cache:
|
||||
examples = list(csv.reader(cache))
|
||||
examples = list(csv.reader(cache, quotechar="'"))
|
||||
example = examples[example_id + 1] # +1 to adjust for header
|
||||
output = []
|
||||
for component, cell in zip(interface.output_components, example):
|
||||
|
@ -224,8 +224,13 @@ def api_docs(request: Request):
|
||||
output_types_doc, output_types = get_types(outputs, "output")
|
||||
input_names = [type(inp).__name__ for inp in app.launchable.input_components]
|
||||
output_names = [type(out).__name__ for out in app.launchable.output_components]
|
||||
if app.launchable.examples is not None:
|
||||
sample_inputs = app.launchable.examples[0]
|
||||
if isinstance(app.launchable.examples, list):
|
||||
example = app.launchable.examples[0]
|
||||
sample_inputs = []
|
||||
for index, example_input in enumerate(example):
|
||||
sample_inputs.append(
|
||||
app.launchable.input_components[index].preprocess_example(example_input)
|
||||
)
|
||||
else:
|
||||
sample_inputs = [
|
||||
inp.generate_sample() for inp in app.launchable.input_components
|
||||
|
@ -22,6 +22,8 @@
|
||||
<noscript>
|
||||
<link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Open+Sans&display=swap">
|
||||
</noscript>
|
||||
<link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Open+Sans&display=swap">
|
||||
<link rel="stylesheet" href="https://gradio.app/assets/prism.css">
|
||||
|
||||
<style>
|
||||
html {
|
||||
@ -211,6 +213,36 @@
|
||||
color: grey !important;
|
||||
pointer-events: none;
|
||||
}
|
||||
.copy {
|
||||
float: right;
|
||||
padding-right: 1em;
|
||||
background: whitesmoke;
|
||||
border: none !important;
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
.float-left {
|
||||
float:left;
|
||||
width: 90%;
|
||||
overflow: auto;
|
||||
}
|
||||
|
||||
.copy-svg {
|
||||
visibility: hidden;
|
||||
margin: 1em 0 0 0 !important;
|
||||
width: 20px;
|
||||
}
|
||||
|
||||
.code-block:hover .copy-svg {
|
||||
visibility: visible;
|
||||
}
|
||||
|
||||
pre {
|
||||
float:left;
|
||||
width: 90%;
|
||||
overflow: auto !important;
|
||||
background: inherit !important;
|
||||
}
|
||||
</style>
|
||||
|
||||
<meta property="og:url" content="https://gradio.app/" />
|
||||
@ -286,23 +318,31 @@
|
||||
</ul>
|
||||
|
||||
<h4 id="payload">Payload: </h4>
|
||||
<div class="json">
|
||||
<div class="json code-block">
|
||||
<div class="float-left">
|
||||
<p>  {</p>
|
||||
<p>    "data": [{%for i in range(0, len_inputs)%} <span>{{ input_types[i]
|
||||
}}</span>{% if i != len_inputs - 1 %} ,{% endif %}{%endfor%} ]</p>
|
||||
<p>  }</p>
|
||||
</div>
|
||||
<button class="copy" onclick="copyCode(this)"><svg class="copy-svg" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" style="fill: #808080;"><path d="M320 448v40c0 13.255-10.745 24-24 24H24c-13.255 0-24-10.745-24-24V120c0-13.255 10.745-24 24-24h72v296c0 30.879 25.121 56 56 56h168zm0-344V0H152c-13.255 0-24 10.745-24 24v368c0 13.255 10.745 24 24 24h272c13.255 0 24-10.745 24-24V128H344c-13.2 0-24-10.8-24-24zm120.971-31.029L375.029 7.029A24 24 0 0 0 358.059 0H352v96h96v-6.059a24 24 0 0 0-7.029-16.97z"/></svg>
|
||||
<div></div></button>
|
||||
</div>
|
||||
{% if auth is not none %}
|
||||
<p>Note: This interface requires authentication. This means you will have to first post to the login api before you can post to the predict endpoint. See below for more info </p>
|
||||
{% endif %}
|
||||
<h4 id="response">Response: </h4>
|
||||
<div class="json">
|
||||
<div class="json code-block">
|
||||
<div class="float-left">
|
||||
<p>  {</p>
|
||||
<p>    "data": [{%for i in range(0, len_outputs)%} <span>{{ output_types[i]
|
||||
}}</span>{% if i != len_outputs - 1 %} ,{% endif %}{%endfor%} ],</p>
|
||||
<p>    "durations": [ float ], # the time taken for the prediction to complete</p>
|
||||
<p>    "avg_durations": [ float ] # the average time taken for all predictions so far (used to estimate the runtime)</p>
|
||||
<p>  }</p>
|
||||
</div>
|
||||
<button class="copy" onclick="copyCode(this)"><svg class="copy-svg" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" style="fill: #808080;"><path d="M320 448v40c0 13.255-10.745 24-24 24H24c-13.255 0-24-10.745-24-24V120c0-13.255 10.745-24 24-24h72v296c0 30.879 25.121 56 56 56h168zm0-344V0H152c-13.255 0-24 10.745-24 24v368c0 13.255 10.745 24 24 24h272c13.255 0 24-10.745 24-24V128H344c-13.2 0-24-10.8-24-24zm120.971-31.029L375.029 7.029A24 24 0 0 0 358.059 0H352v96h96v-6.059a24 24 0 0 0-7.029-16.97z"/></svg>
|
||||
<div></div></button>
|
||||
</div>
|
||||
<h4 id="try-it">Try it (live demo): </h4>
|
||||
|
||||
@ -325,16 +365,9 @@
|
||||
|
||||
|
||||
|
||||
<div class="json">
|
||||
{% if auth is not none %}
|
||||
<!-- import requests-->
|
||||
|
||||
<!-- sess = requests.session()-->
|
||||
<!-- sess.post(url='INTERFACE_URL/login', data={"username": "USERNAME", "password":"PASSWORD"})-->
|
||||
<!-- r = sess.post(url='INTERFACE_URL/api/predict/',json={"data":[INPUT]}, )-->
|
||||
<!-- -->
|
||||
<!-- print(r.json())-->
|
||||
|
||||
<div class="json code-block">
|
||||
<div class="float-left">
|
||||
{% if auth is not none %}
|
||||
<p class="syntax">import requests</p>
|
||||
<br>
|
||||
<p class="syntax">sess = requests.session()</p>
|
||||
@ -363,11 +396,15 @@
|
||||
<p>r.json()</p>
|
||||
|
||||
{% endif %}
|
||||
</div>
|
||||
<button class="copy" onclick="copyCode(this)"><svg class="copy-svg" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" style="fill: #808080;"><path d="M320 448v40c0 13.255-10.745 24-24 24H24c-13.255 0-24-10.745-24-24V120c0-13.255 10.745-24 24-24h72v296c0 30.879 25.121 56 56 56h168zm0-344V0H152c-13.255 0-24 10.745-24 24v368c0 13.255 10.745 24 24 24h272c13.255 0 24-10.745 24-24V128H344c-13.2 0-24-10.8-24-24zm120.971-31.029L375.029 7.029A24 24 0 0 0 358.059 0H352v96h96v-6.059a24 24 0 0 0-7.029-16.97z"/></svg>
|
||||
<div></div></button>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
<div class="demo-window" demo="2">
|
||||
<div class="json">
|
||||
<div class="json code-block">
|
||||
<div class="float-left">
|
||||
{% if auth is not none %}
|
||||
<p class="syntax">curl -X POST -F 'username=USERNAME' -F 'password=PASSWORD' <span class="syntax" id="curl_syntax_url_login"></span> -c cookies.txt </p>
|
||||
|
||||
@ -386,10 +423,14 @@
|
||||
|
||||
{% endif %}
|
||||
</div>
|
||||
|
||||
<button class="copy" onclick="copyCode(this)"><svg class="copy-svg" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" style="fill: #808080;"><path d="M320 448v40c0 13.255-10.745 24-24 24H24c-13.255 0-24-10.745-24-24V120c0-13.255 10.745-24 24-24h72v296c0 30.879 25.121 56 56 56h168zm0-344V0H152c-13.255 0-24 10.745-24 24v368c0 13.255 10.745 24 24 24h272c13.255 0 24-10.745 24-24V128H344c-13.2 0-24-10.8-24-24zm120.971-31.029L375.029 7.029A24 24 0 0 0 358.059 0H352v96h96v-6.059a24 24 0 0 0-7.029-16.97z"/></svg>
|
||||
<div></div></button>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="demo-window" demo="3">
|
||||
<div class="json">
|
||||
<div class="json code-block">
|
||||
<div class="float-left">
|
||||
{% if auth is not none %}
|
||||
<p class="syntax">// Will only work locally.</p>
|
||||
<br>
|
||||
@ -415,6 +456,9 @@
|
||||
console.log(json_response) })</p>
|
||||
</p>
|
||||
{% endif %}
|
||||
</div>
|
||||
<button class="copy" onclick="copyCode(this)"><svg class="copy-svg" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" style="fill: #808080;"><path d="M320 448v40c0 13.255-10.745 24-24 24H24c-13.255 0-24-10.745-24-24V120c0-13.255 10.745-24 24-24h72v296c0 30.879 25.121 56 56 56h168zm0-344V0H152c-13.255 0-24 10.745-24 24v368c0 13.255 10.745 24 24 24h272c13.255 0 24-10.745 24-24V128H344c-13.2 0-24-10.8-24-24zm120.971-31.029L375.029 7.029A24 24 0 0 0 358.059 0H352v96h96v-6.059a24 24 0 0 0-7.029-16.97z"/></svg>
|
||||
<div></div></button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@ -437,11 +481,20 @@
|
||||
<p>  }</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="hidden" id="related-methods-holder">
|
||||
<h4 id="related">Related Methods: </h4>
|
||||
<div class="json code-block">
|
||||
<pre><code class="language-python float-left" id="related-methods"></code></pre>
|
||||
<button class="copy" onclick="copyCode(this)"><svg class="copy-svg" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" style="fill: #808080;"><path d="M320 448v40c0 13.255-10.745 24-24 24H24c-13.255 0-24-10.745-24-24V120c0-13.255 10.745-24 24-24h72v296c0 30.879 25.121 56 56 56h168zm0-344V0H152c-13.255 0-24 10.745-24 24v368c0 13.255 10.745 24 24 24h272c13.255 0 24-10.745 24-24V128H344c-13.2 0-24-10.8-24-24zm120.971-31.029L375.029 7.029A24 24 0 0 0 358.059 0H352v96h96v-6.059a24 24 0 0 0-7.029-16.97z"/></svg>
|
||||
<div></div></button>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
</main>
|
||||
|
||||
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script>
|
||||
|
||||
<script>
|
||||
var len_inputs = {{ len_inputs }};
|
||||
var len_outputs = {{ len_outputs }};
|
||||
@ -538,6 +591,78 @@
|
||||
$(`.demo-window[demo="${demo_num}"]`).show();
|
||||
})
|
||||
|
||||
var inputRelatedMethods = {
|
||||
'Image': `# To convert your image file into the base64 format required by the API
|
||||
gr.processing_utils.encode_url_or_file_to_base64(path)
|
||||
|
||||
`,
|
||||
'Video':`# To convert your video file into the base64 format required by the API
|
||||
gr.processing_utils.encode_url_or_file_to_base64(path)
|
||||
|
||||
`,
|
||||
'Audio':`# To convert your audio file into the base64 format required by the API
|
||||
gr.processing_utils.encode_url_or_file_to_base64(path)
|
||||
|
||||
`,
|
||||
'File':`# To convert your file into the base64 format required by the API
|
||||
gr.processing_utils.encode_url_or_file_to_base64(path)
|
||||
|
||||
`
|
||||
|
||||
}
|
||||
|
||||
var outputRelatedMethods = {
|
||||
'Image': `# To convert the base64 image returned by the API to an image tmp file object
|
||||
gr.processing_utils.decode_base64_to_file(encoding, encryption_key=None, file_path=None)
|
||||
|
||||
`,
|
||||
'Video': `# To convert the base64 video returned by the API to an video tmp file object
|
||||
gr.processing_utils.decode_base64_to_file(encoding, encryption_key=None, file_path=None)
|
||||
|
||||
`,
|
||||
'Audio': `# To convert the base64 audio returned by the API to an audio tmp file object
|
||||
gr.processing_utils.decode_base64_to_file(encoding, encryption_key=None, file_path=None)
|
||||
|
||||
`,
|
||||
'File': `# To convert the base64 file returned by the API to a regular tmp file object
|
||||
gr.processing_utils.decode_base64_to_file(encoding, encryption_key=None, file_path=None)
|
||||
|
||||
`
|
||||
}
|
||||
|
||||
|
||||
var showRelated = false;
|
||||
var relatedMethods = `import gradio as gr
|
||||
|
||||
`
|
||||
|
||||
{% for i in range(len_inputs) %}
|
||||
if ("{{inputs[i]}}" in inputRelatedMethods) {
|
||||
showRelated = true;
|
||||
relatedMethods += inputRelatedMethods["{{inputs[i]}}"]
|
||||
}
|
||||
{% endfor %}
|
||||
|
||||
{% for i in range(len_outputs) %}
|
||||
if ("{{outputs[i]}}" in outputRelatedMethods) {
|
||||
showRelated = true;
|
||||
relatedMethods += outputRelatedMethods["{{outputs[i]}}"]
|
||||
}
|
||||
{% endfor %}
|
||||
|
||||
if (showRelated) {
|
||||
document.getElementById('related-methods').innerHTML = relatedMethods;
|
||||
$('#related-methods-holder').removeClass("hidden");
|
||||
}
|
||||
|
||||
function copyCode(elem) {
|
||||
var text = elem.parentElement.innerText;
|
||||
navigator.clipboard.writeText(text);
|
||||
elem.firstChild.style="fill: #eb9f59;"
|
||||
setTimeout(function(){
|
||||
elem.firstChild.style="fill: #808080;"
|
||||
}, 600);
|
||||
};
|
||||
|
||||
var friendlyHttpStatus = {
|
||||
'200': 'OK',
|
||||
@ -584,6 +709,8 @@
|
||||
};
|
||||
|
||||
</script>
|
||||
<script src="https://gradio.app/assets/prism.js"></script>
|
||||
|
||||
</body>
|
||||
|
||||
</html>
|
@ -2,6 +2,7 @@
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import copy
|
||||
import csv
|
||||
import inspect
|
||||
import json
|
||||
@ -10,7 +11,7 @@ import os
|
||||
import random
|
||||
import warnings
|
||||
from distutils.version import StrictVersion
|
||||
from typing import TYPE_CHECKING, Any, Callable, Dict
|
||||
from typing import TYPE_CHECKING, Any, Callable, Dict, List
|
||||
|
||||
import aiohttp
|
||||
import analytics
|
||||
@ -214,29 +215,34 @@ def get_config_file(interface: Interface) -> Dict[str, Any]:
|
||||
}
|
||||
try:
|
||||
param_names = inspect.getfullargspec(interface.predict[0])[0]
|
||||
for iface, param in zip(config["input_components"], param_names):
|
||||
if not iface["label"]:
|
||||
iface["label"] = param.replace("_", " ")
|
||||
for i, iface in enumerate(config["output_components"]):
|
||||
for index, component in enumerate(config["input_components"]):
|
||||
if not component["label"]:
|
||||
if index < len(param_names):
|
||||
component["label"] = param_names[index].replace("_", " ")
|
||||
else:
|
||||
component["label"] = (
|
||||
f"input {index + 1}"
|
||||
if len(config["input_components"]) > 1
|
||||
else "input"
|
||||
)
|
||||
for index, component in enumerate(config["output_components"]):
|
||||
outputs_per_function = int(
|
||||
len(interface.output_components) / len(interface.predict)
|
||||
)
|
||||
function_index = i // outputs_per_function
|
||||
component_index = i - function_index * outputs_per_function
|
||||
ret_name = (
|
||||
"Output " + str(component_index + 1)
|
||||
if outputs_per_function > 1
|
||||
else "Output"
|
||||
)
|
||||
if iface["label"] is None:
|
||||
iface["label"] = ret_name
|
||||
function_index = index // outputs_per_function
|
||||
component_index = index - function_index * outputs_per_function
|
||||
if component["label"] is None:
|
||||
component["label"] = (
|
||||
f"output {component_index + 1}"
|
||||
if outputs_per_function > 1
|
||||
else "output"
|
||||
)
|
||||
if len(interface.predict) > 1:
|
||||
iface["label"] = (
|
||||
component["label"] = (
|
||||
interface.function_names[function_index].replace("_", " ")
|
||||
+ ": "
|
||||
+ iface["label"]
|
||||
+ component["label"]
|
||||
)
|
||||
|
||||
except ValueError:
|
||||
pass
|
||||
if interface.examples is not None:
|
||||
@ -286,3 +292,20 @@ def get_default_args(func: Callable) -> Dict[str, Any]:
|
||||
v.default if v.default is not inspect.Parameter.empty else None
|
||||
for v in signature.parameters.values()
|
||||
]
|
||||
|
||||
|
||||
def format_ner_list(input_string: str, ner_groups: Dict[str : str | int]):
|
||||
if len(ner_groups) == 0:
|
||||
return [(input_string, None)]
|
||||
|
||||
output = []
|
||||
prev_end = 0
|
||||
|
||||
for group in ner_groups:
|
||||
entity, start, end = group["entity_group"], group["start"], group["end"]
|
||||
output.append((input_string[prev_end:start], None))
|
||||
output.append((input_string[start:end], entity))
|
||||
prev_end = end
|
||||
|
||||
output.append((input_string[end:], None))
|
||||
return output
|
||||
|
@ -1 +1 @@
|
||||
2.8.9
|
||||
2.8.14
|
81
guides/adding_rich_descriptions_to_your_demo.md
Normal file
@ -0,0 +1,81 @@
|
||||
# Adding Rich Descriptions to Your Demo
|
||||
|
||||
related_spaces: https://huggingface.co/spaces/ThomasSimonini/Chat-with-Gandalf-GPT-J6B, https://huggingface.co/spaces/kingabzpro/Rick_and_Morty_Bot, https://huggingface.co/spaces/nateraw/cryptopunks-generator
|
||||
tags: MARKDOWN, DESCRIPTION, ARTICLE
|
||||
|
||||
## Introduction
|
||||
|
||||
When an interface is shared, it is usually accompanied with some form of explanatory text, links or images. This guide will go over how to easily add these on gradio.
|
||||
|
||||
For example, take a look at this fun chatbot interface below. It has a title, description, image as well as a link in the bottom.
|
||||
|
||||
<iframe src="https://hf.space/embed/aliabd/rick-and-morty/+" frameBorder="0" height="875" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
|
||||
## The parameters in `Interface`
|
||||
|
||||
There are three parameters in `Interface` where text can go:
|
||||
|
||||
* `title`: which accepts text and can displays it at the very top of interface
|
||||
* `description`: which accepts text, markdown or HTML and places it right under the title.
|
||||
* `article`: which is also accepts text, markdown or HTML but places it below the interface.
|
||||
|
||||
![annotated](website/src/assets/img/guides/adding_rich_descriptions_to_your_demo/annotated.png)
|
||||
|
||||
## Code example
|
||||
|
||||
Here's all the text-related code required to recreate the interface shown above.
|
||||
|
||||
```python
|
||||
import gradio as gr
|
||||
|
||||
title = "Ask Rick a Question"
|
||||
description = """
|
||||
<center>
|
||||
The bot was trained to answer questions based on Rick and Morty dialogues. Ask Rick anything!
|
||||
<img src="https://huggingface.co/spaces/course-demos/Rick_and_Morty_QA/resolve/main/rick.png" width=200px>
|
||||
</center>
|
||||
"""
|
||||
|
||||
article = "Check out [the original Rick and Morty Bot](https://huggingface.co/spaces/kingabzpro/Rick_and_Morty_Bot) that this demo is based off of."
|
||||
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||
import torch
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained("ericzhou/DialoGPT-Medium-Rick_v2")
|
||||
model = AutoModelForCausalLM.from_pretrained("ericzhou/DialoGPT-Medium-Rick_v2")
|
||||
|
||||
def predict(input):
|
||||
# tokenize the new input sentence
|
||||
new_user_input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors='pt')
|
||||
|
||||
# generate a response
|
||||
history = model.generate(new_user_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id).tolist()
|
||||
|
||||
# convert the tokens to text, and then split the responses into the right format
|
||||
response = tokenizer.decode(history[0]).split("<|endoftext|>")
|
||||
return response[1]
|
||||
|
||||
gr.Interface(fn = predict, inputs = ["textbox"], outputs = ["text"], title = title, description = description, article = article).launch()
|
||||
|
||||
```
|
||||
|
||||
Of course, you don't have to use HTML and can instead rely on markdown, like we've done in the `article` parameter above.
|
||||
|
||||
The table below shows the syntax for the most common markdown commands.
|
||||
|
||||
| Type | Syntax |
|
||||
| ----------- | ----------- |
|
||||
| Header | # Heading 1 ## Heading 2 ### Heading 3 |
|
||||
| Link | \[gradio's website](https://gradio.app) |
|
||||
| Image | !\[gradio's logo](https://gradio.app/assets/img/logo.png) |
|
||||
| Text Formatting | \_italic_ \*\*bold** |
|
||||
| List | \* Item 1 \* Item 2 |
|
||||
| Quote | \> this is a quote |
|
||||
| Code | Inline \`code\` has \`back-ticks around\` it. |
|
||||
|
||||
|
||||
|
||||
Here's a neat [cheatsheet](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet) with more.
|
||||
|
||||
|
||||
### That's all! Happy building :)
|
@ -9,7 +9,7 @@ How well can an algorithm guess what you're drawing? A few years ago, Google rel
|
||||
|
||||
Such models are perfect to use with Gradio's *sketchpad* input, so in this tutorial we will build a Pictionary web application using Gradio. We will be able to build the whole web application in Python, and it will look like this (try drawing something!):
|
||||
|
||||
<iframe src="https://hf.space/gradioiframe/abidlabs/draw2/+" frameBorder="0" height="450" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
<iframe src="https://hf.space/embed/abidlabs/draw2/+" frameBorder="0" height="450" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
|
||||
Let's get started!
|
||||
|
||||
@ -97,7 +97,7 @@ gr.Interface(fn=predict,
|
||||
|
||||
This produces the following interface, which you can try right here in your browser (try drawing something, like a "snake" or a "laptop"):
|
||||
|
||||
<iframe src="https://hf.space/gradioiframe/abidlabs/draw2/+" frameBorder="0" height="450" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
<iframe src="https://hf.space/embed/abidlabs/draw2/+" frameBorder="0" height="450" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
|
||||
----------
|
||||
|
||||
|
@ -11,7 +11,7 @@ Using `gradio`, you can easily build a demo of your chatbot model and share that
|
||||
|
||||
This tutorial will show how to take a pretrained chatbot model and deploy it with a Gradio interface in 4 steps. The live chatbot interface that we create will look something like this (try it!):
|
||||
|
||||
<iframe src="https://hf.space/gradioiframe/abidlabs/chatbot-stylized/+" frameBorder="0" height="350" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
<iframe src="https://hf.space/embed/abidlabs/chatbot-stylized/+" frameBorder="0" height="350" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
|
||||
Chatbots are *stateful*, meaning that the model's prediction can change depending on how the user has previously interacted with the model. So, in this tutorial, we will also cover how to use **state** with Gradio demos.
|
||||
|
||||
@ -86,7 +86,7 @@ gr.Interface(fn=predict,
|
||||
|
||||
This produces the following interface, which you can try right here in your browser (try typing in some simple greetings like "Hi!" to get started):
|
||||
|
||||
<iframe src="https://hf.space/gradioiframe/abidlabs/chatbot-minimal/+" frameBorder="0" height="350" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
<iframe src="https://hf.space/embed/abidlabs/chatbot-minimal/+" frameBorder="0" height="350" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
|
||||
|
||||
----------
|
||||
|
@ -37,6 +37,7 @@ With these three arguments, we can quickly create interfaces and `launch()` th
|
||||
Let's say we want to customize the input text field - for example, we wanted it to be larger and have a text hint. If we use the actual input class for `Textbox` instead of using the string shortcut, we have access to much more customizability. To see a list of all the components we support and how you can customize them, check out the [Docs](https://gradio.app/docs).
|
||||
|
||||
**Sidenote**: `Interface.launch()` method returns 3 values:
|
||||
|
||||
1. `app`, which is the FastAPI application that is powering the Gradio demo
|
||||
2. `local_url`, which is the local address of the server
|
||||
3. `share_url`, which is the public address for this demo (it is generated if `share=True` more [on this later](https://gradio.app/getting_started/#sharing-interfaces-publicly))
|
||||
@ -161,7 +162,7 @@ Share links expire after 72 hours. For permanent hosting, see Hosting Gradio App
|
||||
|
||||
### Hosting Gradio Apps on Spaces
|
||||
|
||||
Huggingface provides the infrastructure to permanently host your Gradio model on the internet, for free! You can either drag and drop a folder containing your Gradio model and all related files, or you can point HF Spaces to your Git repository and HP Spaces will pull the Gradio interface from there. See [Huggingface Spaces](http://huggingface.co/spaces/) for more information.
|
||||
Huggingface provides the infrastructure to permanently host your Gradio model on the internet, for free! You can either drag and drop a folder containing your Gradio model and all related files, or you can point HF Spaces to your Git repository and HF Spaces will pull the Gradio interface from there. See [Huggingface Spaces](http://huggingface.co/spaces/) for more information.
|
||||
|
||||
![Hosting Demo](/assets/img/hf_demo.gif)
|
||||
|
||||
|
@ -9,7 +9,7 @@ Image classification is a central task in computer vision. Building better class
|
||||
|
||||
Such models are perfect to use with Gradio's *image* input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in Python, and it will look like this (try one of the examples!):
|
||||
|
||||
<iframe src="https://hf.space/gradioiframe/abidlabs/pytorch-image-classifier/+" frameBorder="0" height="660" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
<iframe src="https://hf.space/embed/abidlabs/pytorch-image-classifier/+" frameBorder="0" height="660" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
|
||||
|
||||
Let's get started!
|
||||
@ -82,7 +82,7 @@ gr.Interface(fn=predict,
|
||||
|
||||
This produces the following interface, which you can try right here in your browser (try uploading your own examples!):
|
||||
|
||||
<iframe src="https://hf.space/gradioiframe/abidlabs/pytorch-image-classifier/+" frameBorder="0" height="660" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
<iframe src="https://hf.space/embed/abidlabs/pytorch-image-classifier/+" frameBorder="0" height="660" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
|
||||
----------
|
||||
|
||||
|
@ -9,7 +9,7 @@ Image classification is a central task in computer vision. Building better class
|
||||
|
||||
Such models are perfect to use with Gradio's *image* input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in Python, and it will look like this (try one of the examples!):
|
||||
|
||||
<iframe src="https://hf.space/gradioiframe/abidlabs/keras-image-classifier/+" frameBorder="0" height="660" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
<iframe src="https://hf.space/embed/abidlabs/keras-image-classifier/+" frameBorder="0" height="660" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
|
||||
|
||||
Let's get started!
|
||||
@ -80,7 +80,7 @@ gr.Interface(fn=classify_image,
|
||||
|
||||
This produces the following interface, which you can try right here in your browser (try uploading your own examples!):
|
||||
|
||||
<iframe src="https://hf.space/gradioiframe/abidlabs/keras-image-classifier/+" frameBorder="0" height="660" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
<iframe src="https://hf.space/embed/abidlabs/keras-image-classifier/+" frameBorder="0" height="660" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
|
||||
----------
|
||||
|
||||
|
@ -9,7 +9,7 @@ Image classification is a central task in computer vision. Building better class
|
||||
|
||||
State-of-the-art image classifiers are based on the *transformers* architectures, originally popularized for NLP tasks. Such architectures are typically called vision transformers (ViT). Such models are perfect to use with Gradio's *image* input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in a **single line of Python**, and it will look like this (try one of the examples!):
|
||||
|
||||
<iframe src="https://hf.space/gradioiframe/abidlabs/vision-transformer/+" frameBorder="0" height="660" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
<iframe src="https://hf.space/embed/abidlabs/vision-transformer/+" frameBorder="0" height="660" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
|
||||
|
||||
Let's get started!
|
||||
@ -47,7 +47,7 @@ Notice that we have added one more parameter, the `examples`, which allows us to
|
||||
|
||||
This produces the following interface, which you can try right here in your browser. When you input an image, it is automatically preprocessed and sent to the Hugging Face Hub API, where it is passed through the model and returned as a human-interpretable prediction. Try uploading your own image!
|
||||
|
||||
<iframe src="https://hf.space/gradioiframe/abidlabs/vision-transformer/+" frameBorder="0" height="660" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
<iframe src="https://hf.space/embed/abidlabs/vision-transformer/+" frameBorder="0" height="660" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
|
||||
----------
|
||||
|
||||
|
@ -63,7 +63,7 @@ iface = gr.Interface(
|
||||
iface.launch()
|
||||
```
|
||||
|
||||
<iframe src="https://hf.space/gradioiframe/aliabd/calculator-flag-basic/+" frameBorder="0" height="500" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
<iframe src="https://hf.space/embed/aliabd/calculator-flag-basic/+" frameBorder="0" height="500" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
|
||||
|
||||
When you click the flag button above, the directory where the interface was launched will include a new flagged subfolder, with a csv file inside it. This csv file includes all the data that was flagged.
|
||||
@ -112,7 +112,7 @@ iface = gr.Interface(
|
||||
|
||||
iface.launch()
|
||||
```
|
||||
<iframe src="https://hf.space/gradioiframe/aliabd/calculator-flagging-options/+" frameBorder="0" height="500" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
<iframe src="https://hf.space/embed/aliabd/calculator-flagging-options/+" frameBorder="0" height="500" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
|
||||
When users click the flag button, the csv file will now include a column indicating the selected option.
|
||||
|
||||
@ -146,7 +146,7 @@ iface = gr.Interface(
|
||||
|
||||
iface.launch()
|
||||
```
|
||||
<iframe src="https://hf.space/gradioiframe/aliabd/calculator-flagging-crowdsourced/+" frameBorder="0" height="500" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
<iframe src="https://hf.space/embed/aliabd/calculator-flagging-crowdsourced/+" frameBorder="0" height="500" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
|
||||
You can now see all the examples flagged above in this [public HF dataset](https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo/blob/main/data.csv).
|
||||
|
||||
|
@ -39,7 +39,7 @@ iface.launch()
|
||||
|
||||
The previous code produces the following interface, which you can try right here in your browser:
|
||||
|
||||
<iframe src="https://hf.space/gradioiframe/osanseviero/helsinki_translation_en_es/+" frameBorder="0" height="450" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
<iframe src="https://hf.space/embed/osanseviero/helsinki_translation_en_es/+" frameBorder="0" height="450" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
|
||||
This demo requires installing four libraries: gradio, torch, transformers, and sentencepiece. Apart from that, this is a Gradio demo with the structure you're used to! The demo is a usual Gradio `Interface` with a prediction function, a specified input, and a specified output. The prediction function executes the `pipeline` function with the given input, retrieves the first (and only) translation result, and returns the `translation_text` field, which you're interested in.
|
||||
|
||||
@ -91,7 +91,7 @@ gr.Interface.load("spaces/eugenesiow/remove-bg").launch()
|
||||
|
||||
The code snippet above will load the same interface as the corresponding Space demo.
|
||||
|
||||
<iframe src="https://hf.space/gradioiframe/eugenesiow/remove-bg/+" frameBorder="0" height="900" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
<iframe src="https://hf.space/embed/eugenesiow/remove-bg/+" frameBorder="0" height="900" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
|
||||
|
||||
You can change UI elements, such as the title or theme, but also change the expected type. The previous Space expected users to upload images. What if you would like users to have their webcam and remove the background from there? You can load the Space but change the source of input as follows:
|
||||
@ -107,7 +107,7 @@ gr.Interface.load(
|
||||
|
||||
The code above generates the following demo.
|
||||
|
||||
<iframe src="https://hf.space/gradioiframe/osanseviero/remove-bg-webcam/+" frameBorder="0" height="600" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
<iframe src="https://hf.space/embed/osanseviero/remove-bg-webcam/+" frameBorder="0" height="600" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
|
||||
As you can see, the demo looks the same, but it uses a webcam input instead of user-uploaded images.
|
||||
|
||||
@ -136,7 +136,7 @@ iface3.launch()
|
||||
|
||||
`iface1` and `iface2` are loading existing Spaces. Then, with `Parallel`, you can run the interfaces in parallel. When you click submit, you will get the output for both interfaces. This is what the demo looks like:
|
||||
|
||||
<iframe src="https://hf.space/gradioiframe/osanseviero/mix_match_gradio/+" frameBorder="0" height="450" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
<iframe src="https://hf.space/embed/osanseviero/mix_match_gradio/+" frameBorder="0" height="450" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
|
||||
Although both models are generative, you can see that the way both models behave is very different. That's a powerful application of `Parallel`!
|
||||
|
||||
@ -145,7 +145,7 @@ Although both models are generative, you can see that the way both models behave
|
||||
Throughout this guide, you've seen there are Gradio demos embedded. You can also do this on your own website! The first step is to create a Space with the demo you want to showcase. You can embed it in your HTML code, as shown in the following self-contained example.
|
||||
|
||||
```html
|
||||
<iframe src="https://hf.space/gradioiframe/osanseviero/mix_match_gradio/+" frameBorder="0" height="450" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
<iframe src="https://hf.space/embed/osanseviero/mix_match_gradio/+" frameBorder="0" height="450" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
```
|
||||
|
||||
## Recap
|
||||
|
49
guides/using_the_api_docs.md
Normal file
@ -0,0 +1,49 @@
|
||||
# Using the API Docs
|
||||
|
||||
tags: API
|
||||
|
||||
## Introduction
|
||||
|
||||
Every gradio interface comes with an API you can use directly. To find out how to use it, just click the `view the api` button at the bottom of the page (whether its hosted on spaces, generated using `share=True`, or running locally).
|
||||
|
||||
![view the api button](website/src/assets/img/guides/using_the_api_docs/view-the-api-button.gif)
|
||||
|
||||
This button opens up interface-specific API docs. This will show you the predict endpoint, payload, response, as well as sample code snippets in Python, JS and cURL.
|
||||
|
||||
# What will the API docs tell you?
|
||||
|
||||
Below is an (iframed) example: the API Docs of [this space](https://huggingface.co/spaces/aliabd/nubia).
|
||||
|
||||
<iframe src="https://hf.space/embed/aliabd/nubia/api" frameBorder="5" height="725" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
|
||||
|
||||
It shows that there are 7 sections on the page
|
||||
|
||||
* The predict **endpoint**:
|
||||
* Where to send the payload (`https://hf.space/embed/aliabd/nubia/+/api/predict/`). This is likely the most important piece of information as it defines where the request will be sent.
|
||||
* The **inputs** and their types
|
||||
* The **outputs** and their types
|
||||
* The **payload**:
|
||||
* What to send and how to structure it. It will always look like:
|
||||
```python
|
||||
{
|
||||
"data": [ input_1, input_2 ... ]
|
||||
}
|
||||
```
|
||||
* The **response**:
|
||||
* What to expect to receive. It will always look like:
|
||||
```python
|
||||
{
|
||||
"data": [ output_1, output_2 ... ],
|
||||
"durations": [ float ], # the time taken for the prediction to complete
|
||||
"avg_durations": [ float ] # the average time taken for all predictions so far (used to estimate the runtime)
|
||||
}
|
||||
```
|
||||
|
||||
* A live **demo** and **code snippets** in Python, JS and cURL
|
||||
* You can go directly to this section if you want to quickly try out the API and play around with it.
|
||||
* Other **methods** related to the inputs/outputs
|
||||
* Use gradio's helper methods to quickly convert your files to base64 and other formats required by the API.
|
||||
|
||||
|
||||
### That's all! Happy building :)
|
@ -5,7 +5,7 @@ if [ -z "$(ls | grep CONTRIBUTING.md)" ]; then
|
||||
else
|
||||
echo "Uploading to pypi"
|
||||
set -e
|
||||
git pull origin master
|
||||
git pull origin main
|
||||
old_version=$(grep -Po "(?<=version=\")[^\"]+(?=\")" setup.py)
|
||||
echo "Current version is $old_version. New version?"
|
||||
read new_version
|
||||
|
2
setup.py
@ -5,7 +5,7 @@ except ImportError:
|
||||
|
||||
setup(
|
||||
name="gradio",
|
||||
version="2.8.9",
|
||||
version="2.8.14",
|
||||
include_package_data=True,
|
||||
description="Python library for easily interacting with trained machine learning models",
|
||||
author="Abubakar Abid, Ali Abid, Ali Abdalla, Dawood Khan, Ahsen Khaliq",
|
||||
|
Before Width: | Height: | Size: 32 KiB After Width: | Height: | Size: 32 KiB |
Before Width: | Height: | Size: 237 KiB After Width: | Height: | Size: 237 KiB |
Before Width: | Height: | Size: 33 KiB After Width: | Height: | Size: 33 KiB |
@ -10,6 +10,7 @@ import requests
|
||||
from gradio.utils import (
|
||||
colab_check,
|
||||
error_analytics,
|
||||
format_ner_list,
|
||||
get_local_ip_address,
|
||||
ipython_check,
|
||||
json,
|
||||
@ -116,5 +117,28 @@ class TestIPAddress(unittest.TestCase):
|
||||
self.assertEqual(ip, "No internet connection")
|
||||
|
||||
|
||||
class TestFormatNERList(unittest.TestCase):
|
||||
def test_format_ner_list_standard(self):
|
||||
string = "Wolfgang lives in Berlin"
|
||||
groups = [
|
||||
{"entity_group": "PER", "start": 0, "end": 8},
|
||||
{"entity_group": "LOC", "start": 18, "end": 24},
|
||||
]
|
||||
result = [
|
||||
("", None),
|
||||
("Wolfgang", "PER"),
|
||||
(" lives in ", None),
|
||||
("Berlin", "LOC"),
|
||||
("", None),
|
||||
]
|
||||
self.assertEqual(format_ner_list(string, groups), result)
|
||||
|
||||
def test_format_ner_list_empty(self):
|
||||
string = "I live in a city"
|
||||
groups = []
|
||||
result = [("I live in a city", None)]
|
||||
self.assertEqual(format_ner_list(string, groups), result)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
|
@ -1,5 +1,15 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en" style="height: 100%; margin: 0; padding: 0">
|
||||
<html
|
||||
lang="en"
|
||||
style="
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
flex-grow: 1;
|
||||
height: 100%;
|
||||
"
|
||||
>
|
||||
<head>
|
||||
<meta charset="utf-8" />
|
||||
<meta
|
||||
@ -47,7 +57,18 @@
|
||||
<title>Gradio</title>
|
||||
</head>
|
||||
|
||||
<body style="height: 100%; margin: 0; padding: 0">
|
||||
<div id="root" style="min-height: 100%"></div>
|
||||
<body
|
||||
style="
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
flex-grow: 1;
|
||||
"
|
||||
>
|
||||
<div
|
||||
id="root"
|
||||
style="display: flex; flex-direction: column; flex-grow: 1"
|
||||
></div>
|
||||
</body>
|
||||
</html>
|
||||
|
@ -13,6 +13,7 @@
|
||||
import "./themes/grass.css";
|
||||
import "./themes/peach.css";
|
||||
import "./themes/seafoam.css";
|
||||
import "./themes/typography.min.css";
|
||||
import { _ } from "svelte-i18n";
|
||||
import { setupi18n } from "./i18n";
|
||||
setupi18n();
|
||||
@ -30,6 +31,7 @@
|
||||
export let input_components: Array<Component>;
|
||||
export let output_components: Array<Component>;
|
||||
export let examples: Array<Array<unknown>>;
|
||||
export let examples_per_page: number;
|
||||
export let fn: (...args: any) => Promise<unknown>;
|
||||
export let root: string;
|
||||
export let space: string | undefined = undefined;
|
||||
@ -60,12 +62,13 @@
|
||||
<h1 class="title text-center p-4 text-4xl">{title}</h1>
|
||||
{/if}
|
||||
{#if description}
|
||||
<p class="description pb-4">{@html description}</p>
|
||||
<p class="description prose max-w-none pb-4">{@html description}</p>
|
||||
{/if}
|
||||
<Interface
|
||||
{input_components}
|
||||
{output_components}
|
||||
{examples}
|
||||
{examples_per_page}
|
||||
{theme}
|
||||
{fn}
|
||||
{root}
|
||||
|
@ -15,8 +15,38 @@
|
||||
export let input_components: Array<Component>;
|
||||
export let theme: string;
|
||||
|
||||
let selected_examples = examples;
|
||||
let page = 0;
|
||||
let gallery = input_components.length === 1;
|
||||
let paginate = examples.length > examples_per_page;
|
||||
|
||||
let selected_examples: Array<Array<unknown>>;
|
||||
let page_count: number;
|
||||
let visible_pages: Array<number> = [];
|
||||
$: {
|
||||
if (paginate) {
|
||||
visible_pages = [];
|
||||
selected_examples = examples.slice(
|
||||
page * examples_per_page,
|
||||
(page + 1) * examples_per_page
|
||||
);
|
||||
page_count = Math.ceil(examples.length / examples_per_page);
|
||||
[0, page, page_count - 1].forEach((anchor) => {
|
||||
for (let i = anchor - 2; i <= anchor + 2; i++) {
|
||||
if (i >= 0 && i < page_count && !visible_pages.includes(i)) {
|
||||
if (
|
||||
visible_pages.length > 0 &&
|
||||
i - visible_pages[visible_pages.length - 1] > 1
|
||||
) {
|
||||
visible_pages.push(-1);
|
||||
}
|
||||
visible_pages.push(i);
|
||||
}
|
||||
}
|
||||
});
|
||||
} else {
|
||||
selected_examples = examples.slice();
|
||||
}
|
||||
}
|
||||
</script>
|
||||
|
||||
<div class="examples" {theme}>
|
||||
@ -31,7 +61,8 @@
|
||||
{#each selected_examples as example_row, i}
|
||||
<button
|
||||
class="example cursor-pointer p-2 rounded bg-gray-50 dark:bg-gray-700 transition"
|
||||
on:click={() => setExampleId(i)}
|
||||
class:selected={i + page * examples_per_page === example_id}
|
||||
on:click={() => setExampleId(i + page * examples_per_page)}
|
||||
>
|
||||
<svelte:component
|
||||
this={input_component_map[input_components[0].name].example}
|
||||
@ -59,8 +90,8 @@
|
||||
{#each selected_examples as example_row, i}
|
||||
<tr
|
||||
class="cursor-pointer transition"
|
||||
class:selected={i === example_id}
|
||||
on:click={() => setExampleId(i)}
|
||||
class:selected={i + page * examples_per_page === example_id}
|
||||
on:click={() => setExampleId(i + page * examples_per_page)}
|
||||
>
|
||||
{#each example_row as example_cell, j}
|
||||
<td class="py-2 px-4">
|
||||
@ -78,6 +109,24 @@
|
||||
</table>
|
||||
{/if}
|
||||
</div>
|
||||
{#if paginate}
|
||||
<div class="flex gap-2 items-center mt-4">
|
||||
Pages:
|
||||
{#each visible_pages as visible_page}
|
||||
{#if visible_page === -1}
|
||||
<div>...</div>
|
||||
{:else}
|
||||
<button
|
||||
class="page"
|
||||
class:font-bold={page === visible_page}
|
||||
on:click={() => (page = visible_page)}
|
||||
>
|
||||
{visible_page + 1}
|
||||
</button>
|
||||
{/if}
|
||||
{/each}
|
||||
</div>
|
||||
{/if}
|
||||
</div>
|
||||
|
||||
<style lang="postcss" global>
|
||||
@ -102,5 +151,11 @@
|
||||
@apply bg-amber-500 text-white;
|
||||
}
|
||||
}
|
||||
.examples-table tr.selected {
|
||||
@apply font-semibold;
|
||||
}
|
||||
.page {
|
||||
@apply py-1 px-2 bg-gray-100 dark:bg-gray-700 rounded;
|
||||
}
|
||||
}
|
||||
</style>
|
||||
|
@ -17,6 +17,7 @@
|
||||
export let theme: string;
|
||||
export let fn: (...args: any) => Promise<unknown>;
|
||||
export let examples: Array<Array<unknown>>;
|
||||
export let examples_per_page: number;
|
||||
export let root: string;
|
||||
export let allow_flagging: string;
|
||||
export let flagging_options: Array<string> | undefined = undefined;
|
||||
@ -50,8 +51,10 @@
|
||||
let timer_diff = 0;
|
||||
let avg_duration = Array.isArray(avg_durations) ? avg_durations[0] : null;
|
||||
let expected_duration: number | null = null;
|
||||
let example_id: number | null = null;
|
||||
|
||||
const setValues = async (index: number, value: unknown) => {
|
||||
const setValues = (index: number, value: unknown) => {
|
||||
example_id = null;
|
||||
has_changed = true;
|
||||
input_values[index] = value;
|
||||
if (live && state !== "PENDING") {
|
||||
@ -59,7 +62,8 @@
|
||||
}
|
||||
};
|
||||
|
||||
const setExampleId = async (example_id: number) => {
|
||||
const setExampleId = async (_id: number) => {
|
||||
example_id = _id;
|
||||
input_components.forEach(async (input_component, i) => {
|
||||
const process_example =
|
||||
input_component_map[input_component.name].process_example;
|
||||
@ -72,6 +76,7 @@
|
||||
input_values[i] = examples[example_id][i];
|
||||
}
|
||||
});
|
||||
example_id = _id;
|
||||
};
|
||||
|
||||
const startTimer = () => {
|
||||
@ -108,7 +113,7 @@
|
||||
try {
|
||||
output = await fn(
|
||||
"predict",
|
||||
{ data: input_values, cleared: cleared_since_last_submit },
|
||||
{ data: input_values, cleared: cleared_since_last_submit, example_id: example_id },
|
||||
queue,
|
||||
queueCallback
|
||||
);
|
||||
@ -359,6 +364,8 @@
|
||||
{#if examples}
|
||||
<ExampleSet
|
||||
{examples}
|
||||
{examples_per_page}
|
||||
{example_id}
|
||||
{input_components}
|
||||
{theme}
|
||||
{examples_dir}
|
||||
|
@ -2,14 +2,13 @@
|
||||
export let value: number;
|
||||
export let setValue: (val: number) => number;
|
||||
export let theme: string;
|
||||
|
||||
$: setValue(value);
|
||||
</script>
|
||||
|
||||
<input
|
||||
type="number"
|
||||
class="input-number w-full rounded box-border p-2 focus:outline-none appearance-none"
|
||||
bind:value
|
||||
{value}
|
||||
on:input={(e) => setValue(e.target.value)}
|
||||
{theme}
|
||||
/>
|
||||
|
||||
|
@ -11,6 +11,7 @@
|
||||
class="input-text w-full rounded box-border p-2 focus:outline-none appearance-none"
|
||||
{value}
|
||||
{placeholder}
|
||||
rows={lines}
|
||||
on:input={(e) => setValue(e.target.value)}
|
||||
{theme}
|
||||
/>
|
||||
|
@ -1,11 +1,12 @@
|
||||
<script lang="ts">
|
||||
import { afterUpdate } from "svelte";
|
||||
export let value: string;
|
||||
export let theme: string;
|
||||
|
||||
let audio: HTMLAudioElement;
|
||||
afterUpdate(() => (audio.src = value));
|
||||
</script>
|
||||
|
||||
<audio {theme} controls>
|
||||
<audio bind:this={audio} class="w-full" {theme} controls>
|
||||
<source src={value} />
|
||||
</audio>
|
||||
|
||||
<style lang="postcss">
|
||||
</style>
|
||||
|
@ -114,7 +114,7 @@ window.launchGradio = (config: Config, element_query: string) => {
|
||||
};
|
||||
|
||||
window.launchGradioFromSpaces = async (space: string, target: string) => {
|
||||
const space_url = `https://huggingface.co/gradioiframe/${space}/+/`;
|
||||
const space_url = `https://hf.space/embed/${space}/+/`;
|
||||
let config = await fetch(space_url + "config");
|
||||
let _config: Config = await config.json();
|
||||
_config.root = space_url;
|
||||
|
6472
ui/packages/app/src/themes/typography.min.css
vendored
Normal file
@ -3,11 +3,7 @@ module.exports = {
|
||||
content: ["./src/**/*.svelte"],
|
||||
mode: "jit",
|
||||
darkMode: "class", // or 'media' or 'class'
|
||||
theme: {
|
||||
extend: {}
|
||||
},
|
||||
variants: {
|
||||
extend: {}
|
||||
},
|
||||
plugins: []
|
||||
}
|
||||
};
|
||||
|
@ -52,14 +52,17 @@ def render_index():
|
||||
generated_template.write(output_html)
|
||||
|
||||
|
||||
guide_files = ["getting_started.md"]
|
||||
all_guides = sorted(os.listdir(GRADIO_GUIDES_DIR))
|
||||
guide_files.extend([file for file in all_guides if file != "getting_started.md"])
|
||||
guides = []
|
||||
for guide in sorted(os.listdir(GRADIO_GUIDES_DIR)):
|
||||
for guide in guide_files:
|
||||
if guide.lower() == "readme.md":
|
||||
continue
|
||||
guide_name = guide[:-3]
|
||||
pretty_guide_name = " ".join(
|
||||
[
|
||||
word.capitalize().replace("Ml", "ML").replace("Gan", "GAN")
|
||||
word.capitalize().replace("Ml", "ML").replace("Gan", "GAN").replace("Api", "API")
|
||||
for word in guide_name.split("_")
|
||||
]
|
||||
)
|
||||
@ -84,8 +87,12 @@ for guide in sorted(os.listdir(GRADIO_GUIDES_DIR)):
|
||||
[
|
||||
line
|
||||
for line in guide_content.split("\n")
|
||||
if not (line.startswith("tags: ") or line.startswith("related_spaces: ") or
|
||||
line.startswith("Contributed by ") or line == title)
|
||||
if not (
|
||||
line.startswith("tags: ")
|
||||
or line.startswith("related_spaces: ")
|
||||
or line.startswith("Contributed by ")
|
||||
or line == title
|
||||
)
|
||||
]
|
||||
)
|
||||
|
||||
@ -97,16 +104,15 @@ for guide in sorted(os.listdir(GRADIO_GUIDES_DIR)):
|
||||
"tags": tags,
|
||||
"spaces": spaces,
|
||||
"url": url,
|
||||
"contributor": contributor
|
||||
"contributor": contributor,
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
def render_guides_main():
|
||||
filtered_guides = [guide for guide in guides if guide["name"] != "getting_started"]
|
||||
with open("src/guides_main_template.html", encoding="utf-8") as template_file:
|
||||
template = Template(template_file.read())
|
||||
output_html = template.render(guides=filtered_guides, navbar_html=navbar_html)
|
||||
output_html = template.render(guides=guides, navbar_html=navbar_html)
|
||||
os.makedirs(os.path.join("generated", "guides"), exist_ok=True)
|
||||
with open(
|
||||
os.path.join("generated", "guides", "index.html"), "w", encoding="utf-8"
|
||||
@ -173,7 +179,7 @@ def render_guides():
|
||||
guide_output = guide_output.replace("</pre>", f"</pre>{copy_button}</div>")
|
||||
|
||||
output_html = markdown2.markdown(
|
||||
guide_output, extras=["target-blank-links", "header-ids"]
|
||||
guide_output, extras=["target-blank-links", "header-ids", "tables"]
|
||||
)
|
||||
os.makedirs("generated", exist_ok=True)
|
||||
os.makedirs(os.path.join("generated", guide["name"]), exist_ok=True)
|
||||
|
After Width: | Height: | Size: 397 KiB |
After Width: | Height: | Size: 236 KiB |
@ -122,18 +122,6 @@
|
||||
</div>
|
||||
|
||||
<div id="guide-list" class="grid grid-cols-1 lg:grid-cols-3 gap-12 pt-12">
|
||||
|
||||
<a class="flex lg:col-span-1 flex-col group overflow-hidden relative rounded-xl shadow-sm hover:shadow-alternate transition-shadow" href="/getting_started">
|
||||
<div class="flex flex-col p-4" style="
|
||||
height: min-content;">
|
||||
<h2 class="font-semibold group-hover:underline text-xl">Getting Started
|
||||
</h2>
|
||||
<div class="tags-holder">
|
||||
</div>
|
||||
</div>
|
||||
</a>
|
||||
|
||||
|
||||
{% for guide in guides %}
|
||||
<a class="flex lg:col-span-1 flex-col group overflow-hidden relative rounded-xl shadow-sm hover:shadow-alternate transition-shadow" href="/{{ guide.name }}">
|
||||
<div class="flex flex-col p-4" style="
|
||||
@ -141,12 +129,14 @@
|
||||
<h2 class="font-semibold group-hover:underline text-xl">{{ guide.pretty_name }}
|
||||
</h2>
|
||||
<div class="tags-holder">
|
||||
{% if guide.tags is not none %}
|
||||
<p>
|
||||
{% for tag in guide.tags %}
|
||||
{{ tag }}<!--
|
||||
-->{% if not loop.last %}, {% endif %}
|
||||
{% endfor %}
|
||||
</p>
|
||||
{% endif %}
|
||||
</div>
|
||||
</div>
|
||||
</a>
|
||||
@ -232,7 +222,7 @@
|
||||
if (txtValue.toUpperCase().indexOf(filter) > -1 || guideContent.toUpperCase().indexOf(filter) > -1) {
|
||||
a[{{ loop.index - 1}}].style.display = "";
|
||||
} else {
|
||||
a[{{ loop.index - 1 }}].style.display = "none";
|
||||
a[{{ loop.index - 1}}].style.display = "none";
|
||||
counter++;
|
||||
}
|
||||
{% endfor %}
|
||||
|
@ -88,6 +88,20 @@
|
||||
padding-inline-start: 40px;
|
||||
list-style-type: none;
|
||||
}
|
||||
|
||||
table {
|
||||
margin: auto;
|
||||
}
|
||||
|
||||
th {
|
||||
background: #ccc;
|
||||
}
|
||||
|
||||
th, td {
|
||||
border: 1px solid #ccc;
|
||||
padding: 10px;
|
||||
}
|
||||
|
||||
</style>
|
||||
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-156449732-1"></script>
|
||||
<script>
|
||||
|
7
website/reload_website.sh
Executable file → Normal file
@ -1,4 +1,5 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
. /home/ubuntu/.bashrc
|
||||
export PATH="/usr/local/bin:/usr/bin:/bin"
|
||||
|
||||
@ -13,4 +14,8 @@ else
|
||||
fi
|
||||
docker-compose build
|
||||
docker-compose up -d
|
||||
fi
|
||||
|
||||
LATEST=$(git log -1 | fgrep commit)$(git log -1 | tail -1)
|
||||
curl -X POST -H 'Content-type: application/json' --data '{"text":"gradio.app relaoded successfully! :ship:\n\n Latest live commit:\n>`'"${LATEST}"'`"}' ${SLACK_WEBHOOK}
|
||||
|
||||
fi
|
||||
|
11
website/reload_with_notification.sh
Normal file
@ -0,0 +1,11 @@
|
||||
#!/bin/sh
|
||||
. /home/ubuntu/.bashrc
|
||||
export PATH="/usr/local/bin:/usr/bin:/bin"
|
||||
|
||||
ERROR=$(sh ./reload_website.sh 2>&1)
|
||||
|
||||
if ! [ $? -eq 0 ]; then
|
||||
data=$( jo text="$(echo "gradio.app is not tracking master :o: \nError:\n\n\`\`\`'$ERROR'\`\`\`")")
|
||||
echo "$data"
|
||||
curl -X POST -H 'Content-type: application/json' --data "$data" ${SLACK_WEBHOOK}
|
||||
fi
|