merge in master

This commit is contained in:
Ali Abid 2022-04-18 18:36:43 -07:00
commit 4f1947d8c7
39 changed files with 7241 additions and 144 deletions

1
.gitignore vendored
View File

@ -38,6 +38,7 @@ demo/files/*.mp4
*.bak
workspace.code-workspace
*.h5
.vscode/
# log files
.pnpm-debug.log

View File

@ -10,9 +10,7 @@ coverage:
- "gradio/"
target: 70%
threshold: 0.1
patch:
default:
target: 50% # new contributions should have a coverage at least equal to 50%
patch: off
comment: false
codecov:

View File

@ -1,11 +1,14 @@
Metadata-Version: 1.0
Metadata-Version: 2.1
Name: gradio
Version: 2.7.0b70
Version: 2.8.13
Summary: Python library for easily interacting with trained machine learning models
Home-page: https://github.com/gradio-app/gradio-UI
Author: Abubakar Abid, Ali Abid, Ali Abdalla, Dawood Khan, Ahsen Khaliq, Pete Allen, Ömer Faruk Özdemir
Author-email: team@gradio.app
License: Apache License 2.0
Description: UNKNOWN
Keywords: machine learning,visualization,reproducibility
Platform: UNKNOWN
License-File: LICENSE
UNKNOWN

View File

@ -10,7 +10,7 @@ from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple
from gradio import encryptor, networking, queueing, strings, utils
from gradio.context import Context
from gradio.process_examples import cache_interface_examples
from gradio.routes import PredictBody
if TYPE_CHECKING: # Only import for type checking (is False at runtime).
from fastapi.applications import FastAPI
@ -19,7 +19,8 @@ if TYPE_CHECKING: # Only import for type checking (is False at runtime).
class Block:
def __init__(self, without_rendering=False):
def __init__(self, without_rendering=False, css=None):
self.css = css
if without_rendering:
return
self.render()
@ -103,7 +104,7 @@ class BlockContext(Block):
css: Css rules to apply to block.
"""
self.children = []
self.css = css if css is not None else {}
self.css = css
self.visible = visible
super().__init__()
@ -378,7 +379,6 @@ class Blocks(BlockContext):
height: int = 500,
width: int = 900,
encrypt: bool = False,
cache_examples: bool = False,
favicon_path: Optional[str] = None,
ssl_keyfile: Optional[str] = None,
ssl_certfile: Optional[str] = None,
@ -403,7 +403,6 @@ class Blocks(BlockContext):
width (int): The width in pixels of the iframe element containing the interface (used if inline=True)
height (int): The height in pixels of the iframe element containing the interface (used if inline=True)
encrypt (bool): If True, flagged data will be encrypted by key provided by creator at launch
cache_examples (bool): If True, examples outputs will be processed and cached in a folder, and will be used if a user uses an example input.
favicon_path (str): If a path to a file (.png, .gif, or .ico) is provided, it will be used as the favicon for the web page.
ssl_keyfile (str): If a path to a file is provided, will use this as the private key file to create a local server running on https.
ssl_certfile (str): If a path to a file is provided, will use this as the signed certificate for https. Needs to be provided if ssl_keyfile is provided.
@ -414,7 +413,6 @@ class Blocks(BlockContext):
share_url (str): Publicly accessible link to the demo (if share=True, otherwise None)
"""
self.config = self.get_config_file()
self.cache_examples = cache_examples
if (
auth
and not callable(auth)
@ -443,9 +441,6 @@ class Blocks(BlockContext):
config = self.get_config_file()
self.config = config
if self.cache_examples:
cache_interface_examples(self)
if self.is_running:
self.server_app.launchable = self
print(

View File

@ -4,7 +4,7 @@ import re
import requests
from gradio import components
from gradio import components, utils
def get_huggingface_interface(model_name, api_key, alias):
@ -206,6 +206,13 @@ def get_huggingface_interface(model_name, api_key, alias):
"preprocess": lambda x: {"inputs": x},
"postprocess": encode_to_base64,
},
"token-classification": {
# example model: hf.co/huggingface-course/bert-finetuned-ner
"inputs": components.Textbox(label="Input"),
"outputs": components.HighlightedText(label="Output"),
"preprocess": lambda x: {"inputs": x},
"postprocess": lambda r: r, # Handled as a special case in query_huggingface_api()
},
}
if p is None or not (p in pipelines):
@ -228,6 +235,12 @@ def get_huggingface_interface(model_name, api_key, alias):
response.status_code
)
)
if (
p == "token-classification"
): # Handle as a special case since HF API only returns the named entities and we need the input as well
ner_groups = response.json()
input_string = params[0]
response = utils.format_ner_list(input_string, ner_groups)
output = pipeline["postprocess"](response)
return output

View File

@ -9,7 +9,7 @@ from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, List, Optional
import gradio as gr
from gradio import encryptor
from gradio import encryptor, utils
if TYPE_CHECKING:
from gradio.components import Component
@ -87,7 +87,7 @@ class SimpleCSVLogger(FlaggingCallback):
)
with open(log_filepath, "a", newline="") as csvfile:
writer = csv.writer(csvfile)
writer = csv.writer(csvfile, quoting=csv.QUOTE_NONNUMERIC, quotechar="'")
writer.writerow(csv_data)
with open(log_filepath, "r") as csvfile:
@ -153,7 +153,7 @@ class CSVLogger(FlaggingCallback):
flag_col_index = header.index("flag")
content[flag_index][flag_col_index] = flag_option
output = io.StringIO()
writer = csv.writer(output)
writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC, quotechar="'")
writer.writerows(content)
return output.getvalue()
@ -169,7 +169,7 @@ class CSVLogger(FlaggingCallback):
if flag_index is not None:
file_content = replace_flag_at_index(file_content)
output.write(file_content)
writer = csv.writer(output)
writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC, quotechar="'")
if flag_index is None:
if is_new:
writer.writerow(headers)
@ -181,7 +181,9 @@ class CSVLogger(FlaggingCallback):
else:
if flag_index is None:
with open(log_filepath, "a", newline="") as csvfile:
writer = csv.writer(csvfile)
writer = csv.writer(
csvfile, quoting=csv.QUOTE_NONNUMERIC, quotechar="'"
)
if is_new:
writer.writerow(headers)
writer.writerow(csv_data)
@ -291,6 +293,7 @@ class HuggingFaceDatasetSaver(FlaggingCallback):
headers = []
for component, sample in zip(self.components, flag_data):
headers.append(component.label)
headers.append(component.label)
infos["flagged"]["features"][component.label] = {
"dtype": "string",
@ -316,12 +319,8 @@ class HuggingFaceDatasetSaver(FlaggingCallback):
# Generate the row corresponding to the flagged sample
csv_data = []
for component, sample in zip(self.components, flag_data):
filepath = (
component.save_flagged(
self.dataset_dir, component.label, sample, None
)
if sample is not None
else ""
filepath = component.save_flagged(
self.dataset_dir, component.label, sample, None
)
csv_data.append(filepath)
if isinstance(component, tuple(file_preview_types)):

View File

@ -54,7 +54,7 @@ def cache_interface_examples(interface: Interface) -> None:
def load_from_cache(interface: Interface, example_id: int) -> List[Any]:
"""Loads a particular cached example for the interface."""
with open(CACHE_FILE) as cache:
examples = list(csv.reader(cache))
examples = list(csv.reader(cache, quotechar="'"))
example = examples[example_id + 1] # +1 to adjust for header
output = []
for component, cell in zip(interface.output_components, example):

View File

@ -7,6 +7,8 @@ from typing import Dict, Tuple
import requests
from gradio.routes import QueuePushBody
DB_FILE = "gradio_queue.db"
@ -106,8 +108,9 @@ def pop() -> Tuple[int, str, Dict, str]:
return result[0], result[1], json.loads(result[2]), result[3]
def push(input_data: Dict, action: str) -> Tuple[str, int]:
input_data = json.dumps(input_data)
def push(body: QueuePushBody) -> Tuple[str, int]:
action = body.action
input_data = json.dumps({"data": body.data})
hash = generate_hash()
conn = sqlite3.connect(DB_FILE)
c = conn.cursor()

View File

@ -9,7 +9,8 @@ import posixpath
import secrets
import traceback
import urllib
from typing import Any, List, Optional, Type
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Type
import orjson
import pkg_resources
@ -21,6 +22,7 @@ from fastapi.responses import FileResponse, HTMLResponse, JSONResponse
from fastapi.security import OAuth2PasswordRequestForm
from fastapi.templating import Jinja2Templates
from jinja2.exceptions import TemplateNotFound
from pydantic import BaseModel
from starlette.responses import RedirectResponse
from gradio import encryptor, queueing, utils
@ -49,6 +51,43 @@ class ORJSONResponse(JSONResponse):
templates = Jinja2Templates(directory=STATIC_TEMPLATE_LIB)
###########
# Data Models
###########
class PredictBody(BaseModel):
session_hash: Optional[str]
example_id: Optional[int]
data: List[Any]
state: Optional[Any]
fn_index: Optional[int]
class FlagData(BaseModel):
input_data: List[Any]
output_data: List[Any]
flag_option: Optional[str]
flag_index: Optional[int]
class FlagBody(BaseModel):
data: FlagData
class InterpretBody(BaseModel):
data: List[Any]
class QueueStatusBody(BaseModel):
hash: str
class QueuePushBody(BaseModel):
action: str
data: Any
###########
# Auth
###########
@ -166,7 +205,8 @@ def create_app() -> FastAPI:
io.BytesIO(file_data), attachment_filename=os.path.basename(path)
)
else:
return FileResponse(safe_join(app.cwd, path))
if Path(app.cwd).resolve() in Path(path).resolve().parents:
return FileResponse(Path(path).resolve())
@app.get("/api", response_class=HTMLResponse) # Needed for Spaces
@app.get("/api/", response_class=HTMLResponse)
@ -229,49 +269,14 @@ def create_app() -> FastAPI:
raise error
return output
@app.post("/api/flag/", dependencies=[Depends(login_check)])
async def flag(request: Request, username: str = Depends(get_current_user)):
if app.blocks.analytics_enabled:
await utils.log_feature_analytics(app.blocks.ip_address, "flag")
body = await request.json()
data = body["data"]
await run_in_threadpool(
app.blocks.flagging_callback.flag,
app.blocks,
data["input_data"],
data["output_data"],
flag_option=data.get("flag_option"),
flag_index=data.get("flag_index"),
username=username,
)
return {"success": True}
@app.post("/api/interpret/", dependencies=[Depends(login_check)])
async def interpret(request: Request):
if app.blocks.analytics_enabled:
await utils.log_feature_analytics(app.blocks.ip_address, "interpret")
body = await request.json()
raw_input = body["data"]
interpretation_scores, alternative_outputs = await run_in_threadpool(
app.blocks.interpret, raw_input
)
return {
"interpretation_scores": interpretation_scores,
"alternative_outputs": alternative_outputs,
}
@app.post("/api/queue/push/", dependencies=[Depends(login_check)])
async def queue_push(request: Request):
body = await request.json()
action = body["action"]
job_hash, queue_position = queueing.push(body, action)
async def queue_push(body: QueuePushBody):
job_hash, queue_position = queueing.push(body)
return {"hash": job_hash, "queue_position": queue_position}
@app.post("/api/queue/status/", dependencies=[Depends(login_check)])
async def queue_status(request: Request):
body = await request.json()
hash = body["hash"]
status, data = queueing.get_status(hash)
async def queue_status(body: QueueStatusBody):
status, data = queueing.get_status(body.hash)
return {"status": status, "data": data}
return app

View File

@ -22,6 +22,8 @@
<noscript>
<link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Open+Sans&display=swap">
</noscript>
<link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Open+Sans&display=swap">
<link rel="stylesheet" href="https://gradio.app/assets/prism.css">
<style>
html {
@ -211,6 +213,36 @@
color: grey !important;
pointer-events: none;
}
.copy {
float: right;
padding-right: 1em;
background: whitesmoke;
border: none !important;
cursor: pointer;
}
.float-left {
float:left;
width: 90%;
overflow: auto;
}
.copy-svg {
visibility: hidden;
margin: 1em 0 0 0 !important;
width: 20px;
}
.code-block:hover .copy-svg {
visibility: visible;
}
pre {
float:left;
width: 90%;
overflow: auto !important;
background: inherit !important;
}
</style>
<meta property="og:url" content="https://gradio.app/" />
@ -286,23 +318,31 @@
</ul>
<h4 id="payload">Payload: </h4>
<div class="json">
<div class="json code-block">
<div class="float-left">
<p>&emsp;&emsp;{</p>
<p>&emsp;&emsp;&emsp;&emsp;"data": [{%for i in range(0, len_inputs)%} <span>{{ input_types[i]
}}</span>{% if i != len_inputs - 1 %} ,{% endif %}{%endfor%} ]</p>
<p>&emsp;&emsp;}</p>
</div>
<button class="copy" onclick="copyCode(this)"><svg class="copy-svg" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" style="fill: #808080;"><path d="M320 448v40c0 13.255-10.745 24-24 24H24c-13.255 0-24-10.745-24-24V120c0-13.255 10.745-24 24-24h72v296c0 30.879 25.121 56 56 56h168zm0-344V0H152c-13.255 0-24 10.745-24 24v368c0 13.255 10.745 24 24 24h272c13.255 0 24-10.745 24-24V128H344c-13.2 0-24-10.8-24-24zm120.971-31.029L375.029 7.029A24 24 0 0 0 358.059 0H352v96h96v-6.059a24 24 0 0 0-7.029-16.97z"/></svg>
<div></div></button>
</div>
{% if auth is not none %}
<p>Note: This interface requires authentication. This means you will have to first post to the login api before you can post to the predict endpoint. See below for more info </p>
{% endif %}
<h4 id="response">Response: </h4>
<div class="json">
<div class="json code-block">
<div class="float-left">
<p>&emsp;&emsp;{</p>
<p>&emsp;&emsp;&emsp;&emsp;"data": [{%for i in range(0, len_outputs)%} <span>{{ output_types[i]
}}</span>{% if i != len_outputs - 1 %} ,{% endif %}{%endfor%} ],</p>
<p>&emsp;&emsp;&emsp;&emsp;"durations": [ float ], # the time taken for the prediction to complete</p>
<p>&emsp;&emsp;&emsp;&emsp;"avg_durations": [ float ] # the average time taken for all predictions so far (used to estimate the runtime)</p>
<p>&emsp;&emsp;}</p>
</div>
<button class="copy" onclick="copyCode(this)"><svg class="copy-svg" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" style="fill: #808080;"><path d="M320 448v40c0 13.255-10.745 24-24 24H24c-13.255 0-24-10.745-24-24V120c0-13.255 10.745-24 24-24h72v296c0 30.879 25.121 56 56 56h168zm0-344V0H152c-13.255 0-24 10.745-24 24v368c0 13.255 10.745 24 24 24h272c13.255 0 24-10.745 24-24V128H344c-13.2 0-24-10.8-24-24zm120.971-31.029L375.029 7.029A24 24 0 0 0 358.059 0H352v96h96v-6.059a24 24 0 0 0-7.029-16.97z"/></svg>
<div></div></button>
</div>
<h4 id="try-it">Try it (live demo): </h4>
@ -325,16 +365,9 @@
<div class="json">
{% if auth is not none %}
<!-- import requests-->
<!-- sess = requests.session()-->
<!-- sess.post(url='INTERFACE_URL/login', data={"username": "USERNAME", "password":"PASSWORD"})-->
<!-- r = sess.post(url='INTERFACE_URL/api/predict/',json={"data":[INPUT]}, )-->
<!-- -->
<!-- print(r.json())-->
<div class="json code-block">
<div class="float-left">
{% if auth is not none %}
<p class="syntax">import requests</p>
<br>
<p class="syntax">sess = requests.session()</p>
@ -363,11 +396,15 @@
<p>r.json()</p>
{% endif %}
</div>
<button class="copy" onclick="copyCode(this)"><svg class="copy-svg" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" style="fill: #808080;"><path d="M320 448v40c0 13.255-10.745 24-24 24H24c-13.255 0-24-10.745-24-24V120c0-13.255 10.745-24 24-24h72v296c0 30.879 25.121 56 56 56h168zm0-344V0H152c-13.255 0-24 10.745-24 24v368c0 13.255 10.745 24 24 24h272c13.255 0 24-10.745 24-24V128H344c-13.2 0-24-10.8-24-24zm120.971-31.029L375.029 7.029A24 24 0 0 0 358.059 0H352v96h96v-6.059a24 24 0 0 0-7.029-16.97z"/></svg>
<div></div></button>
</div>
</div>
<div class="demo-window" demo="2">
<div class="json">
<div class="json code-block">
<div class="float-left">
{% if auth is not none %}
<p class="syntax">curl -X POST&nbsp;-F 'username=USERNAME' -F 'password=PASSWORD' <span class="syntax" id="curl_syntax_url_login"></span>&nbsp;-c cookies.txt </p>
@ -386,10 +423,14 @@
{% endif %}
</div>
<button class="copy" onclick="copyCode(this)"><svg class="copy-svg" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" style="fill: #808080;"><path d="M320 448v40c0 13.255-10.745 24-24 24H24c-13.255 0-24-10.745-24-24V120c0-13.255 10.745-24 24-24h72v296c0 30.879 25.121 56 56 56h168zm0-344V0H152c-13.255 0-24 10.745-24 24v368c0 13.255 10.745 24 24 24h272c13.255 0 24-10.745 24-24V128H344c-13.2 0-24-10.8-24-24zm120.971-31.029L375.029 7.029A24 24 0 0 0 358.059 0H352v96h96v-6.059a24 24 0 0 0-7.029-16.97z"/></svg>
<div></div></button>
</div>
</div>
<div class="demo-window" demo="3">
<div class="json">
<div class="json code-block">
<div class="float-left">
{% if auth is not none %}
<p class="syntax">// Will only work locally.</p>
<br>
@ -415,6 +456,9 @@
console.log(json_response) })</p>
</p>
{% endif %}
</div>
<button class="copy" onclick="copyCode(this)"><svg class="copy-svg" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" style="fill: #808080;"><path d="M320 448v40c0 13.255-10.745 24-24 24H24c-13.255 0-24-10.745-24-24V120c0-13.255 10.745-24 24-24h72v296c0 30.879 25.121 56 56 56h168zm0-344V0H152c-13.255 0-24 10.745-24 24v368c0 13.255 10.745 24 24 24h272c13.255 0 24-10.745 24-24V128H344c-13.2 0-24-10.8-24-24zm120.971-31.029L375.029 7.029A24 24 0 0 0 358.059 0H352v96h96v-6.059a24 24 0 0 0-7.029-16.97z"/></svg>
<div></div></button>
</div>
</div>
</div>
@ -437,11 +481,20 @@
<p>&emsp;&emsp;}</p>
</div>
</div>
<div class="hidden" id="related-methods-holder">
<h4 id="related">Related Methods: </h4>
<div class="json code-block">
<pre><code class="language-python float-left" id="related-methods"></code></pre>
<button class="copy" onclick="copyCode(this)"><svg class="copy-svg" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" style="fill: #808080;"><path d="M320 448v40c0 13.255-10.745 24-24 24H24c-13.255 0-24-10.745-24-24V120c0-13.255 10.745-24 24-24h72v296c0 30.879 25.121 56 56 56h168zm0-344V0H152c-13.255 0-24 10.745-24 24v368c0 13.255 10.745 24 24 24h272c13.255 0 24-10.745 24-24V128H344c-13.2 0-24-10.8-24-24zm120.971-31.029L375.029 7.029A24 24 0 0 0 358.059 0H352v96h96v-6.059a24 24 0 0 0-7.029-16.97z"/></svg>
<div></div></button>
</div>
</div>
</div>
</main>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script>
<script>
var len_inputs = {{ len_inputs }};
var len_outputs = {{ len_outputs }};
@ -538,6 +591,78 @@
$(`.demo-window[demo="${demo_num}"]`).show();
})
var inputRelatedMethods = {
'Image': `# To convert your image file into the base64 format required by the API
gr.processing_utils.encode_url_or_file_to_base64(path)
`,
'Video':`# To convert your video file into the base64 format required by the API
gr.processing_utils.encode_url_or_file_to_base64(path)
`,
'Audio':`# To convert your audio file into the base64 format required by the API
gr.processing_utils.encode_url_or_file_to_base64(path)
`,
'File':`# To convert your file into the base64 format required by the API
gr.processing_utils.encode_url_or_file_to_base64(path)
`
}
var outputRelatedMethods = {
'Image': `# To convert the base64 image returned by the API to an image tmp file object
gr.processing_utils.decode_base64_to_file(encoding, encryption_key=None, file_path=None)
`,
'Video': `# To convert the base64 video returned by the API to an video tmp file object
gr.processing_utils.decode_base64_to_file(encoding, encryption_key=None, file_path=None)
`,
'Audio': `# To convert the base64 audio returned by the API to an audio tmp file object
gr.processing_utils.decode_base64_to_file(encoding, encryption_key=None, file_path=None)
`,
'File': `# To convert the base64 file returned by the API to a regular tmp file object
gr.processing_utils.decode_base64_to_file(encoding, encryption_key=None, file_path=None)
`
}
var showRelated = false;
var relatedMethods = `import gradio as gr
`
{% for i in range(len_inputs) %}
if ("{{inputs[i]}}" in inputRelatedMethods) {
showRelated = true;
relatedMethods += inputRelatedMethods["{{inputs[i]}}"]
}
{% endfor %}
{% for i in range(len_outputs) %}
if ("{{outputs[i]}}" in outputRelatedMethods) {
showRelated = true;
relatedMethods += outputRelatedMethods["{{outputs[i]}}"]
}
{% endfor %}
if (showRelated) {
document.getElementById('related-methods').innerHTML = relatedMethods;
$('#related-methods-holder').removeClass("hidden");
}
function copyCode(elem) {
var text = elem.parentElement.innerText;
navigator.clipboard.writeText(text);
elem.firstChild.style="fill: #eb9f59;"
setTimeout(function(){
elem.firstChild.style="fill: #808080;"
}, 600);
};
var friendlyHttpStatus = {
'200': 'OK',
@ -584,6 +709,8 @@
};
</script>
<script src="https://gradio.app/assets/prism.js"></script>
</body>
</html>

View File

@ -1,5 +1,15 @@
<!DOCTYPE html>
<html lang="en" style="height: 100%; margin: 0; padding: 0">
<html
lang="en"
style="
margin: 0;
padding: 0;
display: flex;
flex-direction: column;
flex-grow: 1;
height: 100%;
"
>
<head>
<meta charset="utf-8" />
<meta
@ -45,11 +55,22 @@
</script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/iframe-resizer/4.3.1/iframeResizer.contentWindow.min.js"></script>
<title>Gradio</title>
<script type="module" crossorigin src="./assets/index.2c3c09fa.js"></script>
<link rel="stylesheet" href="./assets/index.689bcbeb.css">
<script type="module" crossorigin src="./assets/index.225863aa.js"></script>
<link rel="stylesheet" href="./assets/index.39bf42f9.css">
</head>
<body style="height: 100%; margin: 0; padding: 0">
<div id="root" style="min-height: 100%"></div>
<body
style="
margin: 0;
padding: 0;
display: flex;
flex-direction: column;
flex-grow: 1;
"
>
<div
id="root"
style="display: flex; flex-direction: column; flex-grow: 1"
></div>
</body>
</html>

View File

@ -2,6 +2,7 @@
from __future__ import annotations
import copy
import csv
import inspect
import json
@ -11,7 +12,7 @@ import random
import warnings
from copy import deepcopy
from distutils.version import StrictVersion
from typing import TYPE_CHECKING, Any, Callable, Dict
from typing import TYPE_CHECKING, Any, Callable, Dict, List
import aiohttp
import analytics
@ -215,29 +216,34 @@ def get_config_file(interface: Interface) -> Dict[str, Any]:
}
try:
param_names = inspect.getfullargspec(interface.predict[0])[0]
for iface, param in zip(config["input_components"], param_names):
if not iface["label"]:
iface["label"] = param.replace("_", " ")
for i, iface in enumerate(config["output_components"]):
for index, component in enumerate(config["input_components"]):
if not component["label"]:
if index < len(param_names):
component["label"] = param_names[index].replace("_", " ")
else:
component["label"] = (
f"input {index + 1}"
if len(config["input_components"]) > 1
else "input"
)
for index, component in enumerate(config["output_components"]):
outputs_per_function = int(
len(interface.output_components) / len(interface.predict)
)
function_index = i // outputs_per_function
component_index = i - function_index * outputs_per_function
ret_name = (
"Output " + str(component_index + 1)
if outputs_per_function > 1
else "Output"
)
if iface["label"] is None:
iface["label"] = ret_name
function_index = index // outputs_per_function
component_index = index - function_index * outputs_per_function
if component["label"] is None:
component["label"] = (
f"output {component_index + 1}"
if outputs_per_function > 1
else "output"
)
if len(interface.predict) > 1:
iface["label"] = (
component["label"] = (
interface.function_names[function_index].replace("_", " ")
+ ": "
+ iface["label"]
+ component["label"]
)
except ValueError:
pass
if interface.examples is not None:
@ -329,3 +335,20 @@ def assert_configs_are_equivalent_besides_ids(config1, config2):
assert d1["queue"] == d2["queue"], "{} does not match {}".format(d1, d2)
return True
def format_ner_list(input_string: str, ner_groups: List[Dict[str, str | int]]):
    """Convert NER results into the (text, label) pair list used by HighlightedText.

    Parameters:
        input_string: the original text that was sent to the NER model.
        ner_groups: entity groups as returned by the HF API; each dict has
            "entity_group" (str), "start" (int) and "end" (int) keys, and the
            groups are assumed to be sorted and non-overlapping.

    Returns:
        A list of (substring, label) tuples covering the whole input, where
        un-labeled spans carry a label of None.
    """
    # No entities: the whole string is a single unlabeled span.
    if len(ner_groups) == 0:
        return [(input_string, None)]

    output = []
    prev_end = 0
    for group in ner_groups:
        entity, start, end = group["entity_group"], group["start"], group["end"]
        # Unlabeled text between the previous entity and this one (may be empty).
        output.append((input_string[prev_end:start], None))
        # The entity span itself, tagged with its label.
        output.append((input_string[start:end], entity))
        prev_end = end
    # Trailing unlabeled text after the last entity (may be empty).
    output.append((input_string[end:], None))
    return output

View File

@ -1 +1 @@
2.7.0b70
2.8.13

View File

@ -0,0 +1,81 @@
# Adding Rich Descriptions to Your Demo
related_spaces: https://huggingface.co/spaces/ThomasSimonini/Chat-with-Gandalf-GPT-J6B, https://huggingface.co/spaces/kingabzpro/Rick_and_Morty_Bot, https://huggingface.co/spaces/nateraw/cryptopunks-generator
tags: MARKDOWN, DESCRIPTION, ARTICLE
## Introduction
When an interface is shared, it is usually accompanied with some form of explanatory text, links or images. This guide will go over how to easily add these on gradio.
For example, take a look at this fun chatbot interface below. It has a title, description, image as well as a link in the bottom.
<iframe src="https://hf.space/gradioiframe/aliabd/rick-and-morty/+" frameBorder="0" height="875" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
## The parameters in `Interface`
There are three parameters in `Interface` where text can go:
* `title`: which accepts text and displays it at the very top of the interface
* `description`: which accepts text, markdown or HTML and places it right under the title.
* `article`: which also accepts text, markdown or HTML but places it below the interface.
![annotated](website/src/assets/img/guides/adding_rich_descriptions_to_your_demo/annotated.png)
## Code example
Here's all the text-related code required to recreate the interface shown above.
```python
import gradio as gr
title = "Ask Rick a Question"
description = """
<center>
The bot was trained to answer questions based on Rick and Morty dialogues. Ask Rick anything!
<img src="https://huggingface.co/spaces/course-demos/Rick_and_Morty_QA/resolve/main/rick.png" width=200px>
</center>
"""
article = "Check out [the original Rick and Morty Bot](https://huggingface.co/spaces/kingabzpro/Rick_and_Morty_Bot) that this demo is based off of."
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
tokenizer = AutoTokenizer.from_pretrained("ericzhou/DialoGPT-Medium-Rick_v2")
model = AutoModelForCausalLM.from_pretrained("ericzhou/DialoGPT-Medium-Rick_v2")
def predict(input):
# tokenize the new input sentence
new_user_input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors='pt')
# generate a response
history = model.generate(new_user_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id).tolist()
# convert the tokens to text, and then split the responses into the right format
response = tokenizer.decode(history[0]).split("<|endoftext|>")
return response[1]
gr.Interface(fn = predict, inputs = ["textbox"], outputs = ["text"], title = title, description = description, article = article).launch()
```
Of course, you don't have to use HTML and can instead rely on markdown, like we've done in the `article` parameter above.
The table below shows the syntax for the most common markdown commands.
| Type | Syntax |
| ----------- | ----------- |
| Header | # Heading 1 ## Heading 2 ### Heading 3 |
| Link | \[gradio's website](https://gradio.app) |
| Image | !\[gradio's logo](https://gradio.app/assets/img/logo.png) |
| Text Formatting | \_italic_ \*\*bold** |
| List | \* Item 1 \* Item 2 |
| Quote | \> this is a quote |
| Code | Inline \`code\` has \`back-ticks around\` it. |
Here's a neat [cheatsheet](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet) with more.
### That's all! Happy building :)

View File

@ -37,6 +37,7 @@ With these three arguments, we can quickly create interfaces and `launch()` th
Let's say we want to customize the input text field - for example, we wanted it to be larger and have a text hint. If we use the actual input class for `Textbox` instead of using the string shortcut, we have access to much more customizability. To see a list of all the components we support and how you can customize them, check out the [Docs](https://gradio.app/docs).
**Sidenote**: `Interface.launch()` method returns 3 values:
1. `app`, which is the FastAPI application that is powering the Gradio demo
2. `local_url`, which is the local address of the server
3. `share_url`, which is the public address for this demo (it is generated if `share=True` more [on this later](https://gradio.app/getting_started/#sharing-interfaces-publicly))
@ -161,7 +162,7 @@ Share links expire after 72 hours. For permanent hosting, see Hosting Gradio App
### Hosting Gradio Apps on Spaces
Huggingface provides the infrastructure to permanently host your Gradio model on the internet, for free! You can either drag and drop a folder containing your Gradio model and all related files, or you can point HF Spaces to your Git repository and HP Spaces will pull the Gradio interface from there. See [Huggingface Spaces](http://huggingface.co/spaces/) for more information.
Huggingface provides the infrastructure to permanently host your Gradio model on the internet, for free! You can either drag and drop a folder containing your Gradio model and all related files, or you can point HF Spaces to your Git repository and HF Spaces will pull the Gradio interface from there. See [Huggingface Spaces](http://huggingface.co/spaces/) for more information.
![Hosting Demo](/assets/img/hf_demo.gif)

View File

@ -0,0 +1,161 @@
# Using Hugging Face Integrations
related_spaces: https://huggingface.co/spaces/osanseviero/helsinki_translation_en_es, https://huggingface.co/spaces/osanseviero/remove-bg-webcam, https://huggingface.co/spaces/mrm8488/GPT-J-6B, https://huggingface.co/spaces/akhaliq/T0pp, https://huggingface.co/spaces/osanseviero/mix_match_gradio
tags: HUB, SPACES, EMBED
Contributed by <a href="https://huggingface.co/osanseviero">Omar Sanseviero</a> 🦙
## Introduction
The Hugging Face Hub is a central platform that has over 30,000 [models](https://huggingface.co/models), 3,000 [datasets](https://huggingface.co/datasets) and 2,000 [demos](https://huggingface.co/spaces), also known as Spaces. From Natural Language Processing to Computer Vision and Speech, the Hub supports multiple domains. Although Hugging Face is famous for its 🤗 transformers library, the Hub also supports dozens of ML libraries, such as PyTorch, TensorFlow, spaCy, and many others.
Gradio has multiple features that make it extremely easy to leverage existing models and Spaces on the Hub. This guide walks through these features.
## Using regular inference with `pipeline`
First, let's build a simple interface that translates text from English to Spanish. Between the over a thousand models shared by the University of Helsinki, there is an [existing model](https://huggingface.co/Helsinki-NLP/opus-mt-en-es), `opus-mt-en-es`, that does precisely this!
The 🤗 transformers library has a very easy-to-use abstraction, [`pipeline()`](https://huggingface.co/docs/transformers/v4.16.2/en/main_classes/pipelines#transformers.pipeline) that handles most of the complex code to offer a simple API for common tasks. By specifying the task and an (optional) model, you can use an existing model with few lines:
```python
import gradio as gr
from transformers import pipeline
pipe = pipeline("translation", model="Helsinki-NLP/opus-mt-en-es")
def predict(text):
return pipe(text)[0]["translation_text"]
iface = gr.Interface(
fn=predict,
inputs='text',
outputs='text',
examples=[["Hello! My name is Omar"]]
)
iface.launch()
```
The previous code produces the following interface, which you can try right here in your browser:
<iframe src="https://hf.space/gradioiframe/osanseviero/helsinki_translation_en_es/+" frameBorder="0" height="450" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
This demo requires installing four libraries: gradio, torch, transformers, and sentencepiece. Apart from that, this is a Gradio demo with the structure you're used to! The demo is a usual Gradio `Interface` with a prediction function, a specified input, and a specified output. The prediction function executes the `pipeline` function with the given input, retrieves the first (and only) translation result, and returns the `translation_text` field, which you're interested in.
## Using Hugging Face Inference API
Hugging Face has a service called the [Inference API](https://huggingface.co/inference-api) which allows you to send HTTP requests to models in the Hub. For transformers-based models, the API can be 2 to 10 times faster than running the inference yourself. The API has a friendly [free tier](https://huggingface.co/pricing).
Let's try the same demo as above but using the Inference API instead of loading the model yourself. Given a Hugging Face model supported in the Inference API, Gradio can automatically infer the expected input and output and make the underlying server calls, so you don't have to worry about defining the prediction function. Here is what the code would look like!
```python
import gradio as gr
iface = gr.Interface.load("huggingface/Helsinki-NLP/opus-mt-en-es",
examples=[["Hello! My name is Omar"]]
)
iface.launch()
```
Let's go over some of the key differences:
* `Interface.load()` is used instead of the usual `Interface()`.
* `Interface.load()` receives a string with the prefix `huggingface/`, and then the model repository ID.
* Since the input, output and prediction functions are not needed, you only need to modify the UI parts (such as `title`, `description`, and `examples`).
* There is no need to install any dependencies (except Gradio) since you are not loading the model on your computer.
You might notice that the first inference takes about 20 seconds. This happens since the Inference API is loading the model in the server. You get some benefits afterward:
* The inference will be much faster.
* The server caches your requests.
* You get built-in automatic scaling.
## Hosting your Gradio demos
[Hugging Face Spaces](https://hf.co/spaces) allows anyone to host their Gradio demos freely. The community shares over 2,000 Spaces. Uploading your Gradio demo takes a couple of minutes. You can head to [hf.co/new-space](https://huggingface.co/new-space), select the Gradio SDK, create an `app.py` file, and voila! You have a demo you can share with anyone else.
## Building demos based on other demos
You can use the existing Spaces to tweak the UI or combine multiple demos. Let's find out how to do this! First, let's take a look at an existing demo that does background removal.
This is a Gradio demo [already shared](https://huggingface.co/spaces/eugenesiow/remove-bg) by a community member. You can load an existing demo using `Interface` in a syntax similar to how it's done for the Inference API. It just takes two lines of code and with the prefix `spaces`.
```python
import gradio as gr
gr.Interface.load("spaces/eugenesiow/remove-bg").launch()
```
The code snippet above will load the same interface as the corresponding Space demo.
<iframe src="https://hf.space/gradioiframe/eugenesiow/remove-bg/+" frameBorder="0" height="900" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
You can change UI elements, such as the title or theme, but also change the expected type. The previous Space expected users to upload images. What if you would like users to have their webcam and remove the background from there? You can load the Space but change the source of input as follows:
```python
import gradio as gr
gr.Interface.load(
"spaces/eugenesiow/remove-bg",
inputs=[gr.inputs.Image(label="Input Image", source="webcam")]
).launch()
```
The code above generates the following demo.
<iframe src="https://hf.space/gradioiframe/osanseviero/remove-bg-webcam/+" frameBorder="0" height="600" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
As you can see, the demo looks the same, but it uses a webcam input instead of user-uploaded images.
## Using multiple Spaces
Sometimes a single model inference will not be enough: you might want to call multiple models by piping them (using the output of model A as the input of model B). `Series` can achieve this. Other times, you might want to run two models in parallel to compare them. `Parallel` can do this!
Let's combine the notion of running things in parallel with the Spaces integration. The [GPT-J-6B](https://huggingface.co/spaces/mrm8488/GPT-J-6B) Space demos a model that generates text using a model called GPT-J. The [T0pp](https://huggingface.co/spaces/akhaliq/T0pp) Space demos another generative model called T0pp. Let's see how to combine both into one.
```python
import gradio as gr
iface1 = gr.Interface.load("spaces/mrm8488/GPT-J-6B")
iface2 = gr.Interface.load("spaces/akhaliq/T0pp")
iface3 = gr.mix.Parallel(
iface1, iface2,
examples = [
['Which country will win the 2002 World Cup?'],
["A is the son's of B's uncle. What is the family relationship between A and B?"],
["In 2030, "],
])
iface3.launch()
```
`iface1` and `iface2` are loading existing Spaces. Then, with `Parallel`, you can run the interfaces in parallel. When you click submit, you will get the output for both interfaces. This is what the demo looks like:
<iframe src="https://hf.space/gradioiframe/osanseviero/mix_match_gradio/+" frameBorder="0" height="450" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
Although both models are generative, you can see that the way both models behave is very different. That's a powerful application of `Parallel`!
## Embedding your Space demo on other websites
Throughout this guide, you've seen there are Gradio demos embedded. You can also do this on your own website! The first step is to create a Space with the demo you want to showcase. You can embed it in your HTML code, as shown in the following self-contained example.
```bash
&lt;iframe src="https://hf.space/gradioiframe/osanseviero/mix_match_gradio/+" frameBorder="0" height="450" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"&gt;&lt;/iframe&gt;
```
## Recap
That's it! Let's recap what you can do:
1. Host your Gradio demos in Spaces.
2. Use the Inference API to build demos in two lines of code.
3. Load existing Spaces and modify them.
4. Combine multiple Spaces by running them sequentially or in parallel.
5. Embed your Space demo directly on a website.
🤗

View File

@ -0,0 +1,49 @@
# Using the API Docs
tags: API
## Introduction
Every Gradio interface comes with an API you can use directly. To find out how to use it, just click the `view the api` button at the bottom of the page (whether it's hosted on Spaces, generated using `share=True`, or running locally).
![view the api button](website/src/assets/img/guides/using_the_api_docs/view-the-api-button.gif)
This button opens up interface-specific API docs. This will show you the predict endpoint, payload, response, as well as sample code snippets in Python, JS and cURL.
## What will the API docs tell you?
Below is an (iframed) example: the API Docs of [this space](https://huggingface.co/spaces/aliabd/nubia).
<iframe src="https://hf.space/gradioiframe/aliabd/nubia/api" frameBorder="5" height="725" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
It shows that there are 7 sections on the page:
* The predict **endpoint**:
* Where to send the payload (`https://hf.space/gradioiframe/aliabd/nubia/+/api/predict/`). This is likely the most important piece of information as it defines where the request will be sent.
* The **inputs** and their types
* The **outputs** and their types
* The **payload**:
* What to send and how to structure it. It will always look like:
```python
{
"data": [ input_1, input_2 ... ]
}
```
* The **response**:
* What to expect to receive. It will always look like:
```python
{
"data": [ output_1, output_2 ... ],
"durations": [ float ], # the time taken for the prediction to complete
"avg_durations": [ float ] # the average time taken for all predictions so far (used to estimate the runtime)
}
```
* A live **demo** and **code snippets** in Python, JS and cURL
* You can go directly to this section if you want to quickly try out the API and play around with it.
* Other **methods** related to the inputs/outputs
* Use gradio's helper methods to quickly convert your files to base64 and other formats required by the API.
### That's all! Happy building :)

0
scripts/format_backend.sh Normal file → Executable file
View File

View File

@ -5,7 +5,7 @@ if [ -z "$(ls | grep CONTRIBUTING.md)" ]; then
else
echo "Uploading to pypi"
set -e
git pull
git pull origin master
old_version=$(grep -Po "(?<=version=\")[^\"]+(?=\")" setup.py)
echo "Current version is $old_version. New version?"
read new_version

View File

@ -5,7 +5,7 @@ except ImportError:
setup(
name="gradio",
version="2.7.0b70",
version="2.8.13",
include_package_data=True,
description="Python library for easily interacting with trained machine learning models",
author="Abubakar Abid, Ali Abid, Ali Abdalla, Dawood Khan, Ahsen Khaliq, Pete Allen, Ömer Faruk Özdemir",

Binary file not shown.

Before

Width:  |  Height:  |  Size: 32 KiB

After

Width:  |  Height:  |  Size: 32 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 237 KiB

After

Width:  |  Height:  |  Size: 237 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 33 KiB

After

Width:  |  Height:  |  Size: 33 KiB

View File

@ -4,6 +4,7 @@ import os
import unittest
from gradio import queueing
from gradio.routes import QueuePushBody
os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
@ -30,9 +31,11 @@ class TestQueuingActions(unittest.TestCase):
queueing.close()
def test_push_pop_status(self):
hash1, position = queueing.push({"data": "test1"}, "predict")
request = QueuePushBody(data="test1", action="predict")
hash1, position = queueing.push(request)
self.assertEquals(position, 0)
hash2, position = queueing.push({"data": "test2"}, "predict")
request = QueuePushBody(data="test2", action="predict")
hash2, position = queueing.push(request)
self.assertEquals(position, 1)
status, position = queueing.get_status(hash2)
self.assertEquals(status, "QUEUED")
@ -43,8 +46,9 @@ class TestQueuingActions(unittest.TestCase):
self.assertEquals(action, "predict")
def test_jobs(self):
hash1, _ = queueing.push({"data": "test1"}, "predict")
hash2, position = queueing.push({"data": "test1"}, "predict")
request = QueuePushBody(data="test1", action="predict")
hash1, _ = queueing.push(request)
hash2, position = queueing.push(request)
self.assertEquals(position, 1)
queueing.start_job(hash1)

View File

@ -44,6 +44,22 @@ class TestRoutes(unittest.TestCase):
output = dict(response.json())
self.assertEqual(output["data"], ["testtest"])
def test_state(self):
def predict(input, history=""):
history += input
return history, history
io = Interface(predict, ["textbox", "state"], ["textbox", "state"])
app, _, _ = io.launch(prevent_thread_lock=True)
client = TestClient(app)
response = client.post("/api/predict/", json={"data": ["test", None]})
output = dict(response.json())
print("output", output)
self.assertEqual(output["data"], ["test", None])
response = client.post("/api/predict/", json={"data": ["test", None]})
output = dict(response.json())
self.assertEqual(output["data"], ["testtest", None])
def test_queue_push_route(self):
queueing.push = mock.MagicMock(return_value=(None, None))
response = self.client.post(

View File

@ -16,6 +16,7 @@ from gradio.utils import (
assert_configs_are_equivalent_besides_ids,
colab_check,
error_analytics,
format_ner_list,
get_local_ip_address,
ipython_check,
json,
@ -140,5 +141,28 @@ class TestAssertConfigsEquivalent(unittest.TestCase):
)
class TestFormatNERList(unittest.TestCase):
def test_format_ner_list_standard(self):
string = "Wolfgang lives in Berlin"
groups = [
{"entity_group": "PER", "start": 0, "end": 8},
{"entity_group": "LOC", "start": 18, "end": 24},
]
result = [
("", None),
("Wolfgang", "PER"),
(" lives in ", None),
("Berlin", "LOC"),
("", None),
]
self.assertEqual(format_ner_list(string, groups), result)
def test_format_ner_list_empty(self):
string = "I live in a city"
groups = []
result = [("I live in a city", None)]
self.assertEqual(format_ner_list(string, groups), result)
if __name__ == "__main__":
unittest.main()

View File

@ -1,5 +1,15 @@
<!DOCTYPE html>
<html lang="en" style="height: 100%; margin: 0; padding: 0">
<html
lang="en"
style="
margin: 0;
padding: 0;
display: flex;
flex-direction: column;
flex-grow: 1;
height: 100%;
"
>
<head>
<meta charset="utf-8" />
<meta
@ -47,7 +57,18 @@
<title>Gradio</title>
</head>
<body style="height: 100%; margin: 0; padding: 0">
<div id="root" style="min-height: 100%"></div>
<body
style="
margin: 0;
padding: 0;
display: flex;
flex-direction: column;
flex-grow: 1;
"
>
<div
id="root"
style="display: flex; flex-direction: column; flex-grow: 1"
></div>
</body>
</html>

View File

@ -32,12 +32,11 @@
return () => dispatch("destroy", id);
});
let style =
"css" in props
? Object.entries(props.css)
.map((rule) => rule[0] + ": " + rule[1])
.join("; ")
: null;
let style = props.css
? Object.entries(props.css)
.map((rule) => rule[0] + ": " + rule[1])
.join("; ")
: null;
</script>
<svelte:component

View File

@ -54,6 +54,7 @@ interface Config {
space?: string;
detail: string;
dark: boolean;
auth_required: boolean;
}
window.launchGradio = (config: Config, element_query: string) => {

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,23 @@
<!-- Compact preview of a dataframe example: renders at most a 3x3 slice of
     `value`, adding "..." cells/rows when the data extends beyond the preview. -->
<script lang="ts">
// 2-D dataframe contents: rows of string or number cells.
export let value: Array<Array<string | number>>;
</script>
<table class="input-dataframe-example">
{#each value.slice(0, 3) as row}
<tr>
{#each row.slice(0, 3) as cell}
<td class="p-2">{cell}</td>
{/each}
<!-- Row wider than the 3-column preview: append an ellipsis cell. -->
{#if row.length > 3}
<td class="p-2">...</td>
{/if}
</tr>
{/each}
<!-- More than 3 rows: append one full ellipsis row. min(4, width) matches the
     preview width (up to 3 data columns plus the optional "..." column). -->
{#if value.length > 3}
<tr>
{#each Array(Math.min(4, value[0].length)) as _}
<td class="p-2">...</td>
{/each}
</tr>
{/if}
</table>

View File

@ -407,16 +407,15 @@
on:blur={({ currentTarget }) =>
currentTarget.setAttribute("tabindex", "-1")}
/>
{:else}
<span
class=" cursor-default w-full"
class:opacity-0={editing === id}
tabindex="-1"
role="button"
>
{value}
</span>
{/if}
<span
class="cursor-default w-full"
class:opacity-0={editing === id}
tabindex="-1"
role="button"
>
{value ?? ""}
</span>
</div>
</td>
{/each}

View File

@ -52,14 +52,17 @@ def render_index():
generated_template.write(output_html)
guide_files = ["getting_started.md"]
all_guides = sorted(os.listdir(GRADIO_GUIDES_DIR))
guide_files.extend([file for file in all_guides if file != "getting_started.md"])
guides = []
for guide in sorted(os.listdir(GRADIO_GUIDES_DIR)):
for guide in guide_files:
if guide.lower() == "readme.md":
continue
guide_name = guide[:-3]
pretty_guide_name = " ".join(
[
word.capitalize().replace("Ml", "ML").replace("Gan", "GAN")
word.capitalize().replace("Ml", "ML").replace("Gan", "GAN").replace("Api", "API")
for word in guide_name.split("_")
]
)
@ -73,13 +76,23 @@ for guide in sorted(os.listdir(GRADIO_GUIDES_DIR)):
spaces = None
if "related_spaces: " in guide_content:
spaces = guide_content.split("related_spaces: ")[1].split("\n")[0].split(", ")
title = guide_content.split("\n")[0]
contributor = None
if "Contributed by " in guide_content:
contributor = guide_content.split("Contributed by ")[1].split("\n")[0]
url = f"https://gradio.app/{guide_name}/"
guide_content = "\n".join(
[
line
for line in guide_content.split("\n")
if not (line.startswith("tags: ") or line.startswith("related_spaces: "))
if not (
line.startswith("tags: ")
or line.startswith("related_spaces: ")
or line.startswith("Contributed by ")
or line == title
)
]
)
@ -91,15 +104,15 @@ for guide in sorted(os.listdir(GRADIO_GUIDES_DIR)):
"tags": tags,
"spaces": spaces,
"url": url,
"contributor": contributor,
}
)
def render_guides_main():
filtered_guides = [guide for guide in guides if guide["name"] != "getting_started"]
with open("src/guides_main_template.html", encoding="utf-8") as template_file:
template = Template(template_file.read())
output_html = template.render(guides=filtered_guides, navbar_html=navbar_html)
output_html = template.render(guides=guides, navbar_html=navbar_html)
os.makedirs(os.path.join("generated", "guides"), exist_ok=True)
with open(
os.path.join("generated", "guides", "index.html"), "w", encoding="utf-8"
@ -166,7 +179,7 @@ def render_guides():
guide_output = guide_output.replace("</pre>", f"</pre>{copy_button}</div>")
output_html = markdown2.markdown(
guide_output, extras=["target-blank-links", "header-ids"]
guide_output, extras=["target-blank-links", "header-ids", "tables"]
)
os.makedirs("generated", exist_ok=True)
os.makedirs(os.path.join("generated", guide["name"]), exist_ok=True)
@ -188,6 +201,7 @@ def render_guides():
guide_name=guide["name"],
spaces=guide["spaces"],
tags=guide["tags"],
contributor=guide["contributor"],
**GRADIO_ASSETS,
)
generated_template.write(output_html)

Binary file not shown.

After

Width:  |  Height:  |  Size: 397 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 236 KiB

View File

@ -129,12 +129,14 @@
<h2 class="font-semibold group-hover:underline text-xl">{{ guide.pretty_name }}
</h2>
<div class="tags-holder">
{% if guide.tags is not none %}
<p>
{% for tag in guide.tags %}
{{ tag }}<!--
-->{% if not loop.last %}, {% endif %}
{% endfor %}
</p>
{% endif %}
</div>
</div>
</a>
@ -220,7 +222,7 @@
if (txtValue.toUpperCase().indexOf(filter) > -1 || guideContent.toUpperCase().indexOf(filter) > -1) {
a[{{ loop.index - 1}}].style.display = "";
} else {
a[{{ loop.index - 1 }}].style.display = "none";
a[{{ loop.index - 1}}].style.display = "none";
counter++;
}
{% endfor %}

View File

@ -83,6 +83,25 @@
visibility: visible;
}
ol {
list-style: auto;
padding-inline-start: 40px;
list-style-type: none;
}
table {
margin: auto;
}
th {
background: #ccc;
}
th, td {
border: 1px solid #ccc;
padding: 10px;
}
</style>
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-156449732-1"></script>
<script>
@ -98,6 +117,13 @@
<body class="bg-white text-gray-900 text-md sm:text-lg">
{{navbar_html|safe}}
<div class="container mx-auto max-w-4xl px-4 mb-12 mt-6" id="guide-template">
<div class="prose mt-6 mb-6">
<h1 id="{{guide_name}}" class="header">{{title}}</h1>
{% if contributor is not none %}
<p>Contributed by {{contributor}}</p>
{% endif %}
</div>
{% if spaces is not none %}
<div id='spaces-holder'>
<a href='https://hf.co/spaces' target='_blank'>
@ -113,7 +139,7 @@
</div>
{% endif %}
<div class="prose mt-6">
<div class="prose mt-6 mb-6">
{{ template_html|safe }}
</div>
</div>

7
website/reload_website.sh Executable file → Normal file
View File

@ -1,4 +1,5 @@
#!/bin/sh
set -e
. /home/ubuntu/.bashrc
export PATH="/usr/local/bin:/usr/bin:/bin"
@ -13,4 +14,8 @@ else
fi
docker-compose build
docker-compose up -d
fi
LATEST=$(git log -1 | fgrep commit)$(git log -1 | tail -1)
curl -X POST -H 'Content-type: application/json' --data '{"text":"gradio.app relaoded successfully! :ship:\n\n Latest live commit:\n>`'"${LATEST}"'`"}' ${SLACK_WEBHOOK}
fi

View File

@ -0,0 +1,11 @@
#!/bin/sh
# Run the website reload script and, if it fails, post the captured error
# output to Slack. Expects SLACK_WEBHOOK to be set in the environment
# (presumably exported via ~/.bashrc below -- confirm on the host).
. /home/ubuntu/.bashrc
export PATH="/usr/local/bin:/usr/bin:/bin"
# Capture stdout AND stderr so the Slack message contains the full failure log.
ERROR=$(sh ./reload_website.sh 2>&1)
# $? here is the exit status of the command substitution on the line above.
if ! [ $? -eq 0 ]; then
# Build the Slack JSON payload with `jo`, wrapping the error in a code fence.
data=$( jo text="$(echo "gradio.app is not tracking master :o: \nError:\n\n\`\`\`'$ERROR'\`\`\`")")
echo "$data"
curl -X POST -H 'Content-type: application/json' --data "$data" ${SLACK_WEBHOOK}
fi