Mirror of https://github.com/gradio-app/gradio.git (synced 2025-04-12 12:40:29 +08:00)

Commit 6a30e5d24f: Merge branch 'master' into Accelerate-Tests
.github/ISSUE_TEMPLATE/feature_request.md (vendored, 2 lines changed)
@@ -1,6 +1,6 @@
 ---
 name: Feature request
-about: Suggest an improvement or new feature for Gradio
+about: Suggest an improvement or new feature or a new Guide for Gradio
 title: ''
 labels: ''
 assignees: ''
@@ -5,18 +5,25 @@ Prequisites:
 * Python 3.7+
 * Node 16.0+ (optional for backend-only changes, but needed for any frontend changes)
 
-More than 30 awesome developers have contributed to the `gradio` library, and we'd be thrilled if you would like be the next `gradio` contributor! You can start by forking or cloning the
-repo (https://github.com/gradio-app/gradio.git) and creating your own branch to work from.
+More than 30 awesome developers have contributed to the `gradio` library, and we'd be thrilled if you would like be the next `gradio` contributor! You can start by forking or cloning the repo (https://github.com/gradio-app/gradio.git) and creating your own branch to work from.
 
-### To install the local version of Gradio
+### Install Gradio locally from the `master` branch
 
 * Clone this repo
 * Navigate to the repo folder and run
 
 ```bash
 bash scripts/install_gradio.sh
 ```
 
+### To install the local development version of Gradio
+* Build the front end
+
+```
+bash scripts/build_frontend.sh
+```
+
+
 ### Install development and testing requirements
 
 * Navigate to the repo folder and install test requirements (note that it is highly recommended to use a virtual environment since the versions are pinned)
 
@@ -26,12 +33,6 @@ bash scripts/install_test_requirements.sh
 
 * Install [chrome driver](https://sites.google.com/chromium.org/driver/) and [chrome](https://www.google.com/chrome/) for selenium (necessary for tests)
 
-* Build the front end
-
-```
-bash scripts/build_frontend.sh
-```
-
 * Run the tests
 
 ```
@@ -15,7 +15,7 @@ def plot_forecast(final_year, companies, noise, show_legend, point_style):
     ax = fig.add_subplot(111)
     for i, company in enumerate(companies):
         series = np.arange(0, year_count, dtype=float)
-        series = series ** 2 * (i + 1)
+        series = series**2 * (i + 1)
         series += np.random.rand(year_count) * noise
         ax.plot(x, series, plt_format)
         if show_legend:
@@ -2,9 +2,11 @@ import gradio as gr
 
 user_db = {"admin": "admin", "foo": "bar"}
 
+
 def greet(name):
     return "Hello " + name + "!!"
 
+
 iface = gr.Interface(fn=greet, inputs="text", outputs="text")
 if __name__ == "__main__":
     iface.launch(auth=lambda u, p: user_db.get(u) == p)
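For reference, `launch()` accepts auth either as a callable, as the demo above does, or as a fixed username/password pair. A minimal sketch of both forms (the tuple form is taken from gradio's public API and is an assumption here, since this diff only exercises the callable form):

import gradio as gr

iface = gr.Interface(fn=lambda name: "Hello " + name, inputs="text", outputs="text")

# Callable form: receives (username, password) and returns True to accept the login.
iface.launch(auth=lambda u, p: u == "admin" and p == "admin")

# Tuple form: a single hard-coded credential pair.
# iface.launch(auth=("admin", "admin"))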
@@ -1,8 +1,10 @@
 import gradio as gr
 
+
 def greet(name):
     return "Hello " + name + "!!"
 
+
 iface = gr.Interface(fn=greet, inputs="text", outputs="text")
 if __name__ == "__main__":
     iface.launch()
@@ -1,8 +1,10 @@
 import gradio as gr
 
+
 def greet(name):
     return "Hello " + name + "!"
 
+
 iface = gr.Interface(
     fn=greet,
     inputs=gr.inputs.Textbox(lines=2, placeholder="Name Here..."),
@@ -1,11 +1,13 @@
 import gradio as gr
 
+
 def greet(name, is_morning, temperature):
     salutation = "Good morning" if is_morning else "Good evening"
     greeting = "%s %s. It is %s degrees today" % (salutation, name, temperature)
     celsius = (temperature - 32) * 5 / 9
     return greeting, round(celsius, 2)
 
+
 iface = gr.Interface(
     fn=greet,
     inputs=["text", "checkbox", gr.inputs.Slider(0, 100)],
@@ -86,11 +86,9 @@ iface = gr.Interface(
     fn,
     inputs=[
         gr.inputs.Textbox(default="Lorem ipsum", label="Textbox"),
-        gr.inputs.Textbox(lines=3, placeholder="Type here..",
-                          label="Textbox 2"),
+        gr.inputs.Textbox(lines=3, placeholder="Type here..", label="Textbox 2"),
         gr.inputs.Number(label="Number", default=42),
-        gr.inputs.Slider(minimum=10, maximum=20, default=15,
-                         label="Slider: 10 - 20"),
+        gr.inputs.Slider(minimum=10, maximum=20, default=15, label="Slider: 10 - 20"),
         gr.inputs.Slider(maximum=20, step=0.04, label="Slider: step @ 0.04"),
         gr.inputs.Checkbox(label="Checkbox"),
         gr.inputs.CheckboxGroup(
@@ -99,17 +97,14 @@ iface = gr.Interface(
         gr.inputs.Radio(label="Radio", choices=CHOICES, default=CHOICES[2]),
         gr.inputs.Dropdown(label="Dropdown", choices=CHOICES),
         gr.inputs.Image(label="Image", optional=True),
-        gr.inputs.Image(label="Image w/ Cropper",
-                        tool="select", optional=True),
+        gr.inputs.Image(label="Image w/ Cropper", tool="select", optional=True),
         gr.inputs.Image(label="Sketchpad", source="canvas", optional=True),
         gr.inputs.Image(label="Webcam", source="webcam", optional=True),
         gr.inputs.Video(label="Video", optional=True),
         gr.inputs.Audio(label="Audio", optional=True),
-        gr.inputs.Audio(label="Microphone",
-                        source="microphone", optional=True),
+        gr.inputs.Audio(label="Microphone", source="microphone", optional=True),
         gr.inputs.File(label="File", optional=True),
-        gr.inputs.Dataframe(label="Dataframe", headers=[
-                            "Name", "Age", "Gender"]),
+        gr.inputs.Dataframe(label="Dataframe", headers=["Name", "Age", "Gender"]),
         gr.inputs.Timeseries(x="time", y=["price", "value"], optional=True),
     ],
     outputs=[
@@ -118,8 +113,9 @@ iface = gr.Interface(
         gr.outputs.Audio(label="Audio"),
         gr.outputs.Image(label="Image"),
         gr.outputs.Video(label="Video"),
-        gr.outputs.HighlightedText(label="HighlightedText", color_map={
-                                   "punc": "pink", "test 0": "blue"}),
+        gr.outputs.HighlightedText(
+            label="HighlightedText", color_map={"punc": "pink", "test 0": "blue"}
+        ),
         gr.outputs.HighlightedText(label="HighlightedText", show_legend=True),
         gr.outputs.JSON(label="JSON"),
         gr.outputs.HTML(label="HTML"),
@@ -127,8 +123,7 @@ iface = gr.Interface(
         gr.outputs.Dataframe(label="Dataframe"),
         gr.outputs.Dataframe(label="Numpy", type="numpy"),
         gr.outputs.Carousel("image", label="Carousel"),
-        gr.outputs.Timeseries(
-            x="time", y=["price", "value"], label="Timeseries"),
+        gr.outputs.Timeseries(x="time", y=["price", "value"], label="Timeseries"),
     ],
     examples=[
         [
@@ -6,11 +6,19 @@ import math
 
 import numpy as np
 import torch
-from pytorch_transformers import (WEIGHTS_NAME, BertConfig,
-                                  BertForQuestionAnswering, BertTokenizer)
+from pytorch_transformers import (
+    WEIGHTS_NAME,
+    BertConfig,
+    BertForQuestionAnswering,
+    BertTokenizer,
+)
 from torch.utils.data import DataLoader, SequentialSampler, TensorDataset
-from utils import (get_answer, input_to_squad_example,
-                   squad_examples_to_features, to_list)
+from utils import (
+    get_answer,
+    input_to_squad_example,
+    squad_examples_to_features,
+    to_list,
+)
 
 RawResult = collections.namedtuple(
     "RawResult", ["unique_id", "start_logits", "end_logits"]
@@ -5,8 +5,7 @@ import math
 
 import numpy as np
 import torch
-from pytorch_transformers.tokenization_bert import (BasicTokenizer,
-                                                    whitespace_tokenize)
+from pytorch_transformers.tokenization_bert import BasicTokenizer, whitespace_tokenize
 from torch.utils.data import DataLoader, SequentialSampler, TensorDataset
 
 
@@ -7,6 +7,7 @@ def reverse_audio(audio):
     sr, data = audio
     return (sr, np.flipud(data))
 
+
 iface = gr.Interface(reverse_audio, "microphone", "audio", examples="audio")
 
 if __name__ == "__main__":
@@ -13,7 +13,7 @@ def stock_forecast(final_year, companies, noise, show_legend, point_style):
     ax = fig.add_subplot(111)
     for i, company in enumerate(companies):
        series = np.arange(0, year_count, dtype=float)
-        series = series ** 2 * (i + 1)
+        series = series**2 * (i + 1)
         series += np.random.rand(year_count) * noise
         ax.plot(x, series, plt_format)
         if show_legend:
frontend/package-lock.json (generated, 6342 lines changed) — diff suppressed because it is too large.
@@ -4,10 +4,10 @@
 <head>
   <meta charset="utf-8" />
   <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
-  <link rel='stylesheet' href='/build/bundle.css'>
-  <link rel='stylesheet' href='/build/themes.css'>
+  <link rel='stylesheet' href='build/bundle.css'>
+  <link rel='stylesheet' href='build/themes.css'>
 
-  <link rel="stylesheet" href="./global.css">
+  <link rel="stylesheet" href="build/global.css">
 
   <title>{{ config['title'] or 'Gradio' }}</title>
   <meta property="og:url" content="https://gradio.app/" />
@@ -20,7 +20,9 @@
   <meta name="twitter:title" content="{{ config['title'] or '' }}">
   <meta name="twitter:description" content="{{ config['simple_description'] or '' }}">
   <meta name="twitter:image" content="{{ config['thumbnail'] or '' }}">
+  {%if config['analytics_enabled'] %}
   <script async src="https://www.googletagmanager.com/gtag/js?id=UA-156449732-1"></script>
+  {% endif %}
   <script>
     window.dataLayer = window.dataLayer || [];
     function gtag() {
@@ -41,6 +43,6 @@
   <div id="root" style="height: 100%"></div>
 
 </body>
-<script defer src='/build/bundle.js'></script>
+<script defer src='build/bundle.js'></script>
 
 </html>
@@ -52,6 +52,7 @@ export default {
     copy({
       targets: [
        { src: 'public/*', dest: '../gradio/templates/frontend' },
+        { src: 'public/global.css', dest: '../gradio/templates/frontend/build' },
         { src: 'public/static', dest: '../gradio/templates/frontend' }
       ]
     }),
@@ -8,6 +8,11 @@
   import { deepCopy } from "./components/utils/helpers.js";
   import ExampleSet from "./ExampleSet.svelte";
 
+  import huggingface_theme from "./themes/huggingface.scss";
+  import grass_theme from "./themes/grass.scss";
+  import peach_theme from "./themes/peach.scss";
+  import seafoam_theme from "./themes/seafoam.scss";
+
   export let input_components,
     output_components,
     theme,
@@ -266,7 +271,7 @@
       </div>
     {/if}
     {#each output_components as output_component, i}
-      {#if output_values[i] !== null}
+      {#if output_values[i] !== null && output_component.name !== "state"}
         <div class="component" key={i}>
           <div class="panel-header mb-1.5">{output_component.label}</div>
           <svelte:component
@@ -22,14 +22,14 @@
 
 <div class="input-image">
   <div
-    class="image-preview w-full h-80 flex justify-center items-center dark:bg-gray-600 relative"
+    class="image-preview w-full h-60 flex justify-center items-center dark:bg-gray-600 relative"
     class:bg-gray-200={value}
+    class:h-80={source !== "webcam"}
   >
     {#if source === "canvas"}
       <ModifySketch
         on:undo={() => sketch.undo()}
         on:clear={() => sketch.clear()}
+        {static_src}
       />
       <Sketch
         {value}
@@ -1,8 +1,13 @@
 <script>
+  import { afterUpdate } from "svelte";
+
   export let value, theme;
+  let audio;
+
+  afterUpdate(() => audio.src = value)
 </script>
 
-<audio {theme} controls>
+<audio bind:this={audio} class="w-full" {theme} controls>
   <source src={value} />
 </audio>
@@ -1,6 +1,8 @@
 <script>
   import { createEventDispatcher } from "svelte";
 
+  export let static_src;
+
   const dispatch = createEventDispatcher();
 </script>
 
@@ -9,12 +11,12 @@
     class="bg-opacity-30 hover:bg-opacity-100 transition p-1.5 bg-yellow-500 dark:bg-red-600 rounded shadow w-8 h-8"
     on:click={() => dispatch("undo")}
   >
-    <img alt="undo sketch" src="/static/img/undo-solid.svg" />
+    <img alt="undo sketch" src="{static_src}/static/img/undo-solid.svg" />
   </button>
   <button
     class="clear bg-opacity-30 hover:bg-opacity-100 transition p-1 bg-gray-50 dark:bg-gray-500 rounded shadow w-8 h-8"
     on:click={() => dispatch("clear")}
   >
-    <img alt="clear sketch" src="static/img/clear.svg" />
+    <img alt="clear sketch" src="{static_src}/static/img/clear.svg" />
   </button>
 </div>
@@ -14,6 +14,11 @@ window.launchGradio = (config, element_query) => {
   } else {
     config.static_src = "https://gradio.s3-us-west-2.amazonaws.com/PIP_VERSION";
   }
+  if (config.css) {
+    let style = document.createElement("style");
+    style.innerHTML = config.css;
+    document.head.appendChild(style);
+  }
   if (config.detail === "Not authenticated") {
     new Login({
       target: target,
frontend/src/themes/grass.scss (new file, 120 lines)
@@ -0,0 +1,120 @@
.gradio-bg[theme="grass"] {
  @apply dark:bg-gray-700;
}
.gradio-bg[theme="grass"] .gradio-interface {
  .component-set {
    @apply bg-gray-50 dark:bg-gray-800 rounded-none;
  }
  .component {
    @apply p-1 transition;
  }
  .panel-header {
    @apply text-gray-400 dark:text-gray-200 font-semibold;
  }
  .panel-button {
    @apply rounded-none bg-gray-100 dark:bg-gray-800 shadow;
  }
  .panel-button.submit {
    @apply bg-green-400 text-white;
  }
  .examples {
    .examples-holder:not(.gallery) {
      .examples-table {
        @apply dark:bg-gray-800;
        tbody tr:hover {
          @apply bg-green-400;
        }
      }
    }
    .examples-holder.gallery .examples-table {
      .example {
        @apply dark:bg-gray-800;
      }
      .example:hover {
        @apply bg-green-400;
      }
    }
  }

  /* Input Components */
  .input-text {
    @apply rounded-none dark:bg-gray-700 dark:text-gray-50 box-border border-4 p-2 border-white dark:border-gray-600 focus:border-green-400 dark:focus:border-green-400;
  }
  .input-number {
    @apply rounded-none dark:bg-gray-700 dark:text-gray-50 box-border border-4 p-2 border-white dark:border-gray-600 focus:border-green-400 dark:focus:border-green-400;
  }
  .input-slider {
    .range {
      @apply dark:bg-gray-700 rounded-none;
    }
    .range::-webkit-slider-thumb {
      @apply bg-green-400 rounded-none shadow-sm;
    }
    .range::-moz-range-thumb {
      @apply bg-green-400 rounded-none shadow-sm;
    }
    .value {
      @apply font-semibold text-gray-500 dark:bg-gray-700 dark:text-gray-50;
    }
  }
  .input-radio {
    .radio-item {
      @apply bg-gray-100 rounded-none dark:bg-gray-700 dark:text-gray-50;
    }
    .radio-circle {
      @apply hidden;
    }
    .radio-item.selected {
      @apply bg-green-400 text-white shadow;
    }
    .radio-circle {
      @apply w-4 h-4 bg-white transition rounded-full box-border;
    }
  }

  .input-checkbox-group,
  .input-checkbox {
    .checkbox-item {
      @apply bg-gray-100 rounded-none dark:bg-gray-700 dark:text-gray-50;
    }
    .checkbox-item.selected {
      @apply bg-green-400 text-white shadow;
    }
  }
  .input-checkbox {
    .checkbox {
      @apply bg-gray-200;
    }
    .selected .checkbox {
      @apply bg-green-500;
    }
  }
  .input-checkbox-group .checkbox {
    @apply hidden;
  }

  .input-dropdown {
    .selector {
      @apply bg-gray-100 rounded-none dark:bg-gray-700 dark:text-gray-50;
    }
    .dropdown-menu {
      @apply shadow;
    }
    .dropdown-item {
      @apply bg-gray-100 dark:bg-gray-800 hover:bg-green-400 hover:text-gray-50 hover:font-semibold;
    }
    .dropdown-item:first-child,
    .dropdown-item:last-child {
      @apply rounded-none;
    }
  }
  /* Components */
  .output-label {
    .confidence {
      @apply bg-gray-300 text-white dark:bg-gray-600 font-semibold;
    }
    .confidence:first-child {
      @apply bg-green-400;
    }
  }
}
frontend/src/themes/huggingface.scss (new file, 124 lines)
@@ -0,0 +1,124 @@
.gradio-bg[theme="huggingface"] {
  @apply dark:bg-[#0b0f19];
}

.gradio-bg[theme="huggingface"] .gradio-interface {
  .load-status {
    @apply text-gray-700;
  }
  .component-set {
    @apply from-gray-50 to-white dark:from-gray-700 dark:to-gray-800 bg-gradient-to-br border border-gray-100 dark:border-none p-4 rounded-lg gap-3;
  }
  .panel-header {
    @apply flex items-center text-sm text-gray-700 dark:text-gray-50 mb-1.5;
  }
  .panel-button {
    @apply from-gray-50 hover:from-gray-100 to-gray-100 bg-gradient-to-b focus:ring-offset-indigo-300 dark:from-gray-700 dark:hover:from-gray-800 dark:to-gray-800 dark:focus:ring-offset-indigo-700 dark:border-none shadow-sm border rounded-lg;
  }
  .examples {
    .examples-holder:not(.gallery) .examples-table {
      @apply dark:from-gray-700 dark:to-gray-800 bg-gradient-to-br;
      thead {
        @apply border-gray-100 dark:border-gray-600;
      }
      tbody tr:hover {
        @apply bg-indigo-500 dark:bg-indigo-900 text-white;
      }
    }
    .examples-holder.gallery .examples-table {
      .example:hover {
        @apply bg-indigo-500 dark:bg-indigo-900 text-white;
      }
    }
  }
  /* Common Classes */
  .modify-upload {
    @apply p-1 gap-1;
    button {
      @apply rounded-full;
    }
    .edit {
      @apply bg-indigo-400 hover:bg-indigo-500 dark:bg-indigo-500 dark:hover:bg-indigo-400;
    }
    .clear {
      @apply bg-gray-300 hover:bg-gray-400 dark:bg-gray-400 dark:hover:bg-gray-300;
    }
  }
  /* Input Components */
  .input-text {
    @apply p-3 border rounded-lg shadow-inner outline-none focus:ring-1 focus:ring-inset focus:ring-indigo-200 focus:shadow-inner placeholder-gray-400 dark:bg-gray-600 dark:placeholder-gray-100 dark:border-none;
  }
  .input-number {
    @apply p-3 border rounded-lg shadow-inner outline-none focus:ring-1 focus:ring-inset focus:ring-indigo-200 focus:shadow-inner placeholder-gray-400 dark:bg-gray-600 dark:placeholder-gray-100 dark:border-none;
  }
  .input-radio {
    .radio-item {
      @apply border bg-gradient-to-t from-gray-100 to-gray-50 text-gray-600 py-1.5 px-3 hover:to-gray-100 dark:text-gray-50 dark:from-gray-600 dark:to-gray-500 dark:hover:to-gray-600 dark:border-none;
    }
    .radio-item.selected {
      @apply text-indigo-500 dark:text-white dark:from-indigo-600 dark:to-indigo-500;
    }
    .radio-circle {
      @apply bg-white;
    }
    .selected .radio-circle {
      @apply border-4 border-indigo-600 dark:border-indigo-400;
    }
  }
  .input-checkbox-group,
  .input-checkbox {
    .checkbox-item {
      @apply border bg-gradient-to-t from-gray-100 to-gray-50 hover:to-gray-100 text-gray-600 dark:text-gray-50 dark:from-gray-600 dark:to-gray-500 dark:hover:to-gray-600 dark:border-none py-1.5 px-3;
    }
    .checkbox-item.selected {
      @apply text-indigo-500 dark:text-white dark:from-indigo-600 dark:to-indigo-500;
    }
    .selected .checkbox {
      @apply bg-indigo-600 dark:bg-indigo-400;
    }
  }
  .input-dropdown {
    .selector {
      @apply border bg-gradient-to-t from-gray-100 to-gray-50 text-gray-600 dark:text-gray-50 dark:from-gray-600 dark:to-gray-500 dark:hover:to-gray-600 dark:border-none py-1.5 px-3 hover:to-gray-100;
    }
    .dropdown-item {
      @apply bg-gray-50 dark:bg-gray-500 hover:bg-gray-400 hover:text-gray-50 dark:hover:bg-indigo-600;
    }
  }
  .input-slider {
    @apply text-center;
    .range {
      @apply bg-white hover:bg-gray-100 dark:bg-gray-600 rounded-full border dark:border-none;
    }
    .range::-webkit-slider-thumb {
      @apply border dark:bg-white bg-indigo-500 rounded-full shadow;
    }
    .range::-moz-range-thumb {
      @apply border dark:bg-white bg-indigo-500 rounded-full shadow;
    }
    .value {
      @apply bg-gray-100 text-gray-700 dark:text-gray-50 dark:bg-gray-600 dark:border-none shadow-inner;
    }
  }
  .input-audio {
    .start {
      @apply bg-gradient-to-t border from-gray-100 to-gray-50 text-gray-600 py-1.5 px-3 hover:to-gray-100 dark:text-gray-50 dark:from-gray-600 dark:to-gray-500 dark:hover:to-gray-600 dark:border-none;
    }
    .stop {
      @apply border border-red-200 bg-gradient-to-t from-red-200 to-red-50 text-red-600 py-1.5 px-3 hover:to-red-100 dark:from-red-700 dark:to-red-600 dark:text-red-100;
    }
  }
  /* Output Components */
  .output-text {
    @apply p-3 border rounded-lg shadow-inner outline-none focus:ring-1 focus:ring-inset focus:ring-indigo-200 focus:shadow-inner whitespace-pre-wrap dark:bg-gray-600 dark:border-none;
  }
  .output-label {
    .output-class {
      @apply hidden;
    }
    .confidence {
      @apply bg-gradient-to-r from-indigo-200 to-indigo-500 dark:from-indigo-500 dark:to-indigo-700 rounded text-white;
      color: transparent;
    }
  }
}
frontend/src/themes/peach.scss (new file, 108 lines)
@@ -0,0 +1,108 @@
.gradio-bg[theme="peach"] {
  @apply bg-gradient-to-r from-red-50 to-yellow-100 dark:from-gray-900 dark:to-gray-800;
}
.gradio-bg[theme="peach"] .gradio-interface {
  .component-set {
    @apply bg-white dark:bg-gray-800 rounded-lg;
  }
  .component {
    @apply p-1 transition;
  }
  .panel-header {
    @apply text-gray-600 dark:text-gray-200 font-semibold;
  }
  .panel-button {
    @apply rounded-lg bg-white dark:bg-gray-800 shadow;
  }
  .panel-button.submit {
    @apply text-white bg-gradient-to-tr from-red-500 to-yellow-400;
  }
  .examples {
    .examples-holder:not(.gallery) {
      .examples-table {
        @apply bg-white dark:bg-gray-800;
        tbody tr:hover {
          @apply bg-yellow-500 dark:bg-red-800;
        }
      }
    }
    .examples-table-holder.gallery .examples-table {
      .example {
        @apply bg-white dark:bg-gray-800;
      }
      .example:hover {
        @apply bg-yellow-500 dark:bg-red-800;
      }
    }
  }

  /* Input Components */
  .input-text {
    @apply rounded-lg bg-gray-50 dark:bg-gray-700 dark:text-gray-50;
  }
  .input-number {
    @apply rounded-lg bg-gray-50 dark:bg-gray-700 dark:text-gray-50;
  }
  .input-slider {
    .range {
      @apply bg-gray-50 dark:bg-gray-700 rounded-lg;
    }
    .range::-webkit-slider-thumb {
      @apply bg-gradient-to-tr from-red-500 to-yellow-400 rounded-lg shadow-sm;
    }
    .range::-moz-range-thumb {
      @apply bg-gradient-to-tr from-red-500 to-yellow-400 rounded-lg shadow-sm;
    }
    .value {
      @apply font-semibold text-gray-500 dark:bg-gray-700 dark:text-gray-50;
    }
  }
  .input-radio {
    .radio-item {
      @apply bg-gray-100 rounded-lg dark:bg-gray-700 dark:text-gray-50;
    }
    .radio-item.selected {
      @apply bg-gradient-to-tr from-red-500 to-yellow-400 text-white shadow;
    }
    .radio-circle {
      @apply w-4 h-4 bg-white transition rounded-full box-border;
    }
  }

  .input-checkbox-group,
  .input-checkbox {
    .checkbox-item {
      @apply bg-gray-100 rounded-lg dark:bg-gray-700 dark:text-gray-50;
    }
    .checkbox-item.selected {
      @apply bg-gradient-to-tr from-red-500 to-yellow-400 text-white shadow;
    }
    .selected .checkbox {
      @apply bg-gray-200 bg-opacity-20;
    }
  }
  .input-dropdown {
    .selector {
      @apply bg-gray-100 rounded-lg dark:bg-gray-700 dark:text-gray-50;
    }
    .dropdown-menu {
      @apply shadow;
    }
    .dropdown-item {
      @apply bg-gray-100 dark:bg-gray-800 hover:bg-red-500 hover:text-gray-50 hover:font-semibold;
    }
    .dropdown-item:first-child,
    .dropdown-item:last-child {
      @apply rounded-lg;
    }
  }
  /* Components */
  .output-label {
    .confidence {
      @apply bg-gray-300 text-white dark:bg-gray-600 font-semibold rounded-lg;
    }
    .confidence:first-child {
      @apply bg-gradient-to-tr from-red-500 to-yellow-400;
    }
  }
}
frontend/src/themes/seafoam.scss (new file, 116 lines)
@@ -0,0 +1,116 @@
.gradio-bg[theme="seafoam"] {
  @apply bg-yellow-100 dark:bg-gray-700;
}
.gradio-bg[theme="seafoam"] .gradio-interface {
  .component-set {
    @apply p-0;
  }
  .component {
    @apply p-2 transition bg-white dark:bg-gray-800 shadow-sm;
  }
  .component:hover .panel-header {
    @apply text-green-400 text-base;
  }
  .panel-header {
    @apply text-sm h-6 text-gray-400 dark:text-gray-200 transition-all font-semibold;
  }
  .panel-button {
    @apply bg-gradient-to-t from-blue-400 to-green-300 dark:from-blue-500 hover:to-green-400 text-white shadow;
  }
  .examples {
    .examples-holder:not(.gallery) {
      .examples-table {
        @apply dark:bg-gray-800;
        tbody tr:hover {
          @apply bg-blue-400 dark:bg-blue-500;
        }
      }
    }
    .examples-holder.gallery .examples-table {
      .example {
        @apply dark:bg-gray-800;
      }
      .example:hover {
        @apply bg-blue-400 dark:bg-blue-500;
      }
    }
  }

  /* Common Classes */
  .edit,
  .clear {
    @apply bg-gradient-to-t from-blue-400 to-green-300 dark:from-blue-500 hover:to-green-400 text-white;
  }

  /* Input Components */
  .input-text {
    @apply rounded-none bg-gray-50 text-blue-400 dark:bg-gray-700 dark:text-gray-50;
  }
  .input-number {
    @apply rounded-none bg-gray-50 text-blue-400 dark:bg-gray-700 dark:text-gray-50;
  }
  .input-slider {
    .range {
      @apply bg-gray-50 dark:bg-gray-700 rounded-none;
    }
    .range::-webkit-slider-thumb {
      @apply bg-gradient-to-t from-blue-400 to-green-300 dark:from-blue-500 shadow-sm;
    }
    .range::-moz-range-thumb {
      @apply bg-gradient-to-t from-blue-400 to-green-300 dark:from-blue-500 shadow-sm;
    }
    .value {
      @apply bg-gray-50 font-semibold text-blue-400 dark:bg-gray-700 dark:text-gray-50;
    }
  }
  .input-radio {
    .radio-item {
      @apply bg-gray-50 text-blue-400 dark:bg-gray-700 dark:text-gray-50;
    }
    .radio-circle {
      @apply bg-gray-50 dark:bg-gray-400 border-4 border-gray-200 dark:border-gray-600;
    }
    .radio-item.selected {
      @apply bg-gradient-to-t from-blue-400 to-green-300 dark:from-blue-500 text-white shadow;
    }
    .radio-circle {
      @apply w-4 h-4 bg-white transition rounded-full box-border;
    }
    .selected .radio-circle {
      @apply border-gray-400 opacity-40;
    }
  }

  .input-checkbox-group,
  .input-checkbox {
    .checkbox-item {
      @apply bg-gray-50 text-blue-400 dark:bg-gray-700 dark:text-gray-50;
    }
    .checkbox-item.selected {
      @apply bg-gradient-to-t from-blue-400 to-green-300 dark:from-blue-500 text-white shadow;
    }
    .selected .checkbox {
      @apply bg-gray-400 bg-opacity-40;
    }
  }
  .input-dropdown {
    .selector {
      @apply bg-gray-50 text-blue-400 dark:bg-gray-700 dark:text-gray-50;
    }
    .dropdown-menu {
      @apply shadow;
    }
    .dropdown-item {
      @apply bg-white dark:bg-gray-800 hover:bg-blue-400 hover:text-gray-50 hover:font-semibold;
    }
  }
  /* Output Components */
  .output-label {
    .confidence {
      @apply bg-gradient-to-t from-gray-400 to-gray-300 dark:from-gray-500 dark:to-gray-400 text-white rounded font-semibold;
    }
    .confidence:first-child {
      @apply bg-gradient-to-t from-blue-400 to-green-300 dark:from-blue-500;
    }
  }
}
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: gradio
-Version: 2.7.5.2
+Version: 2.8.0b5
 Summary: Python library for easily interacting with trained machine learning models
 Home-page: https://github.com/gradio-app/gradio-UI
 Author: Abubakar Abid, Ali Abid, Ali Abdalla, Dawood Khan, Ahsen Khaliq
@@ -34,11 +34,7 @@ class Component:
         return {}
 
     def save_flagged(
-        self,
-        dir: str,
-        label: str,
-        data: Any,
-        encryption_key: bool
+        self, dir: str, label: str, data: Any, encryption_key: bool
     ) -> Any:
         """
         Saves flagged data from component
@@ -52,14 +48,10 @@ class Component:
         return data
 
     def save_flagged_file(
-        self,
-        dir: str,
-        label: str,
-        data: Any,
-        encryption_key: bool
+        self, dir: str, label: str, data: Any, encryption_key: bool
     ) -> str:
         """
-        Saved flagged data (e.g. image or audio) as a file and returns filepath
+        Saved flagged data (e.g. image or audio) as a file and returns filepath
         """
         if data is None:
             return None
@@ -81,9 +73,9 @@ class Component:
         return label + "/" + new_file_name
 
     def restore_flagged_file(
-        self,
-        dir: str,
-        file: str,
+        self,
+        dir: str,
+        file: str,
         encryption_key: bool,
     ) -> Dict[str, Any]:
         """
@@ -3,18 +3,13 @@ from Crypto.Cipher import AES
 from Crypto.Hash import SHA256
 
 
-def get_key(
-    password: str
-) -> bytes:
+def get_key(password: str) -> bytes:
     """Generates an encryption key based on the password provided."""
     key = SHA256.new(password.encode()).digest()
     return key
 
 
-def encrypt(
-    key: bytes,
-    source: bytes
-) -> bytes:
+def encrypt(key: bytes, source: bytes) -> bytes:
     """Encrypts source data using the provided encryption key"""
     IV = Random.new().read(AES.block_size)  # generate IV
     encryptor = AES.new(key, AES.MODE_CBC, IV)
@@ -24,10 +19,7 @@ def encrypt(
     return data
 
 
-def decrypt(
-    key: bytes,
-    source: bytes
-) -> bytes:
+def decrypt(key: bytes, source: bytes) -> bytes:
     IV = source[: AES.block_size]  # extract the IV from the beginning
     decryptor = AES.new(key, AES.MODE_CBC, IV)
     data = decryptor.decrypt(source[AES.block_size :])  # decrypt
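A hedged round-trip sketch of the three functions reformatted above (module path assumed from the diff; note that encrypt prepends the IV, and since AES-CBC works on padded blocks, decrypt may return trailing pad bytes):

from gradio import encryptor

key = encryptor.get_key("my-password")               # SHA-256 digest of the password
ciphertext = encryptor.encrypt(key, b"secret data")  # IV + AES-CBC ciphertext
recovered = encryptor.decrypt(key, ciphertext)       # strips the IV and decrypts
assert recovered.startswith(b"secret data")          # may carry padding at the end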
@@ -35,12 +35,13 @@ def get_huggingface_interface(model_name, api_key, alias):
         content_type = r.headers.get("content-type")
         # Case 2: the data prefix is a key in the response
         if content_type == "application/json":
-            try:
+            try:
                 content_type = r.json()[0]["content-type"]
                 base64_repr = r.json()[0]["blob"]
             except KeyError:
-                raise ValueError("Cannot determine content type returned"
-                                 "by external API.")
+                raise ValueError(
+                    "Cannot determine content type returned" "by external API."
+                )
         # Case 3: the data prefix is included in the response headers
         else:
             pass
@@ -66,7 +67,7 @@ def get_huggingface_interface(model_name, api_key, alias):
             "preprocess": lambda i: base64.b64decode(
                 i["data"].split(",")[1]
             ),  # convert the base64 representation to binary
-            "postprocess": encode_to_base64,
+            "postprocess": encode_to_base64,
         },
         "automatic-speech-recognition": {
             # example model: https://hf.co/jonatasgrosman/wav2vec2-large-xlsr-53-english
@@ -111,6 +111,7 @@ class CSVLogger(FlaggingCallback):
     The default implementation of the FlaggingCallback abstract class.
     Logs the input and output data to a CSV file. Supports encryption.
     """
+
     def setup(self, flagging_dir: str):
         self.flagging_dir = flagging_dir
         os.makedirs(flagging_dir, exist_ok=True)
|
||||
# Generate the headers and dataset_infos
|
||||
if is_new:
|
||||
headers = []
|
||||
|
||||
|
||||
for i, component in enumerate(interface.input_components):
|
||||
component_label = interface.config["input_components"][i]["label"] or "Input_{}".format(i)
|
||||
component_label = interface.config["input_components"][i][
|
||||
"label"
|
||||
] or "Input_{}".format(i)
|
||||
headers.append(component_label)
|
||||
infos["flagged"]["features"][component_label] = {
|
||||
"dtype": "string",
|
||||
@ -341,7 +344,9 @@ class HuggingFaceDatasetSaver(FlaggingCallback):
|
||||
break
|
||||
|
||||
for i, component in enumerate(interface.output_components):
|
||||
component_label = interface.config["output_components"][i]["label"] or "Output_{}".format(i)
|
||||
component_label = interface.config["output_components"][i][
|
||||
"label"
|
||||
] or "Output_{}".format(i)
|
||||
headers.append(component_label)
|
||||
infos["flagged"]["features"][component_label] = {
|
||||
"dtype": "string",
|
||||
@ -368,7 +373,9 @@ class HuggingFaceDatasetSaver(FlaggingCallback):
|
||||
# Generate the row corresponding to the flagged sample
|
||||
csv_data = []
|
||||
for i, component in enumerate(interface.input_components):
|
||||
label = interface.config["input_components"][i]["label"] or "Input_{}".format(i)
|
||||
label = interface.config["input_components"][i][
|
||||
"label"
|
||||
] or "Input_{}".format(i)
|
||||
filepath = component.save_flagged(
|
||||
self.dataset_dir, label, input_data[i], None
|
||||
)
|
||||
@@ -378,9 +385,13 @@ class HuggingFaceDatasetSaver(FlaggingCallback):
                 "{}/resolve/main/{}".format(self.path_to_dataset_repo, filepath)
             )
         for i, component in enumerate(interface.output_components):
-            label = interface.config["output_components"][i]["label"] or "Output_{}".format(i)
+            label = interface.config["output_components"][i][
+                "label"
+            ] or "Output_{}".format(i)
             filepath = (
-                component.save_flagged(self.dataset_dir, label, output_data[i], None)
+                component.save_flagged(
+                    self.dataset_dir, label, output_data[i], None
+                )
                 if output_data[i] is not None
                 else ""
             )
@@ -221,10 +221,8 @@ class Textbox(InputComponent):
         """
         masked_inputs = []
         for binary_mask_vector in binary_mask_matrix:
-            masked_input = np.array(tokens)[np.array(
-                binary_mask_vector, dtype=bool)]
-            masked_inputs.append(
-                self.interpretation_separator.join(masked_input))
+            masked_input = np.array(tokens)[np.array(binary_mask_vector, dtype=bool)]
+            masked_inputs.append(self.interpretation_separator.join(masked_input))
         return masked_inputs
 
     def get_interpretation_scores(
@@ -308,10 +306,8 @@ class Number(InputComponent):
             delta = 1.0 * self.interpretation_delta * x / 100
         elif self.interpretation_delta_type == "absolute":
             delta = self.interpretation_delta
-        negatives = (x + np.arange(-self.interpretation_steps, 0)
-                     * delta).tolist()
-        positives = (
-            x + np.arange(1, self.interpretation_steps + 1) * delta).tolist()
+        negatives = (x + np.arange(-self.interpretation_steps, 0) * delta).tolist()
+        positives = (x + np.arange(1, self.interpretation_steps + 1) * delta).tolist()
         return negatives + positives, {}
 
     def get_interpretation_scores(
@@ -357,7 +353,7 @@ class Slider(InputComponent):
         if step is None:
             difference = maximum - minimum
             power = math.floor(math.log10(difference) - 2)
-            step = 10 ** power
+            step = 10**power
         self.step = step
         self.default = minimum if default is None else default
         self.test_input = self.default
@@ -406,8 +402,7 @@ class Slider(InputComponent):
 
     def get_interpretation_neighbors(self, x) -> List[float]:
         return (
-            np.linspace(self.minimum, self.maximum,
-                        self.interpretation_steps).tolist(),
+            np.linspace(self.minimum, self.maximum, self.interpretation_steps).tolist(),
             {},
         )
 
@@ -944,8 +939,7 @@ class Image(InputComponent):
             masked_input = np.zeros_like(tokens[0], dtype=int)
             for token, b in zip(tokens, binary_mask_vector):
                 masked_input = masked_input + token * int(b)
-            masked_inputs.append(
-                processing_utils.encode_array_to_base64(masked_input))
+            masked_inputs.append(processing_utils.encode_array_to_base64(masked_input))
         return masked_inputs
 
     def get_interpretation_scores(self, x, neighbors, scores, masks, tokens=None):
@@ -1042,10 +1036,8 @@ class Video(InputComponent):
         file_name = file.name
         uploaded_format = file_name.split(".")[-1].lower()
         if self.type is not None and uploaded_format != self.type:
-            output_file_name = file_name[0: file_name.rindex(
-                ".") + 1] + self.type
-            ff = FFmpeg(inputs={file_name: None},
-                        outputs={output_file_name: None})
+            output_file_name = file_name[0 : file_name.rindex(".") + 1] + self.type
+            ff = FFmpeg(inputs={file_name: None}, outputs={output_file_name: None})
             ff.run()
             return output_file_name
         else:
@@ -1200,8 +1192,7 @@ class Audio(InputComponent):
         tokens = []
         masks = []
         duration = data.shape[0]
-        boundaries = np.linspace(
-            0, duration, self.interpretation_segments + 1).tolist()
+        boundaries = np.linspace(0, duration, self.interpretation_segments + 1).tolist()
         boundaries = [round(boundary) for boundary in boundaries]
         for index in range(len(boundaries) - 1):
             start, stop = boundaries[index], boundaries[index + 1]
@@ -1211,8 +1202,7 @@ class Audio(InputComponent):
             leave_one_out_data = np.copy(data)
             leave_one_out_data[start:stop] = 0
             file = tempfile.NamedTemporaryFile(delete=False, suffix=".wav")
-            processing_utils.audio_to_file(
-                sample_rate, leave_one_out_data, file.name)
+            processing_utils.audio_to_file(sample_rate, leave_one_out_data, file.name)
             out_data = processing_utils.encode_file_to_base64(file.name)
             leave_one_out_sets.append(out_data)
             file.close()
@@ -1230,8 +1220,9 @@ class Audio(InputComponent):
 
             tokens.append(token_data)
         tokens = [{"name": "token.wav", "data": token} for token in tokens]
-        leave_one_out_sets = [{"name": "loo.wav", "data": loo_set}
-                              for loo_set in leave_one_out_sets]
+        leave_one_out_sets = [
+            {"name": "loo.wav", "data": loo_set} for loo_set in leave_one_out_sets
+        ]
         return tokens, leave_one_out_sets, masks
 
     def get_masked_inputs(self, tokens, binary_mask_matrix):
@@ -1239,7 +1230,7 @@ class Audio(InputComponent):
         x = tokens[0]["data"]
         file_obj = processing_utils.decode_base64_to_file(x)
         sample_rate, data = processing_utils.audio_from_file(file_obj.name)
-        zero_input = np.zeros_like(data, dtype='int16')
+        zero_input = np.zeros_like(data, dtype="int16")
         # decode all of the tokens
         token_data = []
         for token in tokens:
@@ -1253,8 +1244,7 @@ class Audio(InputComponent):
             for t, b in zip(token_data, binary_mask_vector):
                 masked_input = masked_input + t * int(b)
             file = tempfile.NamedTemporaryFile(delete=False)
-            processing_utils.audio_to_file(
-                sample_rate, masked_input, file.name)
+            processing_utils.audio_to_file(sample_rate, masked_input, file.name)
             masked_data = processing_utils.encode_file_to_base64(file.name)
             file.close()
             os.unlink(file.name)
@@ -1428,8 +1418,7 @@ class Dataframe(InputComponent):
             "date": "02/08/1993",
         }
         column_dtypes = (
-            [datatype] *
-            self.col_count if isinstance(datatype, str) else datatype
+            [datatype] * self.col_count if isinstance(datatype, str) else datatype
         )
         self.test_input = [
             [sample_values[c] for c in column_dtypes] for _ in range(row_count)
@@ -21,8 +21,14 @@ from typing import TYPE_CHECKING, Any, Callable, List, Optional, Tuple
 from markdown_it import MarkdownIt
 from mdit_py_plugins.footnote import footnote_plugin
 
-from gradio import (encryptor, interpretation, networking,  # type: ignore
-                    queueing, strings, utils)
+from gradio import (
+    encryptor,
+    interpretation,
+    networking,  # type: ignore
+    queueing,
+    strings,
+    utils,
+)
 from gradio.external import load_from_pipeline, load_interface  # type: ignore
 from gradio.flagging import CSVLogger, FlaggingCallback  # type: ignore
 from gradio.inputs import InputComponent
@@ -243,20 +249,25 @@ class Interface:
 
         self.session = None
         self.title = title
-        CLEANER = re.compile('<.*?>')
+
+        CLEANER = re.compile("<.*?>")
+
         def clean_html(raw_html):
-            cleantext = re.sub(CLEANER, '', raw_html)
+            cleantext = re.sub(CLEANER, "", raw_html)
             return cleantext
-        md = MarkdownIt("js-default", {
+
+        md = MarkdownIt(
+            "js-default",
+            {
                 "linkify": True,
                 "typographer": True,
                 "html": True,
-            }).use(footnote_plugin)
+            },
+        ).use(footnote_plugin)
 
         simple_description = None
         if description is not None:
-            description = md.render(description)
+            description = md.render(description)
             simple_description = clean_html(description)
         self.simple_description = simple_description
         self.description = description
@@ -264,7 +275,7 @@ class Interface:
             article = utils.readme_to_html(article)
             article = md.render(article)
         self.article = article
 
         self.thumbnail = thumbnail
         theme = theme if theme is not None else os.getenv("GRADIO_THEME", "default")
         DEPRECATED_THEME_MAP = {
@@ -617,6 +628,8 @@ class Interface:
         encrypt: bool = False,
         cache_examples: bool = False,
         favicon_path: Optional[str] = None,
+        ssl_keyfile: Optional[str] = None,
+        ssl_certfile: Optional[str] = None,
     ) -> Tuple[flask.Flask, str, str]:
         """
         Launches the webserver that serves the UI for the interface.
@@ -634,11 +647,13 @@ class Interface:
         server_name (str): to make app accessible on local network, set this to "0.0.0.0". Can be set by environment variable GRADIO_SERVER_NAME.
         show_tips (bool): if True, will occasionally show tips about new Gradio features
         enable_queue (bool): if True, inference requests will be served through a queue instead of with parallel threads. Required for longer inference times (> 1min) to prevent timeout.
-        width (int): The width in pixels of the <iframe> element containing the interface (used if inline=True)
-        height (int): The height in pixels of the <iframe> element containing the interface (used if inline=True)
+        width (int): The width in pixels of the iframe element containing the interface (used if inline=True)
+        height (int): The height in pixels of the iframe element containing the interface (used if inline=True)
         encrypt (bool): If True, flagged data will be encrypted by key provided by creator at launch
         cache_examples (bool): If True, examples outputs will be processed and cached in a folder, and will be used if a user uses an example input.
-        favicon_path (str): If a path to an file (.png, .gif, or .ico) is provided, it will be used as the favicon for the web page.
+        favicon_path (str): If a path to a file (.png, .gif, or .ico) is provided, it will be used as the favicon for the web page.
+        ssl_keyfile (str): If a path to a file is provided, will use this as the private key file to create a local server running on https.
+        ssl_certfile (str): If a path to a file is provided, will use this as the signed certificate for https. Needs to be provided if ssl_keyfile is provided.
         Returns:
         app (flask.Flask): Flask app object
         path_to_local_server (str): Locally accessible link
@@ -680,9 +695,9 @@ class Interface:
             cache_interface_examples(self)
 
         server_port, path_to_local_server, app, server = networking.start_server(
-            self, server_name, server_port
+            self, server_name, server_port, ssl_keyfile, ssl_certfile
         )
 
         self.local_url = path_to_local_server
         self.server_port = server_port
         self.status = "RUNNING"
@@ -708,7 +723,6 @@ class Interface:
 
         if private_endpoint is not None:
             share = True
-        self.share = share
 
         if share:
             try:
@@ -723,10 +737,14 @@ class Interface:
             if self.analytics_enabled:
                 utils.error_analytics(self.ip_address, "Not able to set up tunnel")
                 share_url = None
+                share = False
+                print(strings.en["COULD_NOT_GET_SHARE_LINK"])
         else:
             print(strings.en["PUBLIC_SHARE_TRUE"])
             share_url = None
 
+        self.share = share
+
         if inbrowser:
             link = share_url if share else path_to_local_server
             webbrowser.open(link)
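Taken together with the networking.py changes below, the launch() additions above enable serving the interface over HTTPS. A minimal usage sketch (the certificate paths are hypothetical):

import gradio as gr

iface = gr.Interface(fn=lambda x: x, inputs="text", outputs="text")
iface.launch(
    ssl_keyfile="path/to/key.pem",    # private key -> local server runs on https
    ssl_certfile="path/to/cert.pem",  # must accompany ssl_keyfile, else ValueError
)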
@@ -77,6 +77,8 @@ def start_server(
     interface: Interface,
     server_name: Optional[str] = None,
     server_port: Optional[int] = None,
+    ssl_keyfile: Optional[str] = None,
+    ssl_certfile: Optional[str] = None,
 ) -> Tuple[int, str, fastapi.FastAPI, threading.Thread, None]:
     """Launches a local server running the provided Interface
     Parameters:
@@ -84,6 +86,8 @@ def start_server(
     server_name: to make app accessible on local network, set this to "0.0.0.0". Can be set by environment variable GRADIO_SERVER_NAME.
     server_port: will start gradio app on this port (if available). Can be set by environment variable GRADIO_SERVER_PORT.
     auth: If provided, username and password (or list of username-password tuples) required to access interface. Can also provide function that takes username and password and returns True if valid login.
+    ssl_keyfile: If a path to a file is provided, will use this as the private key file to create a local server running on https.
+    ssl_certfile: If a path to a file is provided, will use this as the signed certificate for https. Needs to be provided if ssl_keyfile is provided.
     """
     server_name = server_name or LOCALHOST_NAME
     # if port is not specified, search for first available port
@@ -105,7 +109,14 @@ def start_server(
         port = server_port
 
     url_host_name = "localhost" if server_name == "0.0.0.0" else server_name
-    path_to_local_server = "http://{}:{}/".format(url_host_name, port)
+
+    if ssl_keyfile is not None:
+        if ssl_certfile is None:
+            raise ValueError("ssl_certfile must be provided if ssl_keyfile is provided.")
+        path_to_local_server = "https://{}:{}/".format(url_host_name, port)
+    else:
+        path_to_local_server = "http://{}:{}/".format(url_host_name, port)
 
     auth = interface.auth
     if auth is not None:
         if not callable(auth):
@@ -130,7 +141,8 @@ def start_server(
     if interface.save_to is not None:  # Used for selenium tests
         interface.save_to["port"] = port
 
-    config = uvicorn.Config(app=app, port=port, host=server_name, log_level="warning")
+    config = uvicorn.Config(app=app, port=port, host=server_name, log_level="warning",
+                            ssl_keyfile=ssl_keyfile, ssl_certfile=ssl_certfile)
     server = Server(config=config)
     server.run_in_thread()
     return port, path_to_local_server, app, server
@@ -163,7 +175,7 @@ def url_ok(url: str) -> bool:
     try:
         for _ in range(5):
             time.sleep(0.500)
-            r = requests.head(url, timeout=3)
+            r = requests.head(url, timeout=3, verify=False)
             if r.status_code in (200, 401, 302):  # 401 or 302 if auth is set
                 return True
     except (ConnectionError, requests.exceptions.ConnectionError):
@@ -442,7 +442,6 @@ class Audio(OutputComponent):
         return {
             "audio": {},
         }
-
 
     def postprocess(self, y):
         """
@@ -453,7 +452,7 @@ class Audio(OutputComponent):
         """
         if self.type in ["numpy", "file", "auto"]:
             if self.type == "numpy" or (self.type == "auto" and isinstance(y, tuple)):
-                sample_rate, data = y
+                sample_rate, data = y
                 file = tempfile.NamedTemporaryFile(
                     prefix="sample", suffix=".wav", delete=False
                 )
gradio/package-lock.json (generated, new file, 6 lines)
@@ -0,0 +1,6 @@
{
  "name": "gradio",
  "lockfileVersion": 2,
  "requires": true,
  "packages": {}
}
@ -18,8 +18,7 @@ CACHE_FILE = os.path.join(CACHED_FOLDER, "log.csv")
|
||||
|
||||
|
||||
def process_example(
|
||||
interface: Interface,
|
||||
example_id: int
|
||||
interface: Interface, example_id: int
|
||||
) -> Tuple[List[Any], List[float]]:
|
||||
"""Loads an example from the interface and returns its prediction."""
|
||||
example_set = interface.examples[example_id]
|
||||
@@ -31,9 +30,7 @@ def process_example(
     return prediction, durations
 
 
-def cache_interface_examples(
-    interface: Interface
-) -> None:
+def cache_interface_examples(interface: Interface) -> None:
     """Caches all of the examples from an interface."""
     if os.path.exists(CACHE_FILE):
         print(
@@ -54,10 +51,7 @@ def cache_interface_examples(
         raise e
 
 
-def load_from_cache(
-    interface: Interface,
-    example_id: int
-) -> List[Any]:
+def load_from_cache(interface: Interface, example_id: int) -> List[Any]:
     """Loads a particular cached example for the interface."""
     with open(CACHE_FILE) as cache:
         examples = list(csv.reader(cache))
@@ -138,32 +138,37 @@ def audio_to_file(sample_rate, data, filename):
         channels=(1 if len(data.shape) == 1 else data.shape[1]),
     )
     audio.export(filename, format="wav").close()
 
 
 def convert_to_16_bit_wav(data):
     # Based on: https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.wavfile.write.html
-    if data.dtype==np.float32:
-        warnings.warn("Audio data is not in 16-bit integer format."
-                      "Trying to convert to 16-bit int format.")
+    if data.dtype == np.float32:
+        warnings.warn(
+            "Audio data is not in 16-bit integer format."
+            "Trying to convert to 16-bit int format."
+        )
         data = data / np.abs(data).max()
         data = data * 32767
         data = data.astype(np.int16)
-    elif data.dtype==np.int32:
-        warnings.warn("Audio data is not in 16-bit integer format."
-                      "Trying to convert to 16-bit int format.")
+    elif data.dtype == np.int32:
+        warnings.warn(
+            "Audio data is not in 16-bit integer format."
+            "Trying to convert to 16-bit int format."
+        )
         data = data / 65538
         data = data.astype(np.int16)
-    elif data.dtype==np.int16:
+    elif data.dtype == np.int16:
         pass
-    elif data.dtype==np.uint8:
-        warnings.warn("Audio data is not in 16-bit integer format."
-                      "Trying to convert to 16-bit int format.")
+    elif data.dtype == np.uint8:
+        warnings.warn(
+            "Audio data is not in 16-bit integer format."
+            "Trying to convert to 16-bit int format."
+        )
         data = data * 257 - 32768
         data = data.astype(np.int16)
     else:
-        raise ValueError("Audio data cannot be converted to "
-                         "16-bit int format.")
-    return data
+        raise ValueError("Audio data cannot be converted to " "16-bit int format.")
+    return data
 
 
 ##################
@@ -330,7 +335,7 @@ def _convert(image, dtype, force_copy=False, uniform=False):
         Output image array. Has the same kind as `a`.
     """
     kind = a.dtype.kind
-    if n > m and a.max() < 2 ** m:
+    if n > m and a.max() < 2**m:
         mnew = int(np.ceil(m / 2) * 2)
         if mnew > m:
             dtype = "int{}".format(mnew)
@@ -353,11 +358,11 @@ def _convert(image, dtype, force_copy=False, uniform=False):
             # exact upscale to a multiple of `n` bits
             if copy:
                 b = np.empty(a.shape, _dtype_bits(kind, m))
-                np.multiply(a, (2 ** m - 1) // (2 ** n - 1), out=b, dtype=b.dtype)
+                np.multiply(a, (2**m - 1) // (2**n - 1), out=b, dtype=b.dtype)
                 return b
             else:
                 a = a.astype(_dtype_bits(kind, m, a.dtype.itemsize), copy=False)
-                a *= (2 ** m - 1) // (2 ** n - 1)
+                a *= (2**m - 1) // (2**n - 1)
                 return a
         else:
             # upscale to a multiple of `n` bits,
@@ -365,12 +370,12 @@ def _convert(image, dtype, force_copy=False, uniform=False):
             o = (m // n + 1) * n
             if copy:
                 b = np.empty(a.shape, _dtype_bits(kind, o))
-                np.multiply(a, (2 ** o - 1) // (2 ** n - 1), out=b, dtype=b.dtype)
+                np.multiply(a, (2**o - 1) // (2**n - 1), out=b, dtype=b.dtype)
                 b //= 2 ** (o - m)
                 return b
             else:
                 a = a.astype(_dtype_bits(kind, o, a.dtype.itemsize), copy=False)
-                a *= (2 ** o - 1) // (2 ** n - 1)
+                a *= (2**o - 1) // (2**n - 1)
                 a //= 2 ** (o - m)
                 return a
 
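A small illustrative sketch of the float32 branch of convert_to_16_bit_wav as reformatted above (a standalone reimplementation for clarity, not a call into gradio):

import numpy as np

x = np.array([0.25, -0.5, 1.0], dtype=np.float32)
y = (x / np.abs(x).max() * 32767).astype(np.int16)  # peak-normalize, scale to int16
print(y)  # [ 8191 -16383  32767]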
@@ -10,9 +10,7 @@ import requests
 DB_FILE = "gradio_queue.db"
 
 
-def queue_thread(
-    path_to_local_server: str
-) -> None:
+def queue_thread(path_to_local_server: str) -> None:
     while True:
         try:
             next_job = pop()
@ -108,10 +106,7 @@ def pop() -> Tuple[int, str, Dict, str]:
|
||||
return result[0], result[1], json.loads(result[2]), result[3]
|
||||
|
||||
|
||||
def push(
|
||||
input_data: Dict,
|
||||
action: str
|
||||
) -> Tuple[str, int]:
|
||||
def push(input_data: Dict, action: str) -> Tuple[str, int]:
|
||||
input_data = json.dumps(input_data)
|
||||
hash = generate_hash()
|
||||
conn = sqlite3.connect(DB_FILE)
|
||||
@@ -140,7 +135,7 @@ def push(
     """
     )
     result = c.fetchone()
-    if not(result[0] == 0):
+    if not (result[0] == 0):
         queue_position += 1
     conn.commit()
     return hash, queue_position
@ -204,7 +199,7 @@ def get_status(hash: str) -> Tuple[str, int]:
|
||||
"""
|
||||
)
|
||||
result = c.fetchone()
|
||||
if not(result[0] == 0):
|
||||
if not (result[0] == 0):
|
||||
queue_position += 1
|
||||
conn.commit()
|
||||
return "QUEUED", queue_position
|
||||
@ -229,10 +224,7 @@ def start_job(hash: str) -> None:
|
||||
conn.commit()
|
||||
|
||||
|
||||
def fail_job(
|
||||
hash: str,
|
||||
error_message: str
|
||||
) -> None:
|
||||
def fail_job(hash: str, error_message: str) -> None:
|
||||
conn = sqlite3.connect(DB_FILE)
|
||||
c = conn.cursor()
|
||||
c.execute(
|
||||
@@ -247,10 +239,7 @@ def fail_job(
     conn.commit()
 
 
-def pass_job(
-    hash: str,
-    output_data: Dict
-) -> None:
+def pass_job(hash: str, output_data: Dict) -> None:
     output_data = json.dumps(output_data)
     conn = sqlite3.connect(DB_FILE)
     c = conn.cursor()
@@ -130,6 +130,7 @@ def static_resource(path: str):
         return FileResponse(static_file)
     raise HTTPException(status_code=404, detail="Static file not found")
 
+
 @app.get("/build/{path:path}")
 def build_resource(path: str):
     if app.interface.share:
@ -7,6 +7,7 @@ MESSAGING_API_ENDPOINT = "https://api.gradio.app/gradio-messaging/en"

en = {
    "RUNNING_LOCALLY": "Running on local URL: {}",
    "SHARE_LINK_DISPLAY": "Running on public URL: {}",
    "COULD_NOT_GET_SHARE_LINK": "\nCould not create share link, please check your internet connection.",
    "COLAB_NO_LOCAL": "Cannot display local interface on google colab, public link created.",
    "PUBLIC_SHARE_TRUE": "\nTo create a public link, set `share=True` in `launch()`.",
    "MODEL_PUBLICLY_AVAILABLE_URL": "Model available publicly at: {} (may take up to a minute for link to be usable)",
@ -18,7 +19,7 @@ en = {
    "COLAB_DEBUG_TRUE": "Colab notebook detected. This cell will run indefinitely so that you can see errors and logs. "
    "To turn off, set debug=False in launch().",
    "COLAB_DEBUG_FALSE": "Colab notebook detected. To show errors in colab notebook, set debug=True in launch()",
    "SHARE_LINK_MESSAGE": "\nThis share link expires in 72 hours. For free permanent hosting, check out Spaces (https://huggingface.co/spaces)",
    "SHARE_LINK_MESSAGE": "\nThis share link expires in 72 hours. For free permanent hosting, check out Spaces (https://www.huggingface.co/spaces)",
    "PRIVATE_LINK_MESSAGE": "Since this is a private endpoint, this share link will never expire.",
    "INLINE_DISPLAY_BELOW": "Interface loading below...",
    "MEDIA_PERMISSIONS_IN_COLAB": "Your interface requires microphone or webcam permissions - this may cause issues in Colab. Use the External URL in case of issues.",
@ -4,10 +4,10 @@

<head>
  <meta charset="utf-8" />
  <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
  <link rel='stylesheet' href='/build/bundle.css'>
  <link rel='stylesheet' href='/build/themes.css'>
  <link rel='stylesheet' href='build/bundle.css'>
  <link rel='stylesheet' href='build/themes.css'>

  <link rel="stylesheet" href="./global.css">
  <link rel="stylesheet" href="build/global.css">

  <title>{{ config['title'] or 'Gradio' }}</title>
  <meta property="og:url" content="https://gradio.app/" />
@ -20,7 +20,9 @@
  <meta name="twitter:title" content="{{ config['title'] or '' }}">
  <meta name="twitter:description" content="{{ config['simple_description'] or '' }}">
  <meta name="twitter:image" content="{{ config['thumbnail'] or '' }}">
  {%if config['analytics_enabled'] %}
  <script async src="https://www.googletagmanager.com/gtag/js?id=UA-156449732-1"></script>
  {% endif %}
  <script>
    window.dataLayer = window.dataLayer || [];
    function gtag() {
@ -41,6 +43,6 @@
  <div id="root" style="height: 100%"></div>

</body>
<script defer src='/build/bundle.js'></script>
<script defer src='build/bundle.js'></script>

</html>
@ -195,6 +195,7 @@ def get_config_file(interface: Interface) -> Dict[str, Any]:
        "show_input": interface.show_input,
        "show_output": interface.show_output,
        "title": interface.title,
        "analytics_enabled": interface.analytics_enabled,
        "description": interface.description,
        "simple_description": interface.simple_description,
        "article": interface.article,

@ -1 +1 @@
2.7.5.2
2.8.0b5

103 guides/building_a_pictionary_app.md Normal file
@ -0,0 +1,103 @@
# Building a Pictionary App

related_spaces: https://huggingface.co/spaces/nateraw/quickdraw
tags: SKETCHPAD, LABELS, LIVE

## Introduction

How well can an algorithm guess what you're drawing? A few years ago, Google released the **Quick Draw** dataset, which contains drawings made by humans of a variety of everyday objects. Researchers have used this dataset to train models to guess Pictionary-style drawings. Such models are perfect to use with Gradio's *sketchpad* input, so in this tutorial we will build a Pictionary web application using Gradio. We will be able to build the whole web application in Python, and it will look like this (try drawing something!):

<iframe src="https://hf.space/gradioiframe/abidlabs/draw2/+" frameBorder="0" height="450" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>

Let's get started!

### Prerequisites

Make sure you have the `gradio` Python package already [installed](/getting_started). To use the pretrained sketchpad model, also install `torch`.

## Step 1 — Setting up the Sketch Recognition Model

First, you will need a sketch recognition model. Since many researchers have already trained their own models on the Quick Draw dataset, we will use a pretrained model in this tutorial. Our model is a lightweight 1.5 MB model trained by Nate Raw, which [you can download here](https://huggingface.co/spaces/nateraw/quickdraw/blob/main/pytorch_model.bin).

If you are interested, here [is the code](https://github.com/nateraw/quickdraw-pytorch) that was used to train the model. We will simply load the pretrained model in PyTorch, as follows (note that `LABELS`, used in the final layer, is loaded in Step 2):

```python
import torch
from torch import nn

model = nn.Sequential(
    nn.Conv2d(1, 32, 3, padding='same'),
    nn.ReLU(),
    nn.MaxPool2d(2),
    nn.Conv2d(32, 64, 3, padding='same'),
    nn.ReLU(),
    nn.MaxPool2d(2),
    nn.Conv2d(64, 128, 3, padding='same'),
    nn.ReLU(),
    nn.MaxPool2d(2),
    nn.Flatten(),
    nn.Linear(1152, 256),
    nn.ReLU(),
    nn.Linear(256, len(LABELS)),
)
state_dict = torch.load('pytorch_model.bin', map_location='cpu')
model.load_state_dict(state_dict, strict=False)
model.eval()
```

## Step 2 — Defining a `predict` function

Next, you will need to define a function that takes in the *user input*, which in this case is a sketched image, and returns the prediction. The prediction should be returned as a dictionary whose keys are class names and whose values are confidence probabilities. We will load the class names from this [text file](https://huggingface.co/spaces/nateraw/quickdraw/blob/main/class_names.txt).

In the case of our pretrained model, it will look like this:

```python
from pathlib import Path

LABELS = Path('class_names.txt').read_text().splitlines()

def predict(img):
    x = torch.tensor(img, dtype=torch.float32).unsqueeze(0).unsqueeze(0) / 255.
    with torch.no_grad():
        out = model(x)
    probabilities = torch.nn.functional.softmax(out[0], dim=0)
    values, indices = torch.topk(probabilities, 5)
    confidences = {LABELS[i]: v.item() for i, v in zip(indices, values)}
    return confidences
```

Let's break this down. The function takes one parameter:

* `img`: the input image as a `numpy` array

Then, the function converts the image to a PyTorch `tensor`, passes it through the model, and returns:

* `confidences`: the top five predictions, as a dictionary whose keys are class labels and whose values are confidence probabilities
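Before wiring this up to Gradio, you can sanity-check `predict` on a dummy drawing. The 28×28 shape below is an assumption inferred from the model's `nn.Linear(1152, 256)` flatten size (128 channels by 3 by 3 after three poolings of a 28×28 input); the exact array Gradio passes depends on the sketchpad's preprocessing:

```python
import numpy as np

# A blank canvas: the model should return five low-confidence guesses.
blank = np.zeros((28, 28))
print(predict(blank))  # e.g. {'snake': 0.04, ...}
```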
## Step 3 — Creating a Gradio Interface

Now that we have our predictive function set up, we can create a Gradio Interface around it.

In this case, the input component is a sketchpad. To create a sketchpad input, we can use the convenient string shortcut, `"sketchpad"`, which creates a canvas for a user to draw on and handles the preprocessing to convert that to a numpy array.
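If you later want to tweak the canvas, the string shortcut corresponds roughly to instantiating the image input class directly. The parameter values below are illustrative assumptions, not the exact defaults of the shortcut:

```python
import gradio as gr

# Approximately what "sketchpad" expands to (values are assumptions):
# a grayscale canvas, resized to 28x28, with colors inverted so that
# pen strokes become high pixel values.
sketchpad = gr.inputs.Image(
    image_mode="L", source="canvas", shape=(28, 28), invert_colors=True
)
```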
The output component will be a `"label"`, which displays the top labels in a nice form.

Finally, we'll add one more parameter, setting `live=True`, which allows our interface to run in real time, adjusting its predictions every time a user draws on the sketchpad. The code for Gradio looks like this:

```python
import gradio as gr

gr.Interface(fn=predict,
             inputs="sketchpad",
             outputs="label",
             live=True).launch()
```

This produces the following interface, which you can try right here in your browser (try drawing something, like a "snake" or a "laptop"):

<iframe src="https://hf.space/gradioiframe/abidlabs/draw2/+" frameBorder="0" height="450" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>

----------

And you're done! That's all the code you need to build a Pictionary-style guessing app. Have fun and try to find some edge cases 🧐

145 guides/creating_a_chatbot.md Normal file
@ -0,0 +1,145 @@
# How to Create a Chatbot

related_spaces: https://huggingface.co/spaces/abidlabs/chatbot-minimal, https://huggingface.co/spaces/ThomasSimonini/Chat-with-Gandalf-GPT-J6B, https://huggingface.co/spaces/gorkemgoknar/moviechatbot, https://huggingface.co/spaces/Kirili4ik/chat-with-Kirill
tags: NLP, TEXT, HTML

## Introduction

Chatbots are widely studied in natural language processing (NLP) research and are a common use case of NLP in industry. Because chatbots are designed to be used directly by customers and end users, it is important to validate that chatbots are behaving as expected when confronted with a wide variety of input prompts. Using `gradio`, you can easily build a demo of your chatbot model and share that with a testing team, or test it yourself using an intuitive chatbot GUI.

This tutorial will show how to take a pretrained chatbot model and deploy it with a Gradio interface in 4 steps. The live chatbot interface that we create will look something like this (try it!):

<iframe src="https://hf.space/gradioiframe/abidlabs/chatbot-stylized/+" frameBorder="0" height="350" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>

Chatbots are *stateful*, meaning that the model's prediction can change depending on how the user has previously interacted with the model. So, in this tutorial, we will also cover how to use **state** with Gradio demos.

### Prerequisites

Make sure you have the `gradio` Python package already [installed](/getting_started). To use a pretrained chatbot model, also install `transformers` and `torch`.

## Step 1 — Setting up the Chatbot Model

First, you will need a chatbot model: either one that you have trained yourself, or a pretrained model that you download. In this tutorial, we will use a pretrained chatbot model, `DialoGPT`, and its tokenizer from the [Hugging Face Hub](https://huggingface.co/microsoft/DialoGPT-medium), but you can replace this with your own model.

Here is the code to load `DialoGPT` from Hugging Face `transformers`.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
```

## Step 2 — Defining a `predict` function

Next, you will need to define a function that takes in the *user input* as well as the previous *chat history* to generate a response.

In the case of our pretrained model, it will look like this:

```python
def predict(input, history=[]):
    # tokenize the new input sentence
    new_user_input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors='pt')

    # append the new user input tokens to the chat history
    bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)

    # generate a response
    history = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id).tolist()
    # convert the tokens to text, and then split the responses into lines
    response = tokenizer.decode(history[0]).replace("<|endoftext|>", "\n")

    return response, history
```

Let's break this down. The function takes two parameters:

* `input`: which is what the user enters (through the Gradio GUI) in a particular step of the conversation.
* `history`: which represents the **state**, consisting of the list of user and bot responses. To create a stateful Gradio demo, we *must* pass in a parameter to represent the state, and we set the default value of this parameter to be the initial value of the state (in this case, the empty list since this is what we would like the chat history to be at the start).

Then, the function tokenizes the input and concatenates it with the tokens corresponding to the previous user and bot responses. Then, this is fed into the pretrained model to get a prediction. Finally, we do some cleaning up so that we can return two values from our function:

* `response`: the text of all of the user and bot responses. This will be rendered as the output in the Gradio demo.
* `history`: the token representation of all of the user and bot responses. In stateful Gradio demos, we *must* return the updated state at the end of the function.
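You can exercise this state round trip outside of the GUI by threading `history` through successive calls yourself (a quick sketch, assuming the model and tokenizer from Step 1 are loaded):

```python
# First turn: no history yet, so the default empty list is used.
response, history = predict("Hello, how are you?")

# Second turn: pass the returned history back in, exactly as Gradio's
# "state" component will do between submits.
response, history = predict("What do you do for fun?", history)
print(response)
```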
## Step 3 — Creating a Gradio Interface

Now that we have our predictive function set up, we can create a Gradio Interface around it.

In this case, our function takes in two values, a text input and a state input. The corresponding input components in `gradio` are `"text"` and `"state"`.

The function also returns two values. For now, we will display the list of responses as `"text"` and use the `"state"` output component type for the second return value.

Note that the `"state"` input and output components are not displayed.

```python
import gradio as gr

gr.Interface(fn=predict,
             inputs=["text", "state"],
             outputs=["text", "state"]).launch()
```

This produces the following interface, which you can try right here in your browser (try typing in some simple greetings like "Hi!" to get started):

<iframe src="https://hf.space/gradioiframe/abidlabs/chatbot-minimal/+" frameBorder="0" height="350" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>

## Step 4 — Styling Your Interface

The problem is that the output of the chatbot looks pretty ugly. No problem, we can make it prettier by using a little bit of CSS. First, we modify our function to return a string of HTML components, instead of just text:

```python
def predict(input, history=[]):
    # tokenize the new input sentence
    new_user_input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors='pt')

    # append the new user input tokens to the chat history
    bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)

    # generate a response
    history = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id).tolist()

    # convert the tokens to text, and then split the responses into lines
    response = tokenizer.decode(history[0]).split("<|endoftext|>")
    response.remove("")

    # write some HTML
    html = "<div class='chatbot'>"
    for m, msg in enumerate(response):
        cls = "user" if m % 2 == 0 else "bot"
        html += "<div class='msg {}'> {}</div>".format(cls, msg)
    html += "</div>"

    return html, history
```

Now, we change the first output component to be `"html"` instead, since now we are returning a string of HTML code. We also include some custom CSS, passed via the `css` parameter, to make the output prettier.

```python
import gradio as gr

css = """
.chatbot {display:flex;flex-direction:column}
.msg {padding:4px;margin-bottom:4px;border-radius:4px;width:80%}
.msg.user {background-color:cornflowerblue;color:white}
.msg.bot {background-color:lightgray;align-self:self-end}
"""

gr.Interface(fn=predict,
             inputs=[gr.inputs.Textbox(placeholder="How are you?"), "state"],
             outputs=["html", "state"],
             css=css).launch()
```

Notice that we have also added a placeholder to the input `text` component by instantiating the `gr.inputs.Textbox()` class and passing in a `placeholder` value, and now we are good to go! Try it out below:

<iframe src="https://hf.space/gradioiframe/abidlabs/chatbot-stylized/+" frameBorder="0" height="350" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>

----------

And you're done! That's all the code you need to build an interface for your chatbot model. Here are some references that you may find useful:

* Gradio's ["Getting Started" guide]()
* The [chatbot demo]() and [complete code]() (on Hugging Face Spaces)

@ -1,8 +1,10 @@
## Getting Started

**Prerequisite**: Python 3.7+ and that's it!

### Quick Start

To get Gradio running with a simple example, follow these three steps:
To get Gradio running with a simple "Hello, World" example, follow these three steps:

<span>1.</span> Install Gradio from pip.

@ -18,15 +20,15 @@ pip install gradio

{{ demos["hello_world"] }}

### The Interface
### Understanding the `Interface` class

Gradio can wrap almost any Python function with an easy-to-use user interface. That function could be anything from a simple tax calculator to a pretrained machine learning model.
Gradio can wrap almost any Python function with an easy-to-use user interface. In the example above, we saw a simple text-based function. But the function could be anything from an image enhancer to a tax calculator to (most commonly) the prediction function of a pretrained machine learning model.

The core `Interface` class is initialized with three parameters:

- `fn`: the function to wrap
- `inputs`: the input component type(s)
- `outputs`: the output component type(s)
- `inputs`: the input component type(s), e.g. `"image"` or `"audio"` ([see docs for complete list](/docs))
- `outputs`: the output component type(s), e.g. `"image"` or `"label"` ([see docs for complete list](/docs))

With these three arguments, we can quickly create interfaces and `launch()` them. But what if you want to change how the UI components look or behave?

@ -84,14 +86,14 @@ Note there is no submit button, because the interface resubmits automatically on

### Using State

Your function may use data that persists beyond a single function call. If the data is something accessible to all function calls, you can create a global variable outside the function call and access it inside the function. For example, you may load a large model outside the function and use it inside the function so that every function call does not need to reload the model.
Your function may use data that persists beyond a single function call. If the data is something accessible to all function calls and all users, you can create a global variable outside the function call and access it inside the function. For example, you may load a large model outside the function and use it inside the function so that every function call does not need to reload the model.
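As a minimal sketch of this pattern (the lookup table below is a stand-in for an expensive model load):

```python
import gradio as gr

# Created once at startup; every call to predict() reuses it, the same
# way a large model loaded here would be reused.
LOOKUP = {"hi": "hello", "bye": "goodbye"}

def predict(word):
    return LOOKUP.get(word, "unknown")

gr.Interface(fn=predict, inputs="text", outputs="text").launch()
```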
Another type of data persistence Gradio supports is session state, where data persists across multiple submits within a page load. To store data with this permanence, use `gr.get_state` and `gr.set_state` methods.
Another type of data persistence Gradio supports is session **state**, where data persists across multiple submits within a page load. However, data is *not* shared between different users of your model. To store data in a session state, you need to do three things: (1) Pass in an extra parameter into your function, which represents the state of the interface. (2) At the end of the function, return the updated value of the state as an extra return value. (3) Add the `'state'` input and `'state'` output components when creating your `Interface`. See the chatbot example below:

{{ code["chatbot"] }}
{{ demos["chatbot"] }}

Notice how the state persists across submits within each page, but the state is not shared between the two pages.
Notice how the state persists across submits within each page, but the state is not shared between the two pages. Some more points to note: you can pass in a default value to the state parameter, which is used as the initial value of the state. The state must be something that can be serialized to JSON (e.g. a dictionary, a list, or a single value; custom objects typically will not work).
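For a smaller example than the chatbot, a per-session counter shows the same three ingredients (the extra parameter, the extra return value, and the `'state'` components):

```python
import gradio as gr

def count(message, n=0):  # (1) state parameter, with its initial value
    n += 1
    return "Submission #{}: {}".format(n, message), n  # (2) return updated state

gr.Interface(
    fn=count,
    inputs=["text", "state"],   # (3) 'state' input...
    outputs=["text", "state"],  # ...and 'state' output
).launch()
```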
### Flagging

@ -2,7 +2,7 @@

# Welcome to Gradio

Quickly create customizable UI components around your models. Gradio makes it easy for you to "play around" with your model in your browser by dragging-and-dropping in your own images, pasting your own text, recording your own voice, etc. and seeing what the model outputs.
Quickly create beautiful user interfaces around your machine learning models. Gradio (pronounced GRAY-dee-oh) makes it easy for you to demo your model in your browser or let people "try it out" by dragging-and-dropping in their own images, pasting text, recording their own voice, etc. and seeing what the model outputs.



@ -12,7 +12,7 @@ Gradio is useful for:

* **Deploying** your models quickly with automatic shareable links and getting feedback on model performance

* **Debugging** your model interactively during development using built-in interpretation visualizations for any model
* **Debugging** your model interactively during development using built-in manipulation and interpretation tools

**You can find an interactive version of the following Getting Started at [https://gradio.app/getting_started](https://gradio.app/getting_started).**

156 guides/using_flagging.md Normal file
@ -0,0 +1,156 @@
# Using Flagging

related_spaces: https://huggingface.co/spaces/aliabd/calculator-flagging-crowdsourced, https://huggingface.co/spaces/aliabd/calculator-flagging-options, https://huggingface.co/spaces/aliabd/calculator-flag-basic
tags: FLAGGING, DATA

## The `flag` button

Underneath the output interfaces, there is a button marked `flag`. When a user testing your model sees an input with interesting output, such as erroneous or unexpected model behaviour, they can flag the input for the interface creator to review.

![]()

There are four parameters in `gr.Interface` that control how flagging works. We will go over them in greater detail.

* `allow_flagging`:
    * This parameter can be set to either `"manual"`, `"auto"`, or `"never"`.
        * `manual`: users will see a button to flag, and events are only flagged when it's clicked.
        * `auto`: users will not see a button to flag, but every event will be flagged automatically.
        * `never`: users will not see a button to flag, and no event will be flagged.
* `flagging_options`:
    * This parameter takes a list of strings.
    * If provided, allows the user to select from a list of options when flagging. Only applies if `allow_flagging` is `"manual"`.
    * The chosen option is then piped along with the input and output.
* `flagging_dir`:
    * This parameter takes a string.
    * The name of the directory where flagged data is stored.
* `flagging_callback`:
    * Using this parameter allows you to write custom code that gets run when the flag button is clicked.
    * One example is setting it to `gr.HuggingFaceDatasetSaver`, which allows you to pipe any flagged data into a Hugging Face dataset.

## The data

Within the directory provided by the `flagging_dir` argument, a CSV file will log the flagged data.

Here's an example: the code below creates the calculator interface embedded below it:

```python
import gradio as gr


def calculator(num1, operation, num2):
    if operation == "add":
        return num1 + num2
    elif operation == "subtract":
        return num1 - num2
    elif operation == "multiply":
        return num1 * num2
    elif operation == "divide":
        return num1 / num2


iface = gr.Interface(
    calculator,
    ["number", gr.inputs.Radio(["add", "subtract", "multiply", "divide"]), "number"],
    "number",
    allow_flagging="manual"
)

iface.launch()
```

<iframe src="https://hf.space/gradioiframe/aliabd/calculator-flag-basic/+" frameBorder="0" height="500" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>

When you click the flag button above, the directory where the interface was launched will include a new `flagged` subfolder, with a CSV file inside it. This CSV file includes all the data that was flagged.

```directory
+-- flagged/
|   +-- logs.csv
```

_flagged/logs.csv_
```csv
num1,operation,num2,Output,timestamp
5,add,7,12,2022-01-31 11:40:51.093412
6,subtract,1.5,4.5,2022-01-31 03:25:32.023542
```

If the interface involves file data, such as for Image and Audio components, folders will be created to store that flagged data as well. For example, an `image` input to `image` output interface will create the following structure.

```directory
+-- flagged/
|   +-- logs.csv
|   +-- image/
|   |   +-- 0.png
|   |   +-- 1.png
|   +-- Output/
|   |   +-- 0.png
|   |   +-- 1.png
```

_flagged/logs.csv_
```csv
im,Output,timestamp
im/0.png,Output/0.png,2022-02-04 19:49:58.026963
im/1.png,Output/1.png,2022-02-02 10:40:51.093412
```

If you wish for the user to provide a reason for flagging, you can pass a list of strings to the `flagging_options` argument of Interface. Users will have to select one of the strings when flagging, which will be saved as an additional column to the CSV.

If we go back to the calculator example, the following code will create the interface embedded below it.
```python
iface = gr.Interface(
    calculator,
    ["number", gr.inputs.Radio(["add", "subtract", "multiply", "divide"]), "number"],
    "number",
    allow_flagging="manual",
    flagging_options=["wrong sign", "off by one", "other"]
)

iface.launch()
```
<iframe src="https://hf.space/gradioiframe/aliabd/calculator-flagging-options/+" frameBorder="0" height="500" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>

When users click the flag button, the CSV file will now include a column indicating the selected option.

_flagged/logs.csv_
```csv
num1,operation,num2,Output,flag,timestamp
5,add,7,-12,wrong sign,2022-02-04 11:40:51.093412
6,subtract,1.5,3.5,off by one,2022-02-04 11:42:32.062512
```

## Doing more with the data

Suppose you want to take some action on the flagged data, instead of just saving it. Perhaps you want to trigger your model to retrain, or even just share it with others in a cloud dataset. We've made this super easy with the `flagging_callback` parameter.

For example, below we're going to pipe flagged data from our calculator example into a crowd-sourced Hugging Face Dataset.

```python
import os

HF_TOKEN = os.getenv('HF_TOKEN')
hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, "crowdsourced-calculator-demo")

iface = gr.Interface(
    calculator,
    ["number", gr.inputs.Radio(["add", "subtract", "multiply", "divide"]), "number"],
    "number",
    allow_flagging="manual",
    flagging_options=["wrong sign", "off by one", "other"],
    flagging_callback=hf_writer
)

iface.launch()
```
<iframe src="https://hf.space/gradioiframe/aliabd/calculator-flagging-crowdsourced/+" frameBorder="0" height="500" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>

You can now see all the examples flagged above in this [public HF dataset](https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo/blob/main/data.csv).

![]()

We created the `gr.HuggingFaceDatasetSaver` class, but you can pass your own custom class as long as it inherits from `FlaggingCallback` defined in [this file](https://github.com/gradio-app/gradio/blob/master/gradio/flagging.py). If you create a cool callback, please contribute it to the repo!
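A minimal custom callback might look like the sketch below. The method names follow `flagging.py` at the time of writing, but treat the exact signatures as an assumption and check that file before relying on them:

```python
import gradio as gr
from gradio.flagging import FlaggingCallback

class PrintLogger(FlaggingCallback):
    """Toy callback that just prints flagged data to the console."""

    def setup(self, flagging_dir):
        # Called once, when the interface is launched.
        self.flagging_dir = flagging_dir

    def flag(self, interface, input_data, output_data,
             flag_option=None, flag_index=None, username=None):
        # Called each time the flag button is clicked.
        print("Flagged:", input_data, "->", output_data, "|", flag_option)
        return 1  # count of flagged samples logged
```

You would then pass `flagging_callback=PrintLogger()` to `gr.Interface`.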
## Privacy

Please make sure your users understand when the data they submit is being saved, and what you plan on doing with it. This is especially important when you use `allow_flagging="auto"`. We suggest including this info in the description so that it's read before users interact with the interface.

### That's all! Happy building :)

@ -5,5 +5,5 @@ if [ -z "$(ls | grep CONTRIBUTING.md)" ]; then
else
  echo "Running the frontend"
  cd frontend
  npm start
  npm run dev
fi

2 setup.py
@ -5,7 +5,7 @@ except ImportError:

setup(
    name="gradio",
    version="2.7.5.2",
    version="2.8.0b5",
    include_package_data=True,
    description="Python library for easily interacting with trained machine learning models",
    author="Abubakar Abid, Ali Abid, Ali Abdalla, Dawood Khan, Ahsen Khaliq",

Binary file not shown. (Before: 304 KiB, After: 236 KiB)
@ -122,7 +122,10 @@ class TestDemo(unittest.TestCase):
            time.sleep(0.2)
            total_sleep += 0.2

        self.assertEqual(elem.text, "L + e + W - a - n - t ' + s + t - g + o s e e a m a g i c t r i c k ? - ! +")
        self.assertEqual(
            elem.text,
            "L + e + W - a - n - t ' + s + t - g + o s e e a m a g i c t r i c k ? - ! +",
        )
        golden_img = os.path.join(
            current_dir, GOLDEN_PATH.format("diff_texts", "magic_trick")
        )

@ -12,12 +12,12 @@ class TestKeyGenerator(unittest.TestCase):
    def test_same_pass(self):
        key1 = encryptor.get_key("test")
        key2 = encryptor.get_key("test")
        self.assertEquals(key1, key2)
        self.assertEquals(key1, key2)

    def test_diff_pass(self):
        key1 = encryptor.get_key("test")
        key2 = encryptor.get_key("diff_test")
        self.assertNotEquals(key1, key2)
        self.assertNotEquals(key1, key2)


class TestEncryptorDecryptor(unittest.TestCase):
@ -27,7 +27,7 @@ class TestEncryptorDecryptor(unittest.TestCase):
        encrypted_data = encryptor.encrypt(key, data)
        decrypted_data = encryptor.decrypt(key, encrypted_data)
        self.assertEquals(data, decrypted_data)


if __name__ == "__main__":
    unittest.main()

@ -27,7 +27,6 @@ class TestHuggingFaceModelAPI(unittest.TestCase):
        self.assertIsInstance(interface_info["inputs"], gr.inputs.Audio)
        self.assertIsInstance(interface_info["outputs"], gr.outputs.Audio)

    def test_question_answering(self):
        model_type = "question-answering"
        interface_info = gr.external.get_huggingface_interface(

@ -38,7 +38,7 @@ class TestSimpleFlagging(unittest.TestCase):
        self.assertEqual(row_count, 1)  # no header in SimpleCSVLogger
        io.close()


class TestHuggingFaceDatasetSaver(unittest.TestCase):
    def test_saver_setup(self):
        huggingface_hub.create_repo = MagicMock()
@ -47,7 +47,7 @@ class TestHuggingFaceDatasetSaver(unittest.TestCase):
        with tempfile.TemporaryDirectory() as tmpdirname:
            flagger.setup(tmpdirname)
        huggingface_hub.create_repo.assert_called_once()

    def test_saver_flag(self):
        huggingface_hub.create_repo = MagicMock()
        huggingface_hub.Repository = MagicMock()
@ -65,7 +65,7 @@ class TestHuggingFaceDatasetSaver(unittest.TestCase):
        self.assertEqual(row_count, 1)  # 2 rows written including header
        row_count = io.flagging_callback.flag(io, ["test"], ["test"])
        self.assertEqual(row_count, 2)  # 3 rows written including header


if __name__ == "__main__":
    unittest.main()

@ -147,10 +147,10 @@ class TestNumber(unittest.TestCase):
        )

    def test_in_interface(self):
        iface = gr.Interface(lambda x: x ** 2, "number", "textbox")
        iface = gr.Interface(lambda x: x**2, "number", "textbox")
        self.assertEqual(iface.process([2])[0], ["4.0"])
        iface = gr.Interface(
            lambda x: x ** 2, "number", "textbox", interpretation="default"
            lambda x: x**2, "number", "textbox", interpretation="default"
        )
        scores, alternative_outputs = iface.interpret([2])
        self.assertEqual(
@ -211,10 +211,10 @@ class TestSlider(unittest.TestCase):
        )

    def test_in_interface(self):
        iface = gr.Interface(lambda x: x ** 2, "slider", "textbox")
        iface = gr.Interface(lambda x: x**2, "slider", "textbox")
        self.assertEqual(iface.process([2])[0], ["4"])
        iface = gr.Interface(
            lambda x: x ** 2, "slider", "textbox", interpretation="default"
            lambda x: x**2, "slider", "textbox", interpretation="default"
        )
        scores, alternative_outputs = iface.interpret([2])
        self.assertEqual(
@ -568,10 +568,10 @@ class TestAudio(unittest.TestCase):

    def test_tokenize(self):
        x_wav = gr.test_data.BASE64_AUDIO
        audio_input = gr.inputs.Audio()
        audio_input = gr.inputs.Audio()
        tokens, _, _ = audio_input.tokenize(x_wav)
        self.assertEquals(len(tokens), audio_input.interpretation_segments)
        x_new = audio_input.get_masked_inputs(tokens, [[1]*len(tokens)])[0]
        x_new = audio_input.get_masked_inputs(tokens, [[1] * len(tokens)])[0]
        similarity = SequenceMatcher(a=x_wav["data"], b=x_new).ratio()
        self.assertGreater(similarity, 0.9)
@ -6,8 +6,7 @@ import numpy as np
import gradio.interpretation
import gradio.test_data
from gradio import Interface
from gradio.processing_utils import (decode_base64_to_image,
                                     encode_array_to_base64)
from gradio.processing_utils import decode_base64_to_image, encode_array_to_base64

os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"

@ -8,14 +8,17 @@ os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"

class TestProcessExamples(unittest.TestCase):
    def test_process_example(self):
        io = Interface(lambda x: "Hello " + x, "text", "text",
                       examples=[["World"]])
        io = Interface(lambda x: "Hello " + x, "text", "text", examples=[["World"]])
        prediction, _ = process_examples.process_example(io, 0)
        self.assertEquals(prediction[0], "Hello World")

    def test_caching(self):
        io = Interface(lambda x: "Hello " + x, "text", "text",
                       examples=[["World"], ["Dunya"], ["Monde"]])
        io = Interface(
            lambda x: "Hello " + x,
            "text",
            "text",
            examples=[["World"], ["Dunya"], ["Monde"]],
        )
        io.launch(prevent_thread_lock=True)
        process_examples.cache_interface_examples(io)
        prediction = process_examples.load_from_cache(io, 1)

@ -15,16 +15,17 @@ class TestQueuingOpenClose(unittest.TestCase):
    def test_init(self):
        queueing.init()
        self.assertTrue(os.path.exists(queueing.DB_FILE))
        os.remove(queueing.DB_FILE)

        os.remove(queueing.DB_FILE)

    def test_close(self):
        queueing.close()
        self.assertFalse(os.path.exists(queueing.DB_FILE))


class TestQueuingActions(unittest.TestCase):
    def setUp(self):
        queueing.init()

    def test_hashing(self):
        hash1 = queueing.generate_hash()
        hash2 = queueing.generate_hash()
@ -43,26 +44,27 @@ class TestQueuingActions(unittest.TestCase):
        self.assertEquals(hash_popped, hash1)
        self.assertEquals(input_data, {"data": "test1"})
        self.assertEquals(action, "predict")

    def test_jobs(self):
        hash1, _ = queueing.push({"data": "test1"}, "predict")
        hash2, position = queueing.push({"data": "test1"}, "predict")
        self.assertEquals(position, 1)

        queueing.start_job(hash1)
        _, position = queueing.get_status(hash2)
        self.assertEquals(position, 1)
        queueing.pass_job(hash1, {"data": "result"})
        _, position = queueing.get_status(hash2)
        self.assertEquals(position, 0)

        queueing.start_job(hash2)
        queueing.fail_job(hash2, "failure")
        status, _ = queueing.get_status(hash2)
        self.assertEquals(status, "FAILED")

    def tearDown(self):
        queueing.close()


if __name__ == "__main__":
    unittest.main()
@ -1,3 +1,4 @@
packages/app/public/**
pnpm-workspace.yaml
packages/app/dist/**
packages/app/dist/**
pnpm-lock.yaml

6 ui/global.d.ts vendored Normal file
@ -0,0 +1,6 @@
declare namespace svelte.JSX {
  interface DOMAttributes<T extends EventTarget> {
    theme?: string;
    "item-type"?: string;
  }
}

@ -7,7 +7,7 @@
      content="width=device-width, initial-scale=1, shrink-to-fit=no"
    />

    <script type="module" src="./src/main.js"></script>
    <script type="module" src="./src/main.ts"></script>
    <title>{{ config['title'] or 'Gradio' }}</title>
    <meta property="og:url" content="https://gradio.app/" />
    <meta property="og:type" content="website" />
@ -25,10 +25,12 @@
      content="{{ config['simple_description'] or '' }}"
    />
    <meta name="twitter:image" content="{{ config['thumbnail'] or '' }}" />
    {%if config['analytics_enabled'] %}
    <script
      async
      src="https://www.googletagmanager.com/gtag/js?id=UA-156449732-1"
    ></script>
    {% endif %}
    <script>
      window.dataLayer = window.dataLayer || [];
      function gtag() {

@ -16,6 +16,9 @@
    "vite": "^2.7.13"
  },
  "dependencies": {
    "@types/d3-dsv": "^3.0.0",
    "@types/d3-scale": "^4.0.2",
    "@types/d3-shape": "^3.0.2",
    "autoprefixer": "^9.8.8",
    "cropperjs": "^1.5.12",
    "d3-dsv": "^3.0.1",
@ -1,23 +1,36 @@
<script>
<script context="module" lang="ts">
  interface CustomWindow extends Window {
    gradio_mode: "app" | "website";
  }

  declare let window: CustomWindow;
</script>

<script lang="ts">
  import Interface from "./Interface.svelte";
  import "./global.css";

  export let title;
  export let description;
  export let article;
  export let theme;
  export let dark;
  export let input_components;
  export let output_components;
  export let examples;
  export let fn;
  export let root;
  export let space;
  export let allow_flagging;
  export let allow_interpretation;
  export let live;
  export let queue;
  export let static_src;
  interface Component {
    name: string;
    [key: string]: unknown;
  }

  export let title: string;
  export let description: string;
  export let article: string;
  export let theme: string;
  export let dark: boolean;
  export let input_components: Array<Component>;
  export let output_components: Array<Component>;
  export let examples: Array<Array<unknown>>;
  export let fn: (...args: any) => Promise<unknown>;
  export let root: string;
  export let space: string | undefined = undefined;
  export let allow_flagging: string;
  export let allow_interpretation: boolean;
  export let live: boolean;
  export let queue: boolean;
  export let static_src: string;

  $: embedded = space !== undefined;
</script>
@ -66,7 +79,7 @@
  href={"https://huggingface.co/spaces/" + space}
  class="font-semibold"
>
  {space.includes("/")
  {space && space.includes("/")
    ? space[space.indexOf("/") + 1].toUpperCase() +
      space.substring(space.indexOf("/") + 2)
    : space}
@ -1,13 +1,18 @@
<script>
  import { input_component_map } from "./components/directory.js";
<script lang="ts">
  import { input_component_map } from "./components/directory";

  export let examples,
    examples_dir,
    example_id,
    setExampleId,
    examples_per_page,
    input_components,
    theme;
  interface Component {
    name: string;
    [key: string]: unknown;
  }

  export let examples: Array<Array<unknown>>;
  export let examples_dir: string;
  export let example_id: number | undefined;
  export let setExampleId: Function;
  export let examples_per_page: number;
  export let input_components: Array<Component>;
  export let theme: string;

  let selected_examples = examples;
  let gallery = input_components.length === 1;
@ -42,8 +47,8 @@
>
  <thead class="border-b-2 dark:border-gray-600">
    <tr>
      {#each input_components as input_component, i}
        <th class="py-2 px-4" key={i}>
      {#each input_components as input_component}
        <th class="py-2 px-4">
          {input_component.label}
        </th>
      {/each}
@ -53,7 +58,6 @@
    {#each selected_examples as example_row, i}
      <tr
        class="cursor-pointer transition"
        key={i}
        class:selected={i === example_id}
        on:click={() => setExampleId(i)}
      >
@ -1,57 +1,63 @@
<script>
<script lang="ts">
  import {
    input_component_map,
    output_component_map
  } from "./components/directory.js";
  import { deepCopy } from "./components/utils/helpers.js";
  } from "./components/directory";
  import { deepCopy } from "./components/utils/helpers";
  import ExampleSet from "./ExampleSet.svelte";

  export let input_components;
  export let output_components;
  export let theme;
  export let fn;
  export let examples;
  export let root;
  export let allow_flagging;
  export let allow_interpretation;
  export let avg_durations;
  export let live;
  export let queue;
  export let static_src;
  interface Component {
    name: string;
    [key: string]: unknown;
  }

  export let input_components: Array<Component>;
  export let output_components: Array<Component>;
  export let theme: string;
  export let fn: (...args: any) => Promise<unknown>;
  export let examples: Array<Array<unknown>>;
  export let root: string;
  export let allow_flagging: string;
  export let allow_interpretation: boolean;
  export let avg_durations: undefined | Array<number> = undefined;
  export let live: boolean;
  export let queue: boolean;
  export let static_src: string;

  let examples_dir = root + "file/";
  let interpret_mode = false;
  let submission_count = 0;
  let state = "START";
  let last_duration = null;
  let last_duration: number | null = null;
  let has_changed = false;
  let queue_index = null;
  let initial_queue_index = null;
  let queue_index: number | null = null;
  let initial_queue_index: number | null = null;

  const default_inputs = input_components.map((component) =>
  const default_inputs: Array<unknown> = input_components.map((component) =>
    "default" in component ? component.default : null
  );
  console.log(default_inputs);
  const default_outputs = new Array(output_components.length).fill(null);

  let input_values = deepCopy(default_inputs);
  let input_values: Array<unknown> = deepCopy(default_inputs);
  let output_values = deepCopy(default_outputs);
  let interpretation_values = [];
  let timer = null;
  let interpretation_values: Array<unknown> = [];
  let timer: NodeJS.Timeout = null;
  let timer_start = 0;
  let timer_diff = 0;
  let avg_duration = Array.isArray(avg_durations)
    ? this.props.avg_durations[0]
    : null;
  let expected_duration = null;
  let avg_duration = Array.isArray(avg_durations) ? avg_durations[0] : null;
  let expected_duration: number | null = null;
  console.log({ interpretation_values });

  const setValues = (index, value) => {
  const setValues = (index: number, value: unknown) => {
    has_changed = true;
    input_values[index] = value;
    if (live && state !== "PENDING") {
      submit();
    }
  };
  const setExampleId = async (example_id) => {

  const setExampleId = async (example_id: number) => {
    input_components.forEach(async (input_component, i) => {
      const process_example =
        input_component_map[input_component.name].process_example;
@ -65,6 +71,7 @@
      }
    });
  };

  const startTimer = () => {
    timer_start = Date.now();
    timer_diff = 0;
@ -72,9 +79,11 @@
      timer_diff = (Date.now() - timer_start) / 1000;
    }, 100);
  };

  const stopTimer = () => {
    clearInterval(timer);
  };

  const submit = () => {
    if (state === "PENDING") {
      return;
@ -173,7 +182,8 @@
      });
    }
  };
  const queueCallback = (index, is_initial) => {

  const queueCallback = (index: number, is_initial: boolean) => {
    if (is_initial) {
      initial_queue_index = index;
    }
@ -203,7 +213,7 @@
  interpretation={interpret_mode
    ? interpretation_values[i]
    : null}
  setValue={setValues.bind(this, i)}
  setValue={(value) => setValues(i, value)}
/>
</div>
{/if}
@ -1,5 +1,5 @@
<script>
  export let root;
<script lang="ts">
  export let root: string;
</script>

<div class="login container mt-8">
@ -1,10 +1,10 @@
function delay(n) {
function delay(n: number) {
  return new Promise(function (resolve) {
    setTimeout(resolve, n * 1000);
  });
}

let postData = async (url, body) => {
let postData = async (url: string, body: unknown) => {
  const output = await fetch(url, {
    method: "POST",
    body: JSON.stringify(body),
@ -13,7 +13,13 @@ let postData = async (url, body) => {
  return output;
};

export const fn = async (api_endpoint, action, data, queue, queue_callback) => {
export const fn = async (
  api_endpoint: string,
  action: string,
  data: Record<string, unknown>,
  queue: boolean,
  queue_callback: (pos: number | null, is_initial?: boolean) => void
) => {
  if (queue && ["predict", "interpret"].includes(action)) {
    data["action"] = action;
    const output = await postData(api_endpoint + "queue/push/", data);
@ -1,6 +1,5 @@
<script>
  export let value, setValue, theme;
  export let choices;
<script lang="ts">
  export let theme: string;
</script>

<div class="dummy" {theme}>DUMMY</div>
@ -25,8 +25,6 @@ import OutputTextbox from "./output/Textbox/config.js";
import OutputVideo from "./output/Video/config.js";
import OutputTimeSeries from "./output/TimeSeries/config.js";

import Dummy from "./Dummy.svelte";

export const input_component_map = {
  audio: InputAudio,
  checkbox: InputCheckbox,
@ -1,31 +1,34 @@
<script>
<script lang="ts">
  import type { Value } from "./types";

  import { onDestroy } from "svelte";
  import Upload from "../../utils/Upload.svelte";
  import ModifyUpload from "../../utils/ModifyUpload.svelte";
  //@ts-ignore
  import Range from "svelte-range-slider-pips";

  export let value,
    setValue,
    theme,
    name,
    static_src,
    is_example = false;
  export let source;
  export let value: null | Value;
  export let setValue: (val: typeof value) => typeof value;
  export let theme: string;
  export let name: string;
  export let static_src: string;
  export let is_example: boolean = false;
  export let source: "microphone" | "upload";

  let recording = false;
  let recorder;
  let recorder: MediaRecorder;
  let mode = "";
  let audio_chunks = [];
  let audio_chunks: Array<Blob> = [];
  let audio_blob;
  let player;
  let inited = false;
  let crop_values = [0, 100];

  function blob_to_data_url(blob) {
  function blob_to_data_url(blob: Blob): Promise<string> {
    return new Promise((fulfill, reject) => {
      let reader = new FileReader();
      reader.onerror = reject;
      reader.onload = (e) => fulfill(reader.result);
      reader.onload = (e) => fulfill(reader.result as string);
      reader.readAsDataURL(blob);
    });
  }
@ -74,7 +77,7 @@
    mode = "";
  }

  function loaded(node) {
  function loaded(node: HTMLAudioElement) {
    function clamp_playback() {
      const start_time = (crop_values[0] / 100) * node.duration;
      const end_time = (crop_values[1] / 100) * node.duration;
@ -94,6 +97,22 @@
      destroy: () => node.removeEventListener("timeupdate", clamp_playback)
    };
  }

  function handle_change({
    detail: { values }
  }: {
    detail: { values: [number, number] };
  }) {
    if (!value?.data) return;

    setValue({
      data: value.data,
      name,
      is_example,
      crop_min: values[0],
      crop_max: values[1]
    });
  }
</script>

<div class="input-audio">
@ -146,14 +165,7 @@
  min={0}
  max={100}
  step={1}
  on:change={({ detail: { values } }) =>
    setValue({
      data: value.data,
      name,
      is_example,
      crop_min: values[0],
      crop_max: values[1]
    })}
  on:change={handle_change}
/>
{/if}
{/if}
@ -1,5 +1,7 @@
|
||||
<script>
|
||||
export let value;
|
||||
<script lang="ts">
|
||||
import type { Value } from "./types";
|
||||
|
||||
export let value: Value;
|
||||
</script>
|
||||
|
||||
<div class="input-audio-example">{value}</div>
|
||||
|
@ -1,6 +1,12 @@
|
||||
<script>
|
||||
<script lang="ts">
|
||||
import type { Value } from "./types";
|
||||
|
||||
import { getSaliencyColor } from "../../utils/helpers";
|
||||
export let value, interpretation, theme;
|
||||
export let value: Value;
|
||||
export let interpretation: Array<number>;
|
||||
export let theme: string;
|
||||
|
||||
$: console.log({ value, interpretation, theme });
|
||||
</script>
|
||||
|
||||
<div class="input-audio" {theme}>
|
||||
|
@ -1,4 +1,4 @@
|
||||
import Component from "./Component.svelte";
|
||||
import Component from "./Audio.svelte";
|
||||
import ExampleComponent from "./Example.svelte";
|
||||
import Interpretation from "./Interpretation.svelte";
|
||||
import { loadAsFile } from "../../utils/example_processors";
|
8
ui/packages/app/src/components/input/Audio/types.ts
Normal file
8
ui/packages/app/src/components/input/Audio/types.ts
Normal file
@ -0,0 +1,8 @@
|
||||
export interface Value {
|
||||
data: string;
|
||||
is_example: boolean;
|
||||
name: string;
|
||||
size?: number;
|
||||
crop_min?: number;
|
||||
crop_max?: number;
|
||||
}
|
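For reference, here is a plausible `Value` instance. Treating `crop_min`/`crop_max` as 0-100 percentages of the clip duration is an inference from the `crop_values = [0, 100]` slider in Audio.svelte, not something this diff states outright:

```ts
import type { Value } from "./types";

// Hypothetical example object; the data URL is truncated for brevity.
const example_value: Value = {
  data: "data:audio/wav;base64,...",
  name: "recording.wav",
  is_example: false,
  crop_min: 10, // assumed: percent of duration, per the 0-100 slider
  crop_max: 90
};
```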
@ -1,5 +1,5 @@
<script>
export let value;
<script lang="ts">
export let value: boolean;
</script>

<div class="input-checkbox-example">{value.toLocaleString()}</div>
@ -1,7 +1,9 @@
<script>
<script lang="ts">
import { getSaliencyColor } from "../../utils/helpers";

export let value, interpretation, theme;
export let value: boolean;
export let interpretation: [number, number];
export let theme: string;
</script>

<div class="input-checkbox inline-block" {theme}>
@ -1,4 +1,4 @@
import Component from "./Component.svelte";
import Component from "./Checkbox.svelte";
import ExampleComponent from "./Example.svelte";
import Interpretation from "./Interpretation.svelte";
@ -1,8 +1,11 @@
<script>
export let value, setValue, theme;
export let choices;
<script lang="ts">
export let value: Array<string>;
export let setValue: (val: typeof value) => typeof value;
export let choices: Array<string>;

const toggleChoice = (choice) => {
export let theme: string;

const toggleChoice = (choice: string) => {
if (value.includes(choice)) {
value.splice(value.indexOf(choice), 1);
} else {
@ -17,7 +20,6 @@
<button
class="checkbox-item py-2 px-3 font-semibold rounded cursor-pointer flex items-center gap-2"
class:selected={value.includes(choice)}
key={i}
on:click={() => toggleChoice(choice)}
>
<div class="checkbox w-4 h-4 bg-white flex items-center justify-center">
@ -1,5 +1,5 @@
<script>
export let value;
<script lang="ts">
export let value: Array<string>;
</script>

<div class="input-checkboxgroup-example">{value.join(", ")}</div>
@ -1,8 +1,11 @@
<script>
<script lang="ts">
import { getSaliencyColor } from "../../utils/helpers";

export let value, interpretation, theme;
export let choices;
export let value: Array<string>;
export let interpretation: Array<[number, number]>;
export let choices: Array<string>;

export let theme: string;
</script>

<div class="input-checkbox-group flex flex-wrap gap-2" {theme}>
@ -10,7 +13,6 @@
<button
class="checkbox-item py-2 px-3 font-semibold rounded cursor-pointer flex items-center gap-1"
class:selected={value.includes(choice)}
key={i}
>
<div
class="checkbox w-4 h-4 bg-white flex items-center justify-center border border-gray-400 box-border"
@ -0,0 +1,9 @@
import Component from "./CheckboxGroup.svelte";
import ExampleComponent from "./Example.svelte";
import Interpretation from "./Interpretation.svelte";

export default {
component: Component,
example: ExampleComponent,
interpretation: Interpretation
};
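Each input folder in this diff exports a config module of the same rough shape: a required `component`, plus optional `example`, `interpretation`, and example-processor entries. A hedged sketch of that implied contract; the interface name and the optional fields are assumptions, since no such type is declared anywhere in this diff:

```ts
import type { SvelteComponent } from "svelte";

// Assumed shape of the per-input config modules seen in this diff.
interface InputConfig {
  component: typeof SvelteComponent;
  example?: typeof SvelteComponent;
  interpretation?: typeof SvelteComponent;
  // e.g. loadAsFile / loadAsData from utils/example_processors
  process_example?: (value: unknown) => unknown;
}
```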
@ -1,28 +1,33 @@
<script>
<script lang="ts">
import { tick } from "svelte";

export let theme = "";
export let label = "Title";
export let headers = [];
export let values = [
export let headers: Array<string> = [];
export let values: Array<Array<string | number>> = [
["Frank", 32, "Male"],
["Beatrice", 99, "Female"],
["Simone", 999, "Male"]
];
export let setValue;

export let setValue: (val: typeof values) => typeof values;
export let editable = true;

let id = 0;
let editing = false;
let selected = false;
let els = {};
let editing: boolean | number = false;
let selected: boolean | number = false;
let els: Record<
string,
{ cell: null | HTMLTableCellElement; input: null | HTMLInputElement }
> = {};

function make_headers(_h) {
type Headers = Array<{ value: string; id: number }>;

function make_headers(_h: Array<string>): Headers {
if (_h.length === 0) {
return values[0].map((_, i) => {
const _id = ++id;
els[_id] = { cell: null, input: null };
return { id: _id, value: i + 1 };
return { id: _id, value: JSON.stringify(i + 1) };
});
} else {
return _h.map((h) => {
@ -55,22 +60,32 @@

$: setValue(data.map((r) => r.map(({ value }) => value)));

function get_sort_status(name, sort) {
function get_sort_status(
name: string,
sort: number,
direction?: SortDirection
) {
if (!sort) return "none";
if (sort[0] === name) {
return sort[1];
if (headers[sort] === name) {
if (direction === "asc") return "ascending";
if (direction === "des") return "descending";
}
}

async function start_edit(id) {
async function start_edit(id: number) {
if (!editable) return;
editing = id;
await tick();
const { input } = els[id];
input.focus();
input?.focus();
}

function handle_keydown(event, i, j, id) {
function handle_keydown(
event: KeyboardEvent,
i: number,
j: number,
id: number
) {
let is_data;
switch (event.key) {
case "ArrowRight":
@ -116,18 +131,22 @@
}
}

async function handle_cell_click(id) {
async function handle_cell_click(id: number) {
editing = false;
selected = id;
}

async function set_focus(id, type) {
async function set_focus(id: number | boolean, type: "edit" | "select") {
if (type === "edit" && typeof id == "number") {
await tick();
els[id].input.focus();
els[id].input?.focus();
}

if (type === "edit" && typeof id == "boolean") {
if (
type === "edit" &&
typeof id == "boolean" &&
typeof selected === "number"
) {
let cell = els[selected]?.cell;
await tick();
cell?.focus();
@ -135,19 +154,20 @@

if (type === "select" && typeof id == "number") {
const { cell } = els[id];
cell.setAttribute("tabindex", 0);
cell?.setAttribute("tabindex", "0");
await tick();
els[id].cell.focus();
els[id].cell?.focus();
}
}

$: set_focus(editing, "edit");
$: set_focus(selected, "select");

let sort_direction;
let sort_by;
type SortDirection = "asc" | "des";
let sort_direction: SortDirection;
let sort_by: number;

function sort(col, dir) {
function sort(col: number, dir: SortDirection) {
if (dir === "asc") {
data = data.sort((a, b) => (a[col].value < b[col].value ? -1 : 1));
} else if (dir === "des") {
@ -155,7 +175,7 @@
}
}

function handle_sort(col) {
function handle_sort(col: number) {
if (typeof sort_by !== "number" || sort_by !== col) {
sort_direction = "asc";
sort_by = col;
@ -170,18 +190,18 @@
sort(col, sort_direction);
}

let header_edit;
let header_edit: number | boolean;

async function edit_header(_id, select) {
async function edit_header(_id: number, select?: boolean) {
if (!editable) return;
header_edit = _id;
await tick();
els[_id].input.focus();
els[_id].input?.focus();

if (select) els[_id].input.select();
if (select) els[_id].input?.select();
}

function end_header_edit(event) {
function end_header_edit(event: KeyboardEvent) {
if (!editable) return;

switch (event.key) {
@ -225,10 +245,13 @@
edit_header(_id, true);
}

const double_click = (node, { click, dblclick }) => {
let timer;
const double_click = (
node: HTMLElement,
{ click, dblclick }: { click: Function; dblclick: Function }
) => {
let timer: NodeJS.Timeout | undefined;

function handler(event) {
function handler(event: MouseEvent) {
if (timer) {
clearTimeout(timer);
timer = undefined;
@ -266,7 +289,7 @@
click: () => handle_sort(i),
dblclick: () => edit_header(id)
}}
aria-sort={get_sort_status(value, sort_by)}
aria-sort={get_sort_status(value, sort_by, sort_direction)}
class="relative after:absolute after:opacity-0 after:content-['▲'] after:ml-2 after:inset-y-0 after:h-[1.05rem] after:m-auto relative px-6 py-3 text-left text-xs font-medium text-gray-500 uppercase tracking-wider"
class:sorted={sort_by === i}
class:des={sort_by === i && sort_direction === "des"}
@ -279,7 +302,7 @@
bind:this={els[id].input}
on:keydown={end_header_edit}
on:blur={({ currentTarget }) =>
currentTarget.setAttribute("tabindex", -1)}
currentTarget.setAttribute("tabindex", "-1")}
/>
{/if}
<span
@ -303,7 +326,7 @@
on:keydown={(e) => handle_keydown(e, i, j, id)}
bind:this={els[id].cell}
on:blur={({ currentTarget }) =>
currentTarget.setAttribute("tabindex", -1)}
currentTarget.setAttribute("tabindex", "-1")}
>
<div
class:border-gray-600={selected === id}
@ -317,7 +340,7 @@
bind:value
bind:this={els[id].input}
on:blur={({ currentTarget }) =>
currentTarget.setAttribute("tabindex", -1)}
currentTarget.setAttribute("tabindex", "-1")}
/>
{/if}
<span
|
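The `double_click` helper typed above is a Svelte action that distinguishes a single click from a double click with a timer. A self-contained sketch of the same debounce idea; the 250 ms window is an assumption, since the delay value falls outside the hunks shown in this diff:

```ts
// Sketch: route a click to `click` or `dblclick` depending on whether
// a second click arrives before the timer fires.
function double_click(
  node: HTMLElement,
  { click, dblclick }: { click: () => void; dblclick: () => void }
) {
  let timer: ReturnType<typeof setTimeout> | undefined;

  function handler() {
    if (timer) {
      // A second click landed inside the window: treat as double click.
      clearTimeout(timer);
      timer = undefined;
      dblclick();
    } else {
      timer = setTimeout(() => {
        timer = undefined;
        click(); // No second click arrived.
      }, 250); // assumed delay, not shown in the diff
    }
  }

  node.addEventListener("click", handler);
  return {
    destroy: () => node.removeEventListener("click", handler)
  };
}
```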
@ -1,5 +0,0 @@
import Component from "./Component.svelte";

export default {
component: Component
};
5
ui/packages/app/src/components/input/DataFrame/config.ts
Normal file
@ -0,0 +1,5 @@
import Component from "./DataFrame.svelte";

export default {
component: Component
};
@ -1,6 +1,8 @@
<script>
export let value, setValue, theme;
export let choices;
<script lang="ts">
export let value: string;
export let setValue: (val: typeof value) => typeof value;
export let theme: string;
export let choices: Array<string>;
</script>

<div class="input-dropdown group inline-block relative" {theme}>
@ -22,7 +24,6 @@
<li
class="dropdown-item first:rounded-t transition last:rounded-b py-2 px-3 block whitespace-nowrap cursor-pointer"
on:click={() => setValue(choice)}
key={i}
>
{choice}
</li>
@ -1,5 +1,5 @@
<script>
export let value;
<script lang="ts">
export let value: string;
</script>

<div class="input-dropdown-example">{value}</div>
@ -1,8 +1,10 @@
<script>
<script lang="ts">
import { getSaliencyColor } from "../../utils/helpers";

export let value, interpretation, theme;
export let choices;
export let value: string;
export let interpretation: Array<number>;
export let theme: string;
export let choices: Array<string>;
</script>

<div class="input-dropdown" {theme}>
@ -11,7 +13,6 @@
<li
class="dropdown-item first:rounded-t transition last:rounded-b py-2 px-3 block whitespace-nowrap cursor-pointer"
style={"background-color: " + getSaliencyColor(interpretation[i])}
key={i}
>
{choice}
</li>
@ -1,4 +1,4 @@
import Component from "./Component.svelte";
import Component from "./Dropdown.svelte";
import ExampleComponent from "./Example.svelte";
import Interpretation from "./Interpretation.svelte";
@ -1,5 +1,6 @@
<script>
export let value;
<script lang="ts">
import type { FileData } from "./types";
export let value: FileData;
</script>

<div class="input-file-example">{value}</div>
@ -1,9 +1,15 @@
<script>
<script lang="ts">
import type { FileData } from "./types";
import Upload from "../../utils/Upload.svelte";
import ModifyUpload from "../../utils/ModifyUpload.svelte";
import { prettyBytes } from "../../utils/helpers";

export let value, setValue, theme, static_src;
export let value: null | FileData;
export let setValue: (
val: Array<string | FileData> | string | FileData | null
) => Array<string | FileData> | string | FileData | null;
export let theme: string;
export let static_src: string;
</script>

<div class="input-file" {theme}>
@ -18,6 +24,7 @@
class="file-preview w-full flex flex-row flex-wrap justify-center items-center relative"
>
<ModifyUpload clear={() => setValue(null)} {theme} {static_src} />

<svg
xmlns="http://www.w3.org/2000/svg"
class="h-10 w-1/5"
@ -1,4 +1,4 @@
import Component from "./Component.svelte";
import Component from "./File.svelte";
import ExampleComponent from "./Example.svelte";
import { loadAsFile } from "../../utils/example_processors";
6
ui/packages/app/src/components/input/File/types.ts
Normal file
@ -0,0 +1,6 @@
export interface FileData {
name: string;
size: number;
data: string;
is_example: false;
}
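Since the file input's `setValue` accepts the union `Array<string | FileData> | string | FileData | null`, downstream code often needs to narrow it. A hypothetical type guard, offered as a sketch rather than anything present in this diff:

```ts
import type { FileData } from "./types";

// Hypothetical guard: narrows the union handled by the file input.
function is_file_data(val: unknown): val is FileData {
  return (
    typeof val === "object" &&
    val !== null &&
    "data" in val &&
    "name" in val &&
    "size" in val
  );
}
```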
@ -1,5 +1,6 @@
<script>
export let value, examples_dir;
<script lang="ts">
export let value: string;
export let examples_dir: string;
</script>

<!-- svelte-ignore a11y-missing-attribute -->
@ -1,4 +1,4 @@
<script>
<script lang="ts">
import Cropper from "../../utils/Cropper.svelte";

import Upload from "../../utils/Upload.svelte";
@ -7,17 +7,32 @@
import ImageEditor from "../../utils/ImageEditor.svelte";
import Sketch from "../../utils/Sketch.svelte";
import Webcam from "../../utils/Webcam.svelte";
export let value, setValue, theme, static_src;
export let source = "upload";
export let tool = "editor";
export let value: null | string;
export let setValue: (val: typeof value) => typeof value;
export let theme: string;
export let static_src: string;
export let source: "canvas" | "webcam" | "upload" = "upload";
export let tool: "editor" | "select" = "editor";

let mode;
let sketch;
let mode: "edit" | "view" = "view";
let sketch: Sketch;

function handle_save({ detail }) {
interface FileData {
name: string;
size: number;
data: string;
is_example: false;
}

function handle_save({ detail }: { detail: string }) {
setValue(detail);
mode = "view";
}

function handle_load(val: string | FileData | (string | FileData)[] | null) {
setValue(val as string);
return val;
}
</script>

<div class="input-image">
@ -40,7 +55,7 @@
{#if source === "upload"}
<Upload
filetype="image/x-png,image/gif,image/jpeg"
load={setValue}
load={handle_load}
include_file_metadata={false}
{theme}
>
|
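`handle_load` exists because `Upload` can hand back strings, `FileData` objects, or arrays of either, while the image input with `include_file_metadata={false}` expects a bare data-URL string; the `as string` cast narrows that union. A minimal sketch of the same narrowing with an explicit runtime check instead of a bare cast; the check is an addition of this sketch, not part of the diff:

```ts
type UploadValue =
  | string
  | { data: string }
  | (string | { data: string })[]
  | null;

// Sketch: reduce whatever the upload emits to a single data-URL string.
function to_data_url(val: UploadValue): string | null {
  const first = Array.isArray(val) ? val[0] : val;
  if (first == null) return null;
  return typeof first === "string" ? first : first.data;
}
```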
@ -1,14 +1,20 @@
<script>
import ModifyUpload from "../../utils/ModifyUpload.svelte";
<script lang="ts">
import { getObjectFitSize, getSaliencyColor } from "../../utils/helpers";
import { afterUpdate } from "svelte";

export let value, setValue, interpretation, shape, theme;
export let value: string;
export let interpretation: Array<Array<number>>;
export let shape: undefined | [number, number];

let saliency_layer;
let image;
let saliency_layer: HTMLCanvasElement;
let image: HTMLImageElement;

const paintSaliency = (data, ctx, width, height) => {
const paintSaliency = (
data: Array<Array<number>>,
ctx: CanvasRenderingContext2D,
width: number,
height: number
) => {
var cell_width = width / data[0].length;
var cell_height = height / data.length;
var r = 0;
@ -42,11 +48,11 @@
}
let width = size.width;
let height = size.height;
saliency_layer.setAttribute("height", height);
saliency_layer.setAttribute("width", width);
saliency_layer.setAttribute("height", `${height}`);
saliency_layer.setAttribute("width", `${width}`);
paintSaliency(
interpretation,
saliency_layer.getContext("2d"),
saliency_layer.getContext("2d")!,
width,
height
);
|
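`paintSaliency` divides the canvas into a grid matching the interpretation matrix and fills each cell with a saliency color. A standalone sketch of that loop; the `rgba` fill style is a stand-in for `getSaliencyColor`, whose exact output this diff does not show, and it assumes values normalized to [0, 1]:

```ts
// Sketch: paint a rows x cols heatmap over a canvas 2d context.
function paint_saliency(
  data: Array<Array<number>>,
  ctx: CanvasRenderingContext2D,
  width: number,
  height: number
) {
  const cell_width = width / data[0].length;
  const cell_height = height / data.length;
  data.forEach((row, r) => {
    row.forEach((cell, c) => {
      // Stand-in for getSaliencyColor(cell); assumes cell is in [0, 1].
      ctx.fillStyle = `rgba(255, 0, 0, ${cell})`;
      ctx.fillRect(c * cell_width, r * cell_height, cell_width, cell_height);
    });
  });
}
```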
@ -1,4 +1,4 @@
import Component from "./Component.svelte";
import Component from "./Image.svelte";
import ExampleComponent from "./Example.svelte";
import Interpretation from "./Interpretation.svelte";
import { loadAsData } from "../../utils/example_processors";
@ -1,5 +1,5 @@
<script>
export let value;
<script lang="ts">
export let value: string;
</script>

<div class="input-number-example">{value}</div>
@ -1,7 +1,9 @@
<script>
<script lang="ts">
import { getSaliencyColor } from "../../utils/helpers";

export let value, interpretation, theme;
export let value: string;
export let interpretation: Array<[number, number]>;
export let theme: string;
</script>

<div class="input-number">
@ -1,12 +1,18 @@
<script>
export let value, setValue, theme;
<script lang="ts">
export let value: number;
export let setValue: (val: number) => number;
export let theme: string;

function handle_input(event: Event) {
setValue(parseFloat((event.target as HTMLInputElement).value));
}
</script>

<input
type="number"
class="input-number w-full rounded box-border p-2 focus:outline-none appearance-none"
{value}
on:input={(e) => setValue(parseFloat(e.target.value))}
on:input={handle_input}
{theme}
/>
|
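`handle_input` replaces the inline handler because, under strict TypeScript, `event.target` is `EventTarget | null`, so the old `e.target.value` no longer type-checks; the cast to `HTMLInputElement` restores access to `.value`. One caveat worth noting: `parseFloat` on an empty field yields `NaN`, which the handler above passes straight through to `setValue`. A guarded variant, where the NaN check is an assumption of this sketch and not part of the diff:

```ts
// Sketch: same cast as handle_input, but skip updates for NaN input.
function handle_input_guarded(
  event: Event,
  setValue: (val: number) => number
) {
  const parsed = parseFloat((event.target as HTMLInputElement).value);
  if (!Number.isNaN(parsed)) setValue(parsed);
}
```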
Some files were not shown because too many files have changed in this diff.