diff --git a/demo/calculator.py b/demo/calculator.py index bc4120e501..b94df4dfab 100644 --- a/demo/calculator.py +++ b/demo/calculator.py @@ -1,6 +1,10 @@ import gradio as gr +import time +import random def calculator(num1, operation, num2): + print("->", num1, operation, num2) + time.sleep(10 + 2 * random.random()) if operation == "add": return num1 + num2 elif operation == "subtract": @@ -22,6 +26,7 @@ iface = gr.Interface(calculator, title="test calculator", description="heres a sample toy calculator. enjoy!", flagging_options=["this", "or", "that"], + enable_queue=True ) if __name__ == "__main__": diff --git a/frontend/src/components/input/video.jsx b/frontend/src/components/input/video.jsx index 0be1f00e22..7556ac4fc9 100644 --- a/frontend/src/components/input/video.jsx +++ b/frontend/src/components/input/video.jsx @@ -89,11 +89,7 @@ class VideoInput extends BaseComponent { class VideoInputExample extends ComponentExample { render() { - return ( - - {this.props.value} - - ); + return {this.props.value}; } } diff --git a/frontend/src/gradio.jsx b/frontend/src/gradio.jsx index dfc7e44f1a..7c1e5c1cda 100644 --- a/frontend/src/gradio.jsx +++ b/frontend/src/gradio.jsx @@ -55,7 +55,11 @@ export class GradioInterface extends React.Component { constructor(props) { super(props); this.state = this.get_default_state(); + this.pending_response = false; this.state["examples_page"] = 0; + this.state["avg_duration"] = Array.isArray(this.props.avg_durations) + ? this.props.avg_durations[0] + : null; this.examples_dir = process.env.REACT_APP_BACKEND_URL + (this.props.examples_dir === null @@ -77,21 +81,31 @@ export class GradioInterface extends React.Component { state["submitting"] = false; state["error"] = false; state["complete"] = false; + state["last_duration"] = null; state["interpretation"] = null; state["just_flagged"] = false; state["has_changed"] = false; state["example_id"] = null; state["flag_index"] = null; state["queue_index"] = null; + state["initial_queue_index"] = null; return state; }; clear = () => { + this.pending_response = false; this.setState(this.get_default_state()); }; submit = () => { + if (this.pending_response) { + return; + } + this.pending_response = true; let input_state = []; for (let i = 0; i < this.props.input_components.length; i++) { - if (this.state[i] === null && this.props.input_components[i].optional !== true) { + if ( + this.state[i] === null && + this.props.input_components[i].optional !== true + ) { return; } input_state[i] = this.state[i]; @@ -105,8 +119,14 @@ export class GradioInterface extends React.Component { this.props .fn(input_state, "predict", this.queueCallback) .then((output) => { + if (!this.pending_response) { + return; + } + this.pending_response = false; let index_start = this.props.input_components.length; let new_state = {}; + new_state["last_duration"] = output["durations"][0]; + new_state["avg_duration"] = output["avg_durations"][0]; for (let [i, value] of output["data"].entries()) { new_state[index_start + i] = value; } @@ -122,6 +142,7 @@ export class GradioInterface extends React.Component { }) .catch((e) => { console.error(e); + this.pending_response = false; this.setState({ error: true, submitting: false @@ -153,9 +174,10 @@ export class GradioInterface extends React.Component { this.props.fn(component_state, "flag"); }; interpret = () => { - if (!this.state.complete) { + if (this.pending_response) { return; } + this.pending_response = true; let input_state = []; for (let i = 0; i < this.props.input_components.length; i++) { if 
(this.state[i] === null) { @@ -167,6 +189,10 @@ export class GradioInterface extends React.Component { this.props .fn(input_state, "interpret", this.queueCallback) .then((output) => { + if (!this.pending_response) { + return; + } + this.pending_response = false; this.setState({ interpretation: output["interpretation_scores"], submitting: false @@ -174,6 +200,7 @@ export class GradioInterface extends React.Component { }) .catch((e) => { console.error(e); + this.pending_response = false; this.setState({ error: true, submitting: false @@ -183,8 +210,13 @@ export class GradioInterface extends React.Component { removeInterpret = () => { this.setState({ interpretation: null }); }; - queueCallback = (queue_index) => { - this.setState({ queue_index: queue_index }); + queueCallback = (queue_index, is_initial) => { + let new_state = {}; + if (is_initial === true) { + new_state["initial_queue_index"] = queue_index; + } + new_state["queue_index"] = queue_index; + this.setState(new_state); }; takeScreenshot = () => { html2canvas(ReactDOM.findDOMNode(this).parentNode).then((canvas) => { @@ -222,9 +254,14 @@ export class GradioInterface extends React.Component { render() { let status = false; if (this.state.submitting) { + let expected_duration = this.state.avg_duration; + if (this.state.initial_queue_index && this.state.avg_duration !== null) { + expected_duration *= this.state.initial_queue_index + 2; + } status = (
- {this.state.queue_index !== null + + {this.state.queue_index !== null && this.state.queue_index >= 0 ? "queued @ " + this.state.queue_index : false} loading @@ -236,6 +273,12 @@ export class GradioInterface extends React.Component { error
); + } else if (this.state.complete && this.state.last_duration !== null) { + status = ( +
+ {this.state.last_duration.toFixed(2) + "s"} +
+ ); } return (
@@ -288,7 +331,7 @@ export class GradioInterface extends React.Component { >
{status} @@ -369,10 +412,11 @@ export class GradioInterface extends React.Component {
{this.props.examples ? ( - this.props.examples_per_page; + let page_count = 1; + let visible_pages = []; + if (paginate) { + selected_examples = selected_examples.slice( + this.state.page * this.props.examples_per_page, + (this.state.page + 1) * this.props.examples_per_page + ); + page_count = Math.ceil(examples_count / this.props.examples_per_page); + [0, this.state.page, page_count - 1].forEach((anchor) => { + for (let i = anchor - 2; i <= anchor + 2; i++) { + if (i >= 0 && i < page_count && !visible_pages.includes(i)) { + if ( + visible_pages.length > 0 && + i - visible_pages[visible_pages.length - 1] > 1 + ) { + visible_pages.push(null); + } + visible_pages.push(i); + } + } + }); + } return (

Examples

-
Page:
- +
{this.props.input_components.map((component, i) => { @@ -412,7 +489,8 @@ class GradioInterfaceExamples extends React.Component { - {this.props.examples.map((example_row, i) => { + {selected_examples.map((example_row, page_i) => { + let i = page_i + this.state.page * this.props.examples_per_page; return (
+ {paginate ? ( +
+ Pages: + {visible_pages.map((page) => + page === null ? ( +
...
+ ) : ( + + ) + )} +
+ ) : ( + false + )}
); } } + +class GradioTimer extends React.Component { + constructor(props) { + super(props); + this.state = { time: new Date(), start_time: new Date() }; + } + + componentDidMount() { + this.timerID = setInterval(() => this.tick(), 1000); + } + + componentWillUnmount() { + clearInterval(this.timerID); + } + + tick() { + this.setState({ + time: new Date() + }); + } + + render() { + return ( +
+ {Math.round( + (this.state.time.getTime() - this.state.start_time.getTime()) / 1000 + )} + .0 + {this.props.expected_duration !== null ? ( + <>/{this.props.expected_duration.toFixed(1)} + ) : ( + false + )} + s +
+ ); + } +} + +const MemoizedGradioInterfaceExamples = React.memo(GradioInterfaceExamples); +const MemoizedGradioTimer = React.memo(GradioTimer); diff --git a/frontend/src/index.jsx b/frontend/src/index.jsx index f60abac2f2..3885e84f52 100644 --- a/frontend/src/index.jsx +++ b/frontend/src/index.jsx @@ -24,7 +24,12 @@ let fn = async (endpoint, queue, data, action, queue_callback) => { data: data, action: action }); - let hash = await output.text(); + const output_json = await output.json(); + let [hash, queue_position] = [ + output_json["hash"], + output_json["queue_position"] + ]; + queue_callback(queue_position, /*is_initial=*/ true); let status = "UNKNOWN"; while (status != "COMPLETE" && status != "FAILED") { if (status != "UNKNOWN") { diff --git a/frontend/src/themes/compact.scss b/frontend/src/themes/compact.scss index 314e000d63..b2498ab159 100644 --- a/frontend/src/themes/compact.scss +++ b/frontend/src/themes/compact.scss @@ -107,6 +107,15 @@ html { @apply py-2 px-4; } } + .pages { + @apply flex gap-1 items-center mt-2; + } + .page { + @apply px-2 py-1 bg-gray-100 rounded; + } + .page.selected { + @apply bg-gray-300; + } } /* Input Components */ .input_text { diff --git a/frontend/src/themes/defaults.scss b/frontend/src/themes/defaults.scss index 510224c181..f325cd46e3 100644 --- a/frontend/src/themes/defaults.scss +++ b/frontend/src/themes/defaults.scss @@ -45,7 +45,7 @@ .gradio_interface[theme="default"] { .loading { - @apply absolute right-1; + @apply absolute right-2 flex items-center gap-2; } .loading img { @apply h-5 ml-2 inline-block; @@ -160,6 +160,15 @@ @apply cursor-pointer p-2 rounded bg-gray-100; } } + .pages { + @apply flex gap-1 items-center mt-2; + } + .page { + @apply px-2 py-1 bg-gray-100 rounded; + } + .page.selected { + @apply bg-gray-300; + } } /* Input Components */ .input_text { diff --git a/frontend/src/themes/huggingface.scss b/frontend/src/themes/huggingface.scss index a4358fccca..7f7ad9d06e 100644 --- a/frontend/src/themes/huggingface.scss +++ b/frontend/src/themes/huggingface.scss @@ -4,7 +4,7 @@ .gradio_interface[theme="huggingface"] { .loading { - @apply absolute right-1; + @apply absolute right-2 flex items-center gap-2 text-sm text-gray-700; } .loading img { @apply h-5; @@ -26,6 +26,7 @@ } .component_set { @apply border border-gray-100 p-4 rounded-lg flex flex-col flex-1 gap-3 bg-gradient-to-br from-gray-50 to-white; + min-height: 36px; } .panel_header { @apply flex items-center text-sm text-gray-700 mb-1.5; @@ -56,12 +57,6 @@ .screenshot_set { @apply hidden flex hidden flex-grow; } - .panel_button.left_panel_button { - @apply rounded-tr-none rounded-br-none; - } - .panel_button.right_panel_button { - @apply rounded-tl-none rounded-bl-none bg-gray-100 hover:bg-gray-300; - } .examples { h4 { @apply text-lg font-semibold my-2; @@ -75,16 +70,13 @@ .shortcut { @apply block text-xs; } - .examples_control button { - @apply bg-gray-100 hover:bg-gray-200 p-2; - } .examples_table:not(.gallery) { - @apply table-auto p-2 bg-gray-100 mt-4 rounded; + @apply table-auto p-2 bg-gray-50 mt-4 rounded border border-gray-100; tbody tr { @apply cursor-pointer; } thead { - @apply border-b-2 border-gray-300; + @apply border-b-2 border-gray-100; } tbody tr:hover { @apply bg-indigo-500 text-white; @@ -112,9 +104,18 @@ @apply font-bold; } td { - @apply cursor-pointer p-2 rounded bg-gray-100; + @apply cursor-pointer p-2 rounded bg-gray-50 ; } } + .pages { + @apply flex gap-1 items-center mt-2; + } + .page { + @apply px-2 py-1 bg-gray-100 rounded; + } + .page.selected { + @apply 
bg-gray-300; + } } /* Input Components */ .input_text { diff --git a/gradio.egg-info/PKG-INFO b/gradio.egg-info/PKG-INFO index c25e66f258..cd9668054b 100644 --- a/gradio.egg-info/PKG-INFO +++ b/gradio.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.0 Name: gradio -Version: 2.2.15 +Version: 2.3.0a0 Summary: Python library for easily interacting with trained machine learning models Home-page: https://github.com/gradio-app/gradio-UI Author: Abubakar Abid diff --git a/gradio/frontend/asset-manifest.json b/gradio/frontend/asset-manifest.json index 3b454f81bd..9b1826c593 100644 --- a/gradio/frontend/asset-manifest.json +++ b/gradio/frontend/asset-manifest.json @@ -1,6 +1,6 @@ { "files": { - "main.css": "/static/css/main.8fde07f5.css", + "main.css": "/static/css/main.2ddd43af.css", "main.js": "/static/bundle.js", "index.html": "/index.html", "static/bundle.js.LICENSE.txt": "/static/bundle.js.LICENSE.txt", @@ -9,7 +9,7 @@ }, "entrypoints": [ "static/bundle.css", - "static/css/main.8fde07f5.css", + "static/css/main.2ddd43af.css", "static/bundle.js" ] } \ No newline at end of file diff --git a/gradio/frontend/index.html b/gradio/frontend/index.html index af2ccf3716..dde09d84a6 100644 --- a/gradio/frontend/index.html +++ b/gradio/frontend/index.html @@ -8,4 +8,4 @@ window.config = {{ config|tojson }}; } catch (e) { window.config = {}; - }Gradio
\ No newline at end of file + }Gradio
\ No newline at end of file diff --git a/gradio/interface.py b/gradio/interface.py index 506ac8e30e..7712a7e27c 100644 --- a/gradio/interface.py +++ b/gradio/interface.py @@ -123,6 +123,7 @@ class Interface: raise ValueError("Invalid value for parameter: interpretation") self.predict = fn + self.predict_durations = [[0, 0]] * len(fn) self.function_names = [func.__name__ for func in fn] self.__name__ = ", ".join(self.function_names) self.verbose = verbose @@ -335,14 +336,6 @@ class Interface: predictions[i]) if predictions[i] is not None else None for i, output_component in enumerate(self.output_components)] return processed_output, durations - def embed(self, processed_input): - if self.embedding == "default": - embeddings = np.concatenate([input_component.embed(processed_input[i]) - for i, input_component in enumerate(self.input_components)]) - else: - embeddings = self.embedding(*processed_input) - return embeddings - def interpret(self, raw_input): """ Runs the interpretation command for the machine learning model. Handles both the "default" out-of-the-box diff --git a/gradio/networking.py b/gradio/networking.py index 1cbdfce062..8a15ddf027 100644 --- a/gradio/networking.py +++ b/gradio/networking.py @@ -178,7 +178,14 @@ def enable_sharing(path): def predict(): raw_input = request.json["data"] prediction, durations = app.interface.process(raw_input) - output = {"data": prediction, "durations": durations} + avg_durations = [] + for i, duration in enumerate(durations): + app.interface.predict_durations[i][0] += duration + app.interface.predict_durations[i][1] += 1 + avg_durations.append(app.interface.predict_durations[i][0] + / app.interface.predict_durations[i][1]) + app.interface.config["avg_durations"] = avg_durations + output = {"data": prediction, "durations": durations, "avg_durations": avg_durations} if app.interface.allow_flagging == "auto": try: flag_index = flag_data(raw_input, prediction, @@ -398,8 +405,8 @@ def file(path): def queue_push(): data = request.json["data"] action = request.json["action"] - job_hash = queue.push({"data": data}, action) - return job_hash + job_hash, queue_position = queue.push({"data": data}, action) + return {"hash": job_hash, "queue_position": queue_position} @app.route("/api/queue/status/", methods=["POST"]) @login_check diff --git a/gradio/queue.py b/gradio/queue.py index b642eff913..8a6265343c 100644 --- a/gradio/queue.py +++ b/gradio/queue.py @@ -7,16 +7,16 @@ DB_FILE = "gradio_queue.db" def generate_hash(): generate = True + conn = sqlite3.connect(DB_FILE) + c = conn.cursor() while generate: hash = uuid.uuid4().hex - conn = sqlite3.connect(DB_FILE) - c = conn.cursor() c.execute(""" SELECT hash FROM queue WHERE hash = ?; """, (hash,)) - conn.commit() generate = c.fetchone() is not None + conn.commit() return hash def init(): @@ -24,7 +24,6 @@ def init(): os.remove(DB_FILE) conn = sqlite3.connect(DB_FILE) c = conn.cursor() - c.execute("BEGIN EXCLUSIVE") c.execute("""CREATE TABLE queue ( queue_index integer PRIMARY KEY, hash text, @@ -71,22 +70,38 @@ def push(input_data, action): hash = generate_hash() conn = sqlite3.connect(DB_FILE) c = conn.cursor() - c.execute("BEGIN EXCLUSIVE") c.execute(""" INSERT INTO queue (hash, input_data, action) VALUES (?, ?, ?); - """, (hash, input_data, action)) + """, (hash, input_data, action)) + queue_index = c.lastrowid + c.execute(""" + SELECT COUNT(*) FROM queue WHERE queue_index < ? 
and popped = 0; + """, (queue_index,)) + queue_position = c.fetchone()[0] + if queue_position is None: + conn.commit() + raise ValueError("Hash not found.") + elif queue_position == 0: + c.execute(""" + SELECT COUNT(*) FROM jobs WHERE status = "PENDING"; + """) + result = c.fetchone() + if result[0] == 0: + queue_position -= 1 conn.commit() - return hash + return hash, queue_position def get_status(hash): conn = sqlite3.connect(DB_FILE) c = conn.cursor() - c.execute("BEGIN EXCLUSIVE") c.execute(""" SELECT queue_index, popped FROM queue WHERE hash = ?; """, (hash,)) result = c.fetchone() + if result is None: + conn.commit() + raise ValueError("Hash not found.") if result[1] == 1: # in jobs c.execute(""" SELECT status, output_data, error_message FROM jobs WHERE hash = ?; @@ -110,14 +125,22 @@ def get_status(hash): conn.commit() output_data = json.loads(output_data) return "COMPLETE", output_data - else: + else: # in queue queue_index = result[0] c.execute(""" SELECT COUNT(*) FROM queue WHERE queue_index < ? and popped = 0; """, (queue_index,)) result = c.fetchone() + queue_position = result[0] + if queue_position == 0: + c.execute(""" + SELECT COUNT(*) FROM jobs WHERE status = "PENDING"; + """) + result = c.fetchone() + if result[0] == 0: + queue_position -= 1 conn.commit() - return "QUEUED", result[0] + return "QUEUED", queue_position def start_job(hash): conn = sqlite3.connect(DB_FILE) @@ -134,7 +157,6 @@ def start_job(hash): def fail_job(hash, error_message): conn = sqlite3.connect(DB_FILE) c = conn.cursor() - c.execute("BEGIN EXCLUSIVE") c.execute(""" UPDATE jobs SET status = 'FAILED', error_message = ? WHERE hash = ?; """, (error_message, hash,)) @@ -144,7 +166,6 @@ def pass_job(hash, output_data): output_data = json.dumps(output_data) conn = sqlite3.connect(DB_FILE) c = conn.cursor() - c.execute("BEGIN EXCLUSIVE") c.execute(""" UPDATE jobs SET status = 'COMPLETE', output_data = ? WHERE hash = ?; """, (output_data, hash,)) diff --git a/gradio/version.txt b/gradio/version.txt index 5bd8c5430c..4defb404c3 100644 --- a/gradio/version.txt +++ b/gradio/version.txt @@ -1 +1 @@ -2.2.15 +2.3.0a diff --git a/setup.py b/setup.py index e73c177302..13b97e7e0e 100644 --- a/setup.py +++ b/setup.py @@ -5,7 +5,7 @@ except ImportError: setup( name='gradio', - version='2.2.15', + version='2.3.0a', include_package_data=True, description='Python library for easily interacting with trained machine learning models', author='Abubakar Abid',
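
To make the control flow of the gradio.jsx changes easier to follow in isolation: the new pending_response flag turns submit() and interpret() into single-flight operations — clicks are ignored while a request is outstanding, errors re-arm the flag, and a response that arrives after clear() has reset the flag is silently dropped. The sketch below is a loose Python (async) analogue of that pattern, written only to spell the logic out; the class and method names are invented here, and the real component simply sets a boolean field on itself.

class SingleFlight:
    """Loose analogue of the pending_response flag added to GradioInterface."""

    def __init__(self):
        self.pending = False

    async def submit(self, request, *args):
        if self.pending:          # a request is already in flight: ignore the click
            return None
        self.pending = True
        try:
            result = await request(*args)
        except Exception:
            self.pending = False  # mirrors the .catch() handler
            raise
        if not self.pending:      # clear() ran mid-flight: drop the stale response
            return None
        self.pending = False
        return result

    def clear(self):
        # Re-enables submission and invalidates any response still in flight.
        self.pending = False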
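
While a job is queued or running, the interface now renders an elapsed-time counter (GradioTimer) next to an expected duration. The expectation starts from the server-reported average runtime and, when the job entered the queue behind other work, is scaled by initial_queue_index + 2 — presumably the jobs ahead, the job already running, and the job itself. A small Python restatement of that estimate; the function name is illustrative and the heuristic mirrors the render() change above.

def expected_duration(avg_duration, initial_queue_index):
    """ETA (seconds) shown next to the elapsed-time counter.

    avg_duration:        server-reported mean runtime of the predict call,
                         or None if no average has been received yet.
    initial_queue_index: queue position reported when the job was pushed;
                         0 or -1 means it started immediately.
    """
    if avg_duration is None:
        return None
    if initial_queue_index and initial_queue_index > 0:
        # initial_queue_index jobs ahead + the one already running + this job
        return avg_duration * (initial_queue_index + 2)
    return avg_duration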
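
GradioInterfaceExamples now paginates once an interface has more examples than examples_per_page: the rows are sliced to the current page, and the pager lists up to two page numbers on either side of three anchors — the first page, the current page, and the last page — inserting a separator wherever those windows do not touch. The same windowing logic written out in Python for reference; None plays the role of the JSX null, which the component renders as "...".

import math

def visible_pages(current_page, examples_count, examples_per_page):
    """Page numbers to show in the pager; None marks an elided gap ("...")."""
    page_count = math.ceil(examples_count / examples_per_page)
    pages = []
    for anchor in (0, current_page, page_count - 1):
        for i in range(anchor - 2, anchor + 3):      # anchor-2 .. anchor+2
            if 0 <= i < page_count and i not in pages:
                if pages and i - pages[-1] > 1:
                    pages.append(None)               # gap between windows
                pages.append(i)
    return pages

# e.g. visible_pages(current_page=7, examples_count=300, examples_per_page=10)
# -> [0, 1, 2, None, 5, 6, 7, 8, 9, None, 27, 28, 29]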
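
On the wire, the queue push endpoint used to answer with the bare job hash as plain text; networking.py now returns JSON carrying both the hash and the starting queue position, and index.jsx passes that position straight to the queue callback flagged as the initial one (this is what seeds the expected-duration estimate) before it begins polling the status endpoint. A rough Python client for the push step, assuming the requests library; the route path is inferred from the handler and the neighbouring /api/queue/status/ route, and the function and parameter names are illustrative.

import requests

def push_job(base_url, data, action, queue_callback):
    """Enqueue a job and report its starting queue position to the UI layer."""
    response = requests.post(
        base_url + "/api/queue/push/",
        json={"data": data, "action": action},
    )
    payload = response.json()           # {"hash": ..., "queue_position": ...}
    job_hash = payload["hash"]
    queue_position = payload["queue_position"]
    # The initial position feeds the ETA estimate; subsequent positions come
    # from polling the status endpoint until the job completes or fails.
    queue_callback(queue_position, is_initial=True)
    return job_hash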
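
The /api/predict/ handler now keeps a running total and call count per predict function in Interface.predict_durations and returns the resulting mean ("avg_durations") alongside the raw per-call durations; the frontend stores it as avg_duration, and it is also written into the interface config. The bookkeeping is a plain cumulative average, isolated below into a small helper for reference — the class is illustrative, since in the diff the state lives directly on the Interface and the Flask handler.

class DurationTracker:
    """Cumulative average of prediction times, one slot per predict function."""

    def __init__(self, num_functions):
        # One [total_seconds, num_calls] pair per function; a comprehension is
        # used so each slot is an independent list ([[0, 0]] * n would alias a
        # single inner list across all slots).
        self.predict_durations = [[0.0, 0] for _ in range(num_functions)]

    def record(self, durations):
        """Fold one request's per-function durations into the running averages."""
        avg_durations = []
        for i, duration in enumerate(durations):
            self.predict_durations[i][0] += duration
            self.predict_durations[i][1] += 1
            avg_durations.append(
                self.predict_durations[i][0] / self.predict_durations[i][1]
            )
        return avg_durations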
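
In queue.py, push() now reports where a new job sits: it counts the un-popped rows ahead of the row it just inserted and, when nothing is ahead, checks whether any job is already PENDING — if not, the position is returned as -1, which the frontend treats as "not queued, just loading" (hence the queue_index >= 0 check in render()). get_status() applies the same adjustment while the client polls. A standalone sketch of that counting step, reusing the table layout from the diff; the helper name is illustrative.

import sqlite3

DB_FILE = "gradio_queue.db"

def queue_position(queue_index):
    """How many un-popped jobs sit ahead of `queue_index`.

    Returns -1 when nothing is ahead and no job is PENDING, i.e. the new job
    should start immediately rather than wait.
    """
    conn = sqlite3.connect(DB_FILE)
    c = conn.cursor()
    c.execute(
        "SELECT COUNT(*) FROM queue WHERE queue_index < ? AND popped = 0;",
        (queue_index,),
    )
    position = c.fetchone()[0]
    if position == 0:
        # No queued work ahead -- but a worker may already be busy.
        c.execute("SELECT COUNT(*) FROM jobs WHERE status = 'PENDING';")
        if c.fetchone()[0] == 0:
            position -= 1
    conn.commit()
    return position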