updated PyPi version
This commit is contained in:
  parent e52fc1d58f
  commit 811643f3d5
@@ -1,6 +1,10 @@
 import gradio as gr
+import time
+import random
 
 def calculator(num1, operation, num2):
+    print("->", num1, operation, num2)
+    time.sleep(10 + 2 * random.random())
     if operation == "add":
         return num1 + num2
     elif operation == "subtract":
@@ -22,6 +26,7 @@ iface = gr.Interface(calculator,
     title="test calculator",
     description="heres a sample toy calculator. enjoy!",
     flagging_options=["this", "or", "that"],
+    enable_queue=True
 )
 
 if __name__ == "__main__":
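
The demo change above adds an artificial delay and turns on the request queue, which is what exercises the new queue-position and timer UI later in this commit. A minimal runnable sketch in the same spirit (illustration only; the input/output components and the remaining operation branches are assumed, not taken from this diff):

import random
import time

import gradio as gr


def calculator(num1, operation, num2):
    # Artificial delay so the queue position / ETA display has something to measure.
    time.sleep(10 + 2 * random.random())
    if operation == "add":
        return num1 + num2
    elif operation == "subtract":
        return num1 - num2
    elif operation == "multiply":
        return num1 * num2
    elif operation == "divide":
        return num1 / num2


iface = gr.Interface(
    calculator,
    ["number", gr.inputs.Radio(["add", "subtract", "multiply", "divide"]), "number"],
    "number",
    title="test calculator",
    enable_queue=True,  # route requests through the sqlite-backed queue (gradio/queue.py)
)

if __name__ == "__main__":
    iface.launch()
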
@@ -89,11 +89,7 @@ class VideoInput extends BaseComponent {
 
 class VideoInputExample extends ComponentExample {
   render() {
-    return (
-      <span className="input_video_example">
-        {this.props.value}
-      </span>
-    );
+    return <span className="input_video_example">{this.props.value}</span>;
   }
 }
 
@@ -55,7 +55,11 @@ export class GradioInterface extends React.Component {
   constructor(props) {
     super(props);
     this.state = this.get_default_state();
+    this.pending_response = false;
     this.state["examples_page"] = 0;
+    this.state["avg_duration"] = Array.isArray(this.props.avg_durations)
+      ? this.props.avg_durations[0]
+      : null;
     this.examples_dir =
       process.env.REACT_APP_BACKEND_URL +
       (this.props.examples_dir === null
@@ -77,21 +81,31 @@ export class GradioInterface extends React.Component {
     state["submitting"] = false;
     state["error"] = false;
     state["complete"] = false;
+    state["last_duration"] = null;
     state["interpretation"] = null;
     state["just_flagged"] = false;
     state["has_changed"] = false;
     state["example_id"] = null;
     state["flag_index"] = null;
     state["queue_index"] = null;
+    state["initial_queue_index"] = null;
     return state;
   };
   clear = () => {
+    this.pending_response = false;
     this.setState(this.get_default_state());
   };
   submit = () => {
+    if (this.pending_response) {
+      return;
+    }
+    this.pending_response = true;
     let input_state = [];
     for (let i = 0; i < this.props.input_components.length; i++) {
-      if (this.state[i] === null && this.props.input_components[i].optional !== true) {
+      if (
+        this.state[i] === null &&
+        this.props.input_components[i].optional !== true
+      ) {
         return;
       }
       input_state[i] = this.state[i];
@@ -105,8 +119,14 @@ export class GradioInterface extends React.Component {
     this.props
       .fn(input_state, "predict", this.queueCallback)
       .then((output) => {
+        if (!this.pending_response) {
+          return;
+        }
+        this.pending_response = false;
         let index_start = this.props.input_components.length;
         let new_state = {};
+        new_state["last_duration"] = output["durations"][0];
+        new_state["avg_duration"] = output["avg_durations"][0];
         for (let [i, value] of output["data"].entries()) {
           new_state[index_start + i] = value;
         }
@@ -122,6 +142,7 @@ export class GradioInterface extends React.Component {
       })
       .catch((e) => {
         console.error(e);
+        this.pending_response = false;
         this.setState({
           error: true,
           submitting: false
@@ -153,9 +174,10 @@ export class GradioInterface extends React.Component {
     this.props.fn(component_state, "flag");
   };
   interpret = () => {
-    if (!this.state.complete) {
+    if (this.pending_response) {
       return;
     }
+    this.pending_response = true;
     let input_state = [];
     for (let i = 0; i < this.props.input_components.length; i++) {
       if (this.state[i] === null) {
@@ -167,6 +189,10 @@ export class GradioInterface extends React.Component {
     this.props
       .fn(input_state, "interpret", this.queueCallback)
       .then((output) => {
+        if (!this.pending_response) {
+          return;
+        }
+        this.pending_response = false;
         this.setState({
           interpretation: output["interpretation_scores"],
           submitting: false
@@ -174,6 +200,7 @@ export class GradioInterface extends React.Component {
       })
       .catch((e) => {
         console.error(e);
+        this.pending_response = false;
         this.setState({
           error: true,
           submitting: false
@@ -183,8 +210,13 @@ export class GradioInterface extends React.Component {
   removeInterpret = () => {
     this.setState({ interpretation: null });
   };
-  queueCallback = (queue_index) => {
-    this.setState({ queue_index: queue_index });
+  queueCallback = (queue_index, is_initial) => {
+    let new_state = {};
+    if (is_initial === true) {
+      new_state["initial_queue_index"] = queue_index;
+    }
+    new_state["queue_index"] = queue_index;
+    this.setState(new_state);
   };
   takeScreenshot = () => {
     html2canvas(ReactDOM.findDOMNode(this).parentNode).then((canvas) => {
@@ -222,9 +254,14 @@ export class GradioInterface extends React.Component {
   render() {
     let status = false;
     if (this.state.submitting) {
+      let expected_duration = this.state.avg_duration;
+      if (this.state.initial_queue_index && this.state.avg_duration !== null) {
+        expected_duration *= this.state.initial_queue_index + 2;
+      }
       status = (
         <div className="loading">
-          {this.state.queue_index !== null
+          <MemoizedGradioTimer expected_duration={expected_duration} />
+          {this.state.queue_index !== null && this.state.queue_index >= 0
             ? "queued @ " + this.state.queue_index
             : false}
           <img alt="loading" src={logo_loading} />
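
The ETA shown while a request is pending is the backend's running average duration, scaled up when the request started deep in the queue. A hedged sketch of that arithmetic (mirroring expected_duration *= this.state.initial_queue_index + 2 above; reading the "+ 2" as "jobs ahead, plus the one running, plus this one" is an interpretation, not something this diff states):

def expected_wait(avg_duration, initial_queue_index=None):
    # avg_duration: average seconds per prediction, from output["avg_durations"][0]
    # initial_queue_index: queue position reported when the job was first pushed
    if avg_duration is None:
        return None
    if initial_queue_index:  # truthy check, matching the JS: None and 0 leave the average unscaled
        return avg_duration * (initial_queue_index + 2)
    return avg_duration


# e.g. avg_duration=11.0 and a job pushed at queue position 3 -> 11.0 * 5 = 55.0 seconds shown as the ETA
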
@@ -236,6 +273,12 @@ export class GradioInterface extends React.Component {
           <img className="loading_failed" alt="error" src={logo_error} />
         </div>
       );
+    } else if (this.state.complete && this.state.last_duration !== null) {
+      status = (
+        <div className="loading">
+          {this.state.last_duration.toFixed(2) + "s"}
+        </div>
+      );
     }
     return (
       <div className="gradio_interface" theme={this.props.theme}>
@@ -288,7 +331,7 @@ export class GradioInterface extends React.Component {
         >
           <div
             className={classNames("component_set", "relative", {
-              "opacity-50": status && !this.props.live
+              "opacity-50": this.pending_response && !this.props.live
             })}
           >
             {status}
@@ -369,10 +412,11 @@ export class GradioInterface extends React.Component {
           </div>
         </div>
         {this.props.examples ? (
-          <GradioInterfaceExamples
+          <MemoizedGradioInterfaceExamples
            examples={this.props.examples}
            examples_dir={this.examples_dir}
            example_id={this.state.example_id}
+            examples_per_page={this.props.examples_per_page}
            input_components={this.props.input_components}
            output_components={this.props.output_components}
            handleExampleChange={this.handleExampleChange}
@@ -386,14 +430,47 @@ export class GradioInterface extends React.Component {
 }
 
 class GradioInterfaceExamples extends React.Component {
+  constructor(props) {
+    super(props);
+    this.state = { page: 0 };
+  }
+  set_page(page) {
+    this.setState({ page: page });
+  }
   render() {
+    let selected_examples = this.props.examples.slice();
+    let examples_count = this.props.examples.length;
+    let paginate = examples_count > this.props.examples_per_page;
+    let page_count = 1;
+    let visible_pages = [];
+    if (paginate) {
+      selected_examples = selected_examples.slice(
+        this.state.page * this.props.examples_per_page,
+        (this.state.page + 1) * this.props.examples_per_page
+      );
+      page_count = Math.ceil(examples_count / this.props.examples_per_page);
+      [0, this.state.page, page_count - 1].forEach((anchor) => {
+        for (let i = anchor - 2; i <= anchor + 2; i++) {
+          if (i >= 0 && i < page_count && !visible_pages.includes(i)) {
+            if (
+              visible_pages.length > 0 &&
+              i - visible_pages[visible_pages.length - 1] > 1
+            ) {
+              visible_pages.push(null);
+            }
+            visible_pages.push(i);
+          }
+        }
+      });
+    }
     return (
       <div className="examples">
         <h4>Examples</h4>
-        <div className="pages hidden">Page:</div>
-        <table className={classNames("examples_table", {
-          "gallery": this.props.input_components.length === 1
-        })}>
+        <table
+          className={classNames("examples_table", {
+            gallery: this.props.input_components.length === 1
+          })}
+        >
           <thead>
             <tr>
               {this.props.input_components.map((component, i) => {
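
The pagination block above renders a window of page numbers around three anchors (first page, current page, last page) and pushes a null between non-adjacent runs as an ellipsis marker. A small Python sketch of the same windowing rule, for illustration only:

import math


def visible_pages(page, examples_count, examples_per_page):
    """Return page indices to render, with None standing in for an ellipsis."""
    page_count = math.ceil(examples_count / examples_per_page)
    pages = []
    for anchor in (0, page, page_count - 1):
        for i in range(anchor - 2, anchor + 3):
            if 0 <= i < page_count and i not in pages:
                if pages and i - pages[-1] > 1:
                    pages.append(None)  # gap between runs -> render "..."
                pages.append(i)
    return pages


# visible_pages(page=7, examples_count=200, examples_per_page=10)
# -> [0, 1, 2, None, 5, 6, 7, 8, 9, None, 17, 18, 19]
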
@@ -412,7 +489,8 @@ class GradioInterfaceExamples extends React.Component {
             </tr>
           </thead>
           <tbody>
-            {this.props.examples.map((example_row, i) => {
+            {selected_examples.map((example_row, page_i) => {
+              let i = page_i + this.state.page * this.props.examples_per_page;
              return (
                <tr
                  key={i}
@@ -441,7 +519,70 @@ class GradioInterfaceExamples extends React.Component {
             })}
           </tbody>
         </table>
+        {paginate ? (
+          <div class="pages">
+            Pages:
+            {visible_pages.map((page) =>
+              page === null ? (
+                <div>...</div>
+              ) : (
+                <button
+                  className={classNames("page", {
+                    selected: page === this.state.page
+                  })}
+                  key={page}
+                  onClick={this.set_page.bind(this, page)}
+                >
+                  {page + 1}
+                </button>
+              )
+            )}
+          </div>
+        ) : (
+          false
+        )}
       </div>
     );
   }
 }
+
+class GradioTimer extends React.Component {
+  constructor(props) {
+    super(props);
+    this.state = { time: new Date(), start_time: new Date() };
+  }
+
+  componentDidMount() {
+    this.timerID = setInterval(() => this.tick(), 1000);
+  }
+
+  componentWillUnmount() {
+    clearInterval(this.timerID);
+  }
+
+  tick() {
+    this.setState({
+      time: new Date()
+    });
+  }
+
+  render() {
+    return (
+      <div>
+        {Math.round(
+          (this.state.time.getTime() - this.state.start_time.getTime()) / 1000
+        )}
+        .0
+        {this.props.expected_duration !== null ? (
+          <>/{this.props.expected_duration.toFixed(1)}</>
+        ) : (
+          false
+        )}
+        s
+      </div>
+    );
+  }
+}
+
+const MemoizedGradioInterfaceExamples = React.memo(GradioInterfaceExamples);
+const MemoizedGradioTimer = React.memo(GradioTimer);
@@ -24,7 +24,12 @@ let fn = async (endpoint, queue, data, action, queue_callback) => {
       data: data,
       action: action
     });
-    let hash = await output.text();
+    const output_json = await output.json();
+    let [hash, queue_position] = [
+      output_json["hash"],
+      output_json["queue_position"]
+    ];
+    queue_callback(queue_position, /*is_initial=*/ true);
    let status = "UNKNOWN";
    while (status != "COMPLETE" && status != "FAILED") {
      if (status != "UNKNOWN") {
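
Client-side, the queue flow is now: push the payload, read back both a job hash and the initial queue position, report that position to the UI once (is_initial), then poll the status endpoint until the job is COMPLETE or FAILED. A rough Python equivalent of that loop (the push endpoint path, the status request body, and the status response fields are assumptions inferred from the server code later in this commit, not a documented API):

import time

import requests

BASE = "http://localhost:7860"  # assumed local gradio server


def queued_predict(data, action="predict", poll_interval=1.0):
    # Push the job; the server now returns a hash and the initial queue position.
    pushed = requests.post(f"{BASE}/api/queue/push/",  # assumed path
                           json={"data": data, "action": action}).json()
    job_hash, queue_position = pushed["hash"], pushed["queue_position"]
    print(f"queued @ {queue_position}")

    status = "UNKNOWN"
    result = None
    while status not in ("COMPLETE", "FAILED"):
        time.sleep(poll_interval)
        result = requests.post(f"{BASE}/api/queue/status/",
                               json={"hash": job_hash}).json()  # assumed payload/response shape
        status = result["status"]
    return result
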
@@ -107,6 +107,15 @@ html {
       @apply py-2 px-4;
     }
   }
+  .pages {
+    @apply flex gap-1 items-center mt-2;
+  }
+  .page {
+    @apply px-2 py-1 bg-gray-100 rounded;
+  }
+  .page.selected {
+    @apply bg-gray-300;
+  }
 }
 /* Input Components */
 .input_text {
@@ -45,7 +45,7 @@
 
 .gradio_interface[theme="default"] {
   .loading {
-    @apply absolute right-1;
+    @apply absolute right-2 flex items-center gap-2;
   }
   .loading img {
     @apply h-5 ml-2 inline-block;
@@ -160,6 +160,15 @@
       @apply cursor-pointer p-2 rounded bg-gray-100;
     }
   }
+  .pages {
+    @apply flex gap-1 items-center mt-2;
+  }
+  .page {
+    @apply px-2 py-1 bg-gray-100 rounded;
+  }
+  .page.selected {
+    @apply bg-gray-300;
+  }
 }
 /* Input Components */
 .input_text {
@@ -4,7 +4,7 @@
 
 .gradio_interface[theme="huggingface"] {
   .loading {
-    @apply absolute right-1;
+    @apply absolute right-2 flex items-center gap-2 text-sm text-gray-700;
   }
   .loading img {
     @apply h-5;
@@ -26,6 +26,7 @@
   }
   .component_set {
     @apply border border-gray-100 p-4 rounded-lg flex flex-col flex-1 gap-3 bg-gradient-to-br from-gray-50 to-white;
+    min-height: 36px;
   }
   .panel_header {
     @apply flex items-center text-sm text-gray-700 mb-1.5;
@@ -56,12 +57,6 @@
   .screenshot_set {
     @apply hidden flex hidden flex-grow;
   }
-  .panel_button.left_panel_button {
-    @apply rounded-tr-none rounded-br-none;
-  }
-  .panel_button.right_panel_button {
-    @apply rounded-tl-none rounded-bl-none bg-gray-100 hover:bg-gray-300;
-  }
   .examples {
     h4 {
       @apply text-lg font-semibold my-2;
@@ -75,16 +70,13 @@
   .shortcut {
     @apply block text-xs;
   }
-  .examples_control button {
-    @apply bg-gray-100 hover:bg-gray-200 p-2;
-  }
   .examples_table:not(.gallery) {
-    @apply table-auto p-2 bg-gray-100 mt-4 rounded;
+    @apply table-auto p-2 bg-gray-50 mt-4 rounded border border-gray-100;
     tbody tr {
       @apply cursor-pointer;
     }
     thead {
-      @apply border-b-2 border-gray-300;
+      @apply border-b-2 border-gray-100;
     }
     tbody tr:hover {
       @apply bg-indigo-500 text-white;
@@ -112,9 +104,18 @@
       @apply font-bold;
     }
     td {
-      @apply cursor-pointer p-2 rounded bg-gray-100;
+      @apply cursor-pointer p-2 rounded bg-gray-50 ;
     }
   }
+  .pages {
+    @apply flex gap-1 items-center mt-2;
+  }
+  .page {
+    @apply px-2 py-1 bg-gray-100 rounded;
+  }
+  .page.selected {
+    @apply bg-gray-300;
+  }
 }
 /* Input Components */
 .input_text {
@@ -1,6 +1,6 @@
 Metadata-Version: 1.0
 Name: gradio
-Version: 2.2.15
+Version: 2.3.0a0
 Summary: Python library for easily interacting with trained machine learning models
 Home-page: https://github.com/gradio-app/gradio-UI
 Author: Abubakar Abid
@@ -1,6 +1,6 @@
 {
   "files": {
-    "main.css": "/static/css/main.8fde07f5.css",
+    "main.css": "/static/css/main.2ddd43af.css",
     "main.js": "/static/bundle.js",
     "index.html": "/index.html",
     "static/bundle.js.LICENSE.txt": "/static/bundle.js.LICENSE.txt",
@@ -9,7 +9,7 @@
   },
   "entrypoints": [
     "static/bundle.css",
-    "static/css/main.8fde07f5.css",
+    "static/css/main.2ddd43af.css",
     "static/bundle.js"
   ]
 }
@@ -8,4 +8,4 @@
       window.config = {{ config|tojson }};
     } catch (e) {
       window.config = {};
-    }</script><script src="https://cdnjs.cloudflare.com/ajax/libs/iframe-resizer/4.3.1/iframeResizer.contentWindow.min.js"></script><title>Gradio</title><link href="static/bundle.css" rel="stylesheet"><link href="static/css/main.8fde07f5.css" rel="stylesheet"></head><body style="height:100%"><div id="root" style="height:100%"></div><script src="static/bundle.js"></script></body></html>
+    }</script><script src="https://cdnjs.cloudflare.com/ajax/libs/iframe-resizer/4.3.1/iframeResizer.contentWindow.min.js"></script><title>Gradio</title><link href="static/bundle.css" rel="stylesheet"><link href="static/css/main.2ddd43af.css" rel="stylesheet"></head><body style="height:100%"><div id="root" style="height:100%"></div><script src="static/bundle.js"></script></body></html>
@@ -123,6 +123,7 @@ class Interface:
             raise ValueError("Invalid value for parameter: interpretation")
 
         self.predict = fn
+        self.predict_durations = [[0, 0]] * len(fn)
         self.function_names = [func.__name__ for func in fn]
         self.__name__ = ", ".join(self.function_names)
         self.verbose = verbose
@@ -335,14 +336,6 @@ class Interface:
             predictions[i]) if predictions[i] is not None else None for i, output_component in enumerate(self.output_components)]
         return processed_output, durations
 
-    def embed(self, processed_input):
-        if self.embedding == "default":
-            embeddings = np.concatenate([input_component.embed(processed_input[i])
-                                         for i, input_component in enumerate(self.input_components)])
-        else:
-            embeddings = self.embedding(*processed_input)
-        return embeddings
-
     def interpret(self, raw_input):
         """
         Runs the interpretation command for the machine learning model. Handles both the "default" out-of-the-box
@@ -178,7 +178,14 @@ def enable_sharing(path):
 def predict():
     raw_input = request.json["data"]
     prediction, durations = app.interface.process(raw_input)
-    output = {"data": prediction, "durations": durations}
+    avg_durations = []
+    for i, duration in enumerate(durations):
+        app.interface.predict_durations[i][0] += duration
+        app.interface.predict_durations[i][1] += 1
+        avg_durations.append(app.interface.predict_durations[i][0]
+                              / app.interface.predict_durations[i][1])
+    app.interface.config["avg_durations"] = avg_durations
+    output = {"data": prediction, "durations": durations, "avg_durations": avg_durations}
     if app.interface.allow_flagging == "auto":
         try:
             flag_index = flag_data(raw_input, prediction,
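
The backend now keeps a running total and call count per predict function in interface.predict_durations (initialized to [[0, 0]] * len(fn) in interface.py above) and publishes the running mean as avg_durations, which the frontend uses for its ETA. A toy illustration of that bookkeeping:

# predict_durations[i] == [total_seconds, number_of_calls] for the i-th predict fn
predict_durations = [[0, 0]]

for duration in (12.0, 10.0, 11.5):  # three simulated requests
    predict_durations[0][0] += duration
    predict_durations[0][1] += 1
    avg_durations = [total / count for total, count in predict_durations]
    print(avg_durations)
# -> [12.0], then [11.0], then [11.166666666666666]
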
@@ -398,8 +405,8 @@ def file(path):
 def queue_push():
     data = request.json["data"]
     action = request.json["action"]
-    job_hash = queue.push({"data": data}, action)
-    return job_hash
+    job_hash, queue_position = queue.push({"data": data}, action)
+    return {"hash": job_hash, "queue_position": queue_position}
 
 @app.route("/api/queue/status/", methods=["POST"])
 @login_check
@@ -7,16 +7,16 @@ DB_FILE = "gradio_queue.db"
 
 def generate_hash():
     generate = True
+    conn = sqlite3.connect(DB_FILE)
+    c = conn.cursor()
     while generate:
         hash = uuid.uuid4().hex
-        conn = sqlite3.connect(DB_FILE)
-        c = conn.cursor()
         c.execute("""
             SELECT hash FROM queue
             WHERE hash = ?;
         """, (hash,))
-        conn.commit()
         generate = c.fetchone() is not None
+    conn.commit()
     return hash
 
 def init():
@@ -24,7 +24,6 @@ def init():
         os.remove(DB_FILE)
     conn = sqlite3.connect(DB_FILE)
     c = conn.cursor()
-    c.execute("BEGIN EXCLUSIVE")
    c.execute("""CREATE TABLE queue (
        queue_index integer PRIMARY KEY,
        hash text,
@@ -71,22 +70,38 @@ def push(input_data, action):
     hash = generate_hash()
     conn = sqlite3.connect(DB_FILE)
     c = conn.cursor()
-    c.execute("BEGIN EXCLUSIVE")
     c.execute("""
         INSERT INTO queue (hash, input_data, action)
         VALUES (?, ?, ?);
-        """, (hash, input_data, action))
+    """, (hash, input_data, action))
+    queue_index = c.lastrowid
+    c.execute("""
+        SELECT COUNT(*) FROM queue WHERE queue_index < ? and popped = 0;
+    """, (queue_index,))
+    queue_position = c.fetchone()[0]
+    if queue_position is None:
+        conn.commit()
+        raise ValueError("Hash not found.")
+    elif queue_position == 0:
+        c.execute("""
+            SELECT COUNT(*) FROM jobs WHERE status = "PENDING";
+        """)
+        result = c.fetchone()
+        if result[0] == 0:
+            queue_position -= 1
     conn.commit()
-    return hash
+    return hash, queue_position
 
 def get_status(hash):
     conn = sqlite3.connect(DB_FILE)
     c = conn.cursor()
-    c.execute("BEGIN EXCLUSIVE")
     c.execute("""
         SELECT queue_index, popped FROM queue WHERE hash = ?;
     """, (hash,))
     result = c.fetchone()
+    if result is None:
+        conn.commit()
+        raise ValueError("Hash not found.")
     if result[1] == 1: # in jobs
         c.execute("""
             SELECT status, output_data, error_message FROM jobs WHERE hash = ?;
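
push() now derives the caller's position from the row it just inserted: count the rows ahead of it that have not been popped yet, and if nothing is ahead and no job is currently PENDING, subtract one so the position comes back as -1 and the client suppresses the "queued @" label (the frontend only shows it for queue_index >= 0). A condensed sketch of that rule, for illustration only; the real code runs the equivalent COUNT(*) queries against the sqlite tables shown above:

def queue_position(unpopped_ahead, pending_jobs):
    """unpopped_ahead: rows with a smaller queue_index and popped = 0;
    pending_jobs: rows in the jobs table with status PENDING."""
    position = unpopped_ahead
    if position == 0 and pending_jobs == 0:
        position -= 1  # nothing queued or running ahead -> reported as -1
    return position


# queue_position(0, 0) -> -1   (worker idle, job should start right away)
# queue_position(0, 1) ->  0   (next in line behind the running job)
# queue_position(3, 1) ->  3
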
@@ -110,14 +125,22 @@ def get_status(hash):
             conn.commit()
             output_data = json.loads(output_data)
             return "COMPLETE", output_data
-    else:
+    else: # in queue
         queue_index = result[0]
         c.execute("""
             SELECT COUNT(*) FROM queue WHERE queue_index < ? and popped = 0;
         """, (queue_index,))
         result = c.fetchone()
+        queue_position = result[0]
+        if queue_position == 0:
+            c.execute("""
+                SELECT COUNT(*) FROM jobs WHERE status = "PENDING";
+            """)
+            result = c.fetchone()
+            if result[0] == 0:
+                queue_position -= 1
         conn.commit()
-        return "QUEUED", result[0]
+        return "QUEUED", queue_position
 
 def start_job(hash):
     conn = sqlite3.connect(DB_FILE)
@@ -134,7 +157,6 @@ def start_job(hash):
 def fail_job(hash, error_message):
     conn = sqlite3.connect(DB_FILE)
     c = conn.cursor()
-    c.execute("BEGIN EXCLUSIVE")
     c.execute("""
         UPDATE jobs SET status = 'FAILED', error_message = ? WHERE hash = ?;
     """, (error_message, hash,))
@@ -144,7 +166,6 @@ def pass_job(hash, output_data):
     output_data = json.dumps(output_data)
     conn = sqlite3.connect(DB_FILE)
     c = conn.cursor()
-    c.execute("BEGIN EXCLUSIVE")
     c.execute("""
         UPDATE jobs SET status = 'COMPLETE', output_data = ? WHERE hash = ?;
     """, (output_data, hash,))
@@ -1 +1 @@
-2.2.15
+2.3.0a