Mirror of https://github.com/gradio-app/gradio.git (synced 2025-04-06 12:30:29 +08:00)

Commit 9ed6d3b8d7
.gitignore (vendored, 1 change)

@@ -5,4 +5,5 @@ staticfiles
*.sqlite3
.idea/*
.ipynb_checkpoints/*
*.h5
.idea/workspace.xml (generated, 30 changes)

@@ -2,11 +2,26 @@
<project version="4">
  <component name="ChangeListManager">
    <list default="true" id="fd73cd66-e80f-470e-a2ec-e220d3b6b864" name="Default Changelist" comment="">
      <change afterPath="$PROJECT_DIR$/.idea/deployment.xml" afterDir="false" />
      <change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" />
      <change beforePath="$PROJECT_DIR$/.ipynb_checkpoints/Usage-checkpoint.ipynb" beforeDir="false" afterPath="$PROJECT_DIR$/.ipynb_checkpoints/Usage-checkpoint.ipynb" afterDir="false" />
      <change afterPath="$PROJECT_DIR$/.ipynb_checkpoints/Emotion Detector-checkpoint.ipynb" afterDir="false" />
      <change afterPath="$PROJECT_DIR$/Emotion Detector.ipynb" afterDir="false" />
      <change afterPath="$PROJECT_DIR$/README.md" afterDir="false" />
      <change afterPath="$PROJECT_DIR$/js/all-io.js" afterDir="false" />
      <change afterPath="$PROJECT_DIR$/js/class-output.js" afterDir="false" />
      <change afterPath="$PROJECT_DIR$/js/emotion-detector.js" afterDir="false" />
      <change afterPath="$PROJECT_DIR$/js/sketchpad-input.js" afterDir="false" />
      <change afterPath="$PROJECT_DIR$/js/webcam-input.js" afterDir="false" />
      <change afterPath="$PROJECT_DIR$/model.h5" afterDir="false" />
      <change afterPath="$PROJECT_DIR$/model2.h5" afterDir="false" />
      <change afterPath="$PROJECT_DIR$/script.py" afterDir="false" />
      <change afterPath="$PROJECT_DIR$/templates/all_io.html" afterDir="false" />
      <change afterPath="$PROJECT_DIR$/templates/class_output.html" afterDir="false" />
      <change afterPath="$PROJECT_DIR$/templates/emotion_detector.html" afterDir="false" />
      <change afterPath="$PROJECT_DIR$/templates/sketchpad_input.html" afterDir="false" />
      <change afterPath="$PROJECT_DIR$/templates/tmp_html.html" afterDir="false" />
      <change afterPath="$PROJECT_DIR$/templates/webcam_input.html" afterDir="false" />
      <change beforePath="$PROJECT_DIR$/Usage.ipynb" beforeDir="false" afterPath="$PROJECT_DIR$/Usage.ipynb" afterDir="false" />
      <change beforePath="$PROJECT_DIR$/gradio.py" beforeDir="false" afterPath="$PROJECT_DIR$/gradio.py" afterDir="false" />
      <change beforePath="$PROJECT_DIR$/index.html" beforeDir="false" afterPath="$PROJECT_DIR$/index.html" afterDir="false" />
    </list>
    <option name="EXCLUDED_CONVERTED_TO_IGNORED" value="true" />
    <option name="SHOW_DIALOG" value="false" />

@@ -162,8 +177,8 @@
    <file pinned="false" current-in-tab="true">
      <entry file="file://$PROJECT_DIR$/gradio.py">
        <provider selected="true" editor-type-id="text-editor">
          <state relative-caret-position="308">
            <caret line="100" column="33" lean-forward="true" selection-start-line="100" selection-start-column="33" selection-end-line="100" selection-end-column="33" />
          <state relative-caret-position="427">
            <caret line="107" column="33" selection-start-line="107" selection-start-column="33" selection-end-line="107" selection-end-column="33" />
            <folding>
              <element signature="e#0#35#0" expanded="true" />
            </folding>

@@ -333,7 +348,6 @@
  </component>
  <component name="ToolWindowManager">
    <frame x="-6" y="-6" width="1513" height="1013" extended-state="6" />
    <editor active="true" />
    <layout>
      <window_info content_ui="combo" id="Project" order="0" visible="true" weight="0.14556533" />
      <window_info id="Structure" order="1" side_tool="true" weight="0.25" />

@@ -457,8 +471,8 @@
    </entry>
    <entry file="file://$PROJECT_DIR$/gradio.py">
      <provider selected="true" editor-type-id="text-editor">
        <state relative-caret-position="308">
          <caret line="100" column="33" lean-forward="true" selection-start-line="100" selection-start-column="33" selection-end-line="100" selection-end-column="33" />
        <state relative-caret-position="427">
          <caret line="107" column="33" selection-start-line="107" selection-start-column="33" selection-end-line="107" selection-end-column="33" />
          <folding>
            <element signature="e#0#35#0" expanded="true" />
          </folding>
.ipynb_checkpoints/Emotion Detector-checkpoint.ipynb (new file, 136 lines)

@@ -0,0 +1,136 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using TensorFlow backend.\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "import sklearn\n",
    "import gradio\n",
    "from keras.models import Sequential\n",
    "from keras.layers import Dense, Dropout, Activation, Flatten\n",
    "from keras.layers import Conv2D, MaxPooling2D, BatchNormalization\n",
    "from keras.losses import categorical_crossentropy\n",
    "from keras.optimizers import Adam\n",
    "from keras.regularizers import l2\n",
    "from keras.callbacks import ReduceLROnPlateau, TensorBoard, EarlyStopping, ModelCheckpoint\n",
    "from keras.models import load_model\n",
    "\n",
    "\n",
    "%load_ext autoreload\n",
    "%autoreload 2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\framework\\op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Colocations handled automatically by placer.\n",
      "WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n",
      "WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\ops\\math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use tf.cast instead.\n"
     ]
    }
   ],
   "source": [
    "model = load_model('model.h5') # found random emotion detector model on github ''(its not very accurate)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py:191: UserWarning: No parser was explicitly specified, so I'm using the best available HTML parser for this system (\"lxml\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n",
      "\n",
      "The code that caused this warning is on line 191 of the file C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py. To get rid of this warning, pass the additional argument 'features=\"lxml\"' to the BeautifulSoup constructor.\n",
      "\n",
      "  input_soup = BeautifulSoup(input_page.read())\n",
      "C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py:192: UserWarning: No parser was explicitly specified, so I'm using the best available HTML parser for this system (\"lxml\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n",
      "\n",
      "The code that caused this warning is on line 192 of the file C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py. To get rid of this warning, pass the additional argument 'features=\"lxml\"' to the BeautifulSoup constructor.\n",
      "\n",
      "  output_soup = BeautifulSoup(output_page.read())\n",
      "C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py:196: UserWarning: No parser was explicitly specified, so I'm using the best available HTML parser for this system (\"lxml\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n",
      "\n",
      "The code that caused this warning is on line 196 of the file C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py. To get rid of this warning, pass the additional argument 'features=\"lxml\"' to the BeautifulSoup constructor.\n",
      "\n",
      "  all_io_soup = BeautifulSoup(all_io_page.read())\n",
      "Error in connection handler\n",
      "Traceback (most recent call last):\n",
      "  File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
      "    yield from self.ws_handler(self, path)\n",
      "  File \"C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py\", line 223, in communicate\n",
      "    processed_input = self.input_interface._pre_process(await websocket.recv())\n",
      "  File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 434, in recv\n",
      "    yield from self.ensure_open()\n",
      "  File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 646, in ensure_open\n",
      "    ) from self.transfer_data_exc\n",
      "websockets.exceptions.ConnectionClosed: WebSocket connection is closed: code = 1001 (going away), no reason\n",
      "Error in connection handler\n",
      "Traceback (most recent call last):\n",
      "  File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
      "    yield from self.ws_handler(self, path)\n",
      "  File \"C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py\", line 223, in communicate\n",
      "    processed_input = self.input_interface._pre_process(await websocket.recv())\n",
      "  File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 434, in recv\n",
      "    yield from self.ensure_open()\n",
      "  File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 646, in ensure_open\n",
      "    ) from self.transfer_data_exc\n",
      "websockets.exceptions.ConnectionClosed: WebSocket connection is closed: code = 1001 (going away), no reason\n"
     ]
    }
   ],
   "source": [
    "iface = gradio.Interface(input='sketchpad',output='class',model_obj=model, model_type='keras')\n",
    "iface.launch()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
.ipynb_checkpoints/Usage-checkpoint.ipynb

@@ -1,124 +0,0 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Example Usage of Gradio"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Here is the code to define a model and train it. It may take a few minutes to train on a machine without GPUs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "import sklearn\n",
    "import gradio\n",
    "\n",
    "%load_ext autoreload\n",
    "%autoreload 2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/1\n",
      "60000/60000 [==============================] - 25s 417us/step - loss: 0.2184 - acc: 0.9352\n",
      "10000/10000 [==============================] - 1s 89us/step\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[0.10298147459961474, 0.9698]"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "mnist = tf.keras.datasets.mnist\n",
    "\n",
    "(x_train, y_train),(x_test, y_test) = mnist.load_data()\n",
    "x_train, x_test = x_train.reshape(-1, 784) / 255.0, x_test.reshape(-1, 784) / 255.0\n",
    "\n",
    "model = tf.keras.models.Sequential([\n",
    "  tf.keras.layers.Flatten(),\n",
    "  tf.keras.layers.Dense(512, activation=tf.nn.relu),\n",
    "  tf.keras.layers.Dropout(0.2),\n",
    "  tf.keras.layers.Dense(10, activation=tf.nn.softmax)\n",
    "])\n",
    "model.compile(optimizer='adam',\n",
    "              loss='sparse_categorical_crossentropy',\n",
    "              metrics=['accuracy'])\n",
    "\n",
    "model.fit(x_train, y_train, epochs=1)\n",
    "model.evaluate(x_test, y_test)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Here, we simply take the trained model and pass it into **gradio**. When you run this, it should open up a new browser window and show allow you to interact with the model."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Using keras model\n",
      "Using keras model\n"
     ]
    }
   ],
   "source": [
    "gradio.DrawADigit(model_obj=model, model_type='keras').start()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.6 (tensorflow)",
   "language": "python",
   "name": "tensorflow"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
Emotion Detector.ipynb (new file, 125 lines)

@@ -0,0 +1,125 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using TensorFlow backend.\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "import sklearn\n",
    "import gradio\n",
    "from keras.models import Sequential\n",
    "from keras.layers import Dense, Dropout, Activation, Flatten\n",
    "from keras.layers import Conv2D, MaxPooling2D, BatchNormalization\n",
    "from keras.losses import categorical_crossentropy\n",
    "from keras.optimizers import Adam\n",
    "from keras.regularizers import l2\n",
    "from keras.callbacks import ReduceLROnPlateau, TensorBoard, EarlyStopping, ModelCheckpoint\n",
    "from keras.models import load_model\n",
    "\n",
    "\n",
    "%load_ext autoreload\n",
    "%autoreload 2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\framework\\op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Colocations handled automatically by placer.\n",
      "WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n",
      "WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\ops\\math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use tf.cast instead.\n"
     ]
    }
   ],
   "source": [
    "model = load_model('model.h5') # found random emotion detector model on github ''(its not very accurate)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py:191: UserWarning: No parser was explicitly specified, so I'm using the best available HTML parser for this system (\"lxml\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n",
      "\n",
      "The code that caused this warning is on line 191 of the file C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py. To get rid of this warning, pass the additional argument 'features=\"lxml\"' to the BeautifulSoup constructor.\n",
      "\n",
      "  input_soup = BeautifulSoup(input_page.read())\n",
      "C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py:192: UserWarning: No parser was explicitly specified, so I'm using the best available HTML parser for this system (\"lxml\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n",
      "\n",
      "The code that caused this warning is on line 192 of the file C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py. To get rid of this warning, pass the additional argument 'features=\"lxml\"' to the BeautifulSoup constructor.\n",
      "\n",
      "  output_soup = BeautifulSoup(output_page.read())\n",
      "C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py:196: UserWarning: No parser was explicitly specified, so I'm using the best available HTML parser for this system (\"lxml\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n",
      "\n",
      "The code that caused this warning is on line 196 of the file C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py. To get rid of this warning, pass the additional argument 'features=\"lxml\"' to the BeautifulSoup constructor.\n",
      "\n",
      "  all_io_soup = BeautifulSoup(all_io_page.read())\n",
      "Error in connection handler\n",
      "Traceback (most recent call last):\n",
      "  File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
      "    yield from self.ws_handler(self, path)\n",
      "  File \"C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py\", line 223, in communicate\n",
      "    processed_input = self.input_interface._pre_process(await websocket.recv())\n",
      "  File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 434, in recv\n",
      "    yield from self.ensure_open()\n",
      "  File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 646, in ensure_open\n",
      "    ) from self.transfer_data_exc\n",
      "websockets.exceptions.ConnectionClosed: WebSocket connection is closed: code = 1001 (going away), no reason\n"
     ]
    }
   ],
   "source": [
    "iface = gradio.Interface(input='webcam',output='class',model_obj=model, model_type='keras')\n",
    "iface.launch()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
README.md (new file, 10 lines)

@@ -0,0 +1,10 @@
# Gradiome / Gradio

Gradio is a python library that allows you to place input and output interfaces over trained models to make it easy for you to "play around" with your model. Gradio runs entirely locally using your browser.

To get started understanding gradio, I recommend you go in the following order:
1. Run the Usage.ipynb python notebook
2. Look at the gradio.py code
3. Look at the templates/draw_a_digit.html

You'll notice that gradio includes both front-end design (in the HTML folder) as well as back-end design (gradio.py).
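Taken together with the notebooks above, end-to-end usage of the new API introduced by this commit looks like the following. A minimal sketch: it assumes a trained Keras `model` is already in scope, as in Emotion Detector.ipynb.

```python
# Minimal usage sketch, following the notebooks in this commit.
# Assumes `model` is a trained Keras model (e.g. load_model('model.h5')).
import gradio

# 'sketchpad', 'webcam', and 'class' are the names registered in gradio.py's registry.
iface = gradio.Interface(input='sketchpad', output='class',
                         model_obj=model, model_type='keras')
iface.launch()  # builds templates/tmp_html.html, opens it, starts the websocket server
```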
Usage.ipynb (149 changes)

@@ -1,149 +0,0 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Example Usage of Gradio"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Here is the code to define a model and train it. It may take a few minutes to train on a machine without GPUs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "import sklearn\n",
    "import gradio\n",
    "\n",
    "%load_ext autoreload\n",
    "%autoreload 2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/1\n",
      "60000/60000 [==============================] - 16s 266us/step - loss: 0.2188 - acc: 0.9352\n",
      "10000/10000 [==============================] - 0s 47us/step\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[0.12200433680266141, 0.9606]"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "mnist = tf.keras.datasets.mnist\n",
    "\n",
    "(x_train, y_train),(x_test, y_test) = mnist.load_data()\n",
    "x_train, x_test = x_train.reshape(-1, 784) / 255.0, x_test.reshape(-1, 784) / 255.0\n",
    "\n",
    "model = tf.keras.models.Sequential([\n",
    "  tf.keras.layers.Flatten(),\n",
    "  tf.keras.layers.Dense(512, activation=tf.nn.relu),\n",
    "  tf.keras.layers.Dropout(0.2),\n",
    "  tf.keras.layers.Dense(10, activation=tf.nn.softmax)\n",
    "])\n",
    "model.compile(optimizer='adam',\n",
    "              loss='sparse_categorical_crossentropy',\n",
    "              metrics=['accuracy'])\n",
    "\n",
    "model.fit(x_train, y_train, epochs=1)\n",
    "model.evaluate(x_test, y_test)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Here, we simply take the trained model and pass it into **gradio**. When you run this, it should open up a new browser window and show allow you to interact with the model."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "ename": "OSError",
     "evalue": "[Errno 10048] error while attempting to bind on address ('127.0.0.1', 5679): only one usage of each socket address (protocol/network address/port) is normally permitted",
     "output_type": "error",
     "traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mOSError\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m~\\Anaconda3\\envs\\tensorflow\\lib\\asyncio\\base_events.py\u001b[0m in \u001b[0;36mcreate_server\u001b[1;34m(self, protocol_factory, host, port, family, flags, sock, backlog, ssl, reuse_address, reuse_port)\u001b[0m\n\u001b[0;32m 1050\u001b[0m \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1051\u001b[1;33m \u001b[0msock\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mbind\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0msa\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 1052\u001b[0m \u001b[1;32mexcept\u001b[0m \u001b[0mOSError\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0merr\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31mOSError\u001b[0m: [WinError 10048] Only one usage of each socket address (protocol/network address/port) is normally permitted",
"\nDuring handling of the above exception, another exception occurred:\n",
"\u001b[1;31mOSError\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m<ipython-input-7-5cd3a5d36036>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mgradio\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mDrawADigit\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmodel_obj\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mmodel\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmodel_type\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m'keras'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mstart\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[1;32m~\\Repos\\gradio\\gradio.py\u001b[0m in \u001b[0;36mstart\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 131\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 132\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mstart\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 133\u001b[1;33m \u001b[0msuper\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mstart\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 134\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 135\u001b[0m \u001b[1;33masync\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mcommunicate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mwebsocket\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mpath\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\Repos\\gradio\\gradio.py\u001b[0m in \u001b[0;36mstart\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 92\u001b[0m \u001b[0mwebbrowser\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mopen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'file://'\u001b[0m \u001b[1;33m+\u001b[0m \u001b[0mos\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpath\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrealpath\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_get_template_path\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 93\u001b[0m \u001b[0mstart_server\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mwebsockets\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mserve\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcommunicate\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mLOCALHOST_IP\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mSOCKET_PORT\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 94\u001b[1;33m \u001b[0masyncio\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget_event_loop\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrun_until_complete\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mstart_server\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 95\u001b[0m \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 96\u001b[0m \u001b[0masyncio\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget_event_loop\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrun_forever\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\nest_asyncio.py\u001b[0m in \u001b[0;36mrun_until_complete\u001b[1;34m(self, future)\u001b[0m\n\u001b[0;32m 57\u001b[0m \u001b[1;32mwhile\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[0mf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdone\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 58\u001b[0m \u001b[0mrun_once\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 59\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mresult\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 60\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 61\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_run_until_complete_orig\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfuture\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\Anaconda3\\envs\\tensorflow\\lib\\asyncio\\tasks.py\u001b[0m in \u001b[0;36m_step\u001b[1;34m(***failed resolving arguments***)\u001b[0m\n\u001b[0;32m 178\u001b[0m \u001b[1;31m# We use the `send` method directly, because coroutines\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 179\u001b[0m \u001b[1;31m# don't have `__iter__` and `__next__` methods.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 180\u001b[1;33m \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mcoro\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msend\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;32mNone\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 181\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 182\u001b[0m \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mcoro\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mthrow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mexc\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\Anaconda3\\envs\\tensorflow\\lib\\asyncio\\tasks.py\u001b[0m in \u001b[0;36m_wrap_awaitable\u001b[1;34m(awaitable)\u001b[0m\n\u001b[0;32m 535\u001b[0m \u001b[0mthat\u001b[0m \u001b[0mwill\u001b[0m \u001b[0mlater\u001b[0m \u001b[0mbe\u001b[0m \u001b[0mwrapped\u001b[0m \u001b[1;32min\u001b[0m \u001b[0ma\u001b[0m \u001b[0mTask\u001b[0m \u001b[0mby\u001b[0m \u001b[0mensure_future\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 536\u001b[0m \"\"\"\n\u001b[1;32m--> 537\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[1;33m(\u001b[0m\u001b[1;32myield\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mawaitable\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__await__\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 538\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 539\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\websockets\\py35\\server.py\u001b[0m in \u001b[0;36m__await_impl__\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 11\u001b[0m \u001b[1;31m# Duplicated with __iter__ because Python 3.7 requires an async function\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 12\u001b[0m \u001b[1;31m# (as explained in __await__ below) which Python 3.4 doesn't support.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 13\u001b[1;33m \u001b[0mserver\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mawait\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_creating_server\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 14\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mws_server\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwrap\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mserver\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 15\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mws_server\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\Anaconda3\\envs\\tensorflow\\lib\\asyncio\\base_events.py\u001b[0m in \u001b[0;36mcreate_server\u001b[1;34m(self, protocol_factory, host, port, family, flags, sock, backlog, ssl, reuse_address, reuse_port)\u001b[0m\n\u001b[0;32m 1053\u001b[0m raise OSError(err.errno, 'error while attempting '\n\u001b[0;32m 1054\u001b[0m \u001b[1;34m'to bind on address %r: %s'\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1055\u001b[1;33m % (sa, err.strerror.lower()))\n\u001b[0m\u001b[0;32m 1056\u001b[0m \u001b[0mcompleted\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;32mTrue\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1057\u001b[0m \u001b[1;32mfinally\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31mOSError\u001b[0m: [Errno 10048] error while attempting to bind on address ('127.0.0.1', 5679): only one usage of each socket address (protocol/network address/port) is normally permitted"
     ]
    }
   ],
   "source": [
    "gradio.DrawADigit(model_obj=model, model_type='keras').start()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import subprocess\n",
    "\n",
    "subprocess.check_output(['ngrok','http', '5679'])"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.6 (tensorflow)",
   "language": "python",
   "name": "tensorflow"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
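The OSError captured in this notebook is the familiar "address already in use" failure: re-running the cell tries to bind 127.0.0.1:5679 while a previous server instance still holds the port. This commit works around it by bumping SOCKET_PORT to 5680 in gradio.py. A more general approach (a sketch only, not something this commit does) is to ask the OS for a free port:

```python
# Sketch: let the OS choose a free port instead of hardcoding one.
# The chosen port would also have to be injected into the JS templates,
# which currently hardcode the websocket URL.
import socket

def find_free_port(host='127.0.0.1'):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind((host, 0))  # port 0 means "any free port"
        return s.getsockname()[1]
```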
gradio.py (171 changes)

@@ -8,11 +8,13 @@ from io import BytesIO
import numpy as np
import os
import webbrowser
from bs4 import BeautifulSoup

nest_asyncio.apply()

LOCALHOST_IP = '127.0.0.1'
SOCKET_PORT = 5679
SOCKET_PORT = 5680



import requests, zipfile, io

@@ -69,33 +71,38 @@ def resize_and_crop(img, size, crop_type='top'):
    return img


class AbstractInterface(ABC):
class AbstractInput(ABC):
    """
    An abstract class for defining the methods that all gradio interfaces should have.
    An abstract class for defining the methods that all gradio inputs should have.
    """

    def __init__(self, model_type, model_obj, **model_params):
        """
        :param model_type: what kind of trained model, can be 'keras' or 'sklearn'.
        :param model_obj: the model object, such as a sklearn classifier or keras model.
        :param model_params: additional model parameters.
        """
        self.model_type = model_type
        self.model_obj = model_obj
        self.model_params = model_params
    def __init__(self):
        super().__init__()

    def start(self):

    @abstractmethod
    def _get_template_path(self):
        """
        Standard method shared by interfaces that launches a websocket at a specified IP address.
        All interfaces should define a method that returns the path to its template.
        """
        webbrowser.open('file://' + os.path.realpath(self._get_template_path()))
        start_server = websockets.serve(self.communicate, LOCALHOST_IP, SOCKET_PORT)
        asyncio.get_event_loop().run_until_complete(start_server)
        try:
            asyncio.get_event_loop().run_forever()
        except RuntimeError: # Runtime errors are thrown in jupyter notebooks because of async.
            pass
        pass

    @abstractmethod
    def _pre_process(self):
        """
        All interfaces should define a method that returns the path to its template.
        """
        pass

class AbstractOutput(ABC):
    """
    An abstract class for defining the methods that all gradio inputs should have.
    """

    def __init__(self):
        """
        """
        super().__init__()

    @abstractmethod
    def _get_template_path(self):

@@ -105,14 +112,100 @@ class AbstractInterface(ABC):
        pass

    @abstractmethod
    async def communicate(self, websocket, path):
    def _post_process(self):
        """
        All interfaces should define a custom method that defines how they communicate with the websocket.
        All interfaces should define a method that returns the path to its template.
        """
        pass


class DrawADigit(AbstractInterface):
class Sketchpad(AbstractInput):

    def _get_template_path(self):
        return 'templates/sketchpad_input.html'

    def _pre_process(self, imgstring):
        """
        """
        content = imgstring.split(';')[1]
        image_encoded = content.split(',')[1]
        body = base64.decodebytes(image_encoded.encode('utf-8'))
        im = Image.open(BytesIO(base64.b64decode(image_encoded))).convert('L')
        im = resize_and_crop(im, (28, 28))
        array = np.array(im).flatten().reshape(1, -1)
        return array


class Webcam(AbstractInput):

    def _get_template_path(self):
        return 'templates/webcam_input.html'

    def _pre_process(self, imgstring):
        """
        """
        content = imgstring.split(';')[1]
        image_encoded = content.split(',')[1]
        body = base64.decodebytes(image_encoded.encode('utf-8'))
        im = Image.open(BytesIO(base64.b64decode(image_encoded))).convert('L')
        im = resize_and_crop(im, (48, 48))
        array = np.array(im).flatten().reshape(1,48,48,1)
        return array

class Class(AbstractOutput):

    def _get_template_path(self):
        return 'templates/class_output.html'

    def _post_process(self, prediction):
        """
        """
        emotion_dict = {0: "Angry", 1: "Disgust", 2: "Fear", 3: "Happy", 4: "Sad", 5: "Surprise", 6: "Neutral"}
        return emotion_dict[prediction]

registry = {
    'webcam':Webcam,
    'sketchpad' :Sketchpad,
    'class' :Class,
}

class Interface():
    """
    """

    def __init__(self, input, output, model_obj, model_type, **model_params):
        """
        :param model_type: what kind of trained model, can be 'keras' or 'sklearn'.
        :param model_obj: the model object, such as a sklearn classifier or keras model.
        :param model_params: additional model parameters.
        """
        self.input_interface = registry[input]()
        self.output_interface = registry[output]()
        self.model_type = model_type
        self.model_obj = model_obj
        self.model_params = model_params

    def _build_template(self):
        input_template_path = self.input_interface._get_template_path()
        output_template_path = self.output_interface._get_template_path()
        input_page = open(input_template_path)
        output_page = open(output_template_path)
        input_soup = BeautifulSoup(input_page.read())
        output_soup = BeautifulSoup(output_page.read())

        all_io_url = 'templates/all_io.html'
        all_io_page = open(all_io_url)
        all_io_soup = BeautifulSoup(all_io_page.read())
        input_tag = all_io_soup.find("div", {"id": "input"})
        output_tag = all_io_soup.find("div", {"id": "output"})

        input_tag.replace_with(input_soup)
        output_tag.replace_with(output_soup)

        f = open("templates/tmp_html.html", "w")
        f.write(str(all_io_soup.prettify))
        return 'templates/tmp_html.html'

    def predict(self, array):
        if self.model_type=='sklearn':
            return self.model_obj.predict(array)[0]

@@ -121,11 +214,6 @@ class DrawADigit(AbstractInterface):
        else:
            raise ValueError('model_type must be sklearn.')

    def _get_template_path(self):
        return 'templates/draw_a_digit.html'

    def start(self):
        super().start()

    async def communicate(self, websocket, path):
        """
@@ -134,12 +222,19 @@ class DrawADigit(AbstractInterface):
        :param path: ignored
        """
        while True:
            imgstring = await websocket.recv()
            content = imgstring.split(';')[1]
            image_encoded = content.split(',')[1]
            body = base64.decodebytes(image_encoded.encode('utf-8'))
            im = Image.open(BytesIO(base64.b64decode(image_encoded))).convert('L')
            im = resize_and_crop(im, (28, 28))
            array = np.array(im).flatten().reshape(1, -1)
            prediction = self.predict(array)
            await websocket.send(str(prediction))
            processed_input = self.input_interface._pre_process(await websocket.recv())
            prediction = self.predict(processed_input)
            processed_output = self.output_interface._post_process(prediction)
            await websocket.send(str(processed_output))

    def launch(self):
        """
        Standard method shared by interfaces that launches a websocket at a specified IP address.
        """
        webbrowser.open('file://' + os.path.realpath(self._build_template()))
        start_server = websockets.serve(self.communicate, LOCALHOST_IP, SOCKET_PORT)
        asyncio.get_event_loop().run_until_complete(start_server)
        try:
            asyncio.get_event_loop().run_forever()
        except RuntimeError: # Runtime errors are thrown in jupyter notebooks because of async.
            pass
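This refactor is the heart of the commit: the single DrawADigit interface becomes composable AbstractInput/AbstractOutput pairs, wired together by the registry dict and the new Interface class. Adding a new input type should only require the two abstract methods plus a registry entry. A hypothetical sketch (the Microphone class and its template are illustrative, not part of this commit):

```python
# Hypothetical extension of the registry pattern introduced above.
# Nothing here is in the commit; names and shapes are illustrative.
class Microphone(AbstractInput):

    def _get_template_path(self):
        # A template like this would have to be written alongside the others.
        return 'templates/microphone_input.html'

    def _pre_process(self, audiostring):
        # Decode the browser payload into the array the model expects,
        # mirroring what Sketchpad and Webcam do for base64 images.
        raise NotImplementedError

registry['microphone'] = Microphone

# After registering, the string name works like the built-ins:
# gradio.Interface(input='microphone', output='class',
#                  model_obj=model, model_type='keras').launch()
```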
index.html

@@ -176,7 +176,7 @@
<p class="card-text"><strong>Emotion Detector</strong>: allow users to make emotions through their webcam and detect their emotion with your model.</p>
<div class="d-flex justify-content-between align-items-center">
  <div class="btn-group">
    <button type="button" class="btn btn-sm btn-outline-secondary">View</button>
    <button type="button" class="btn btn-sm btn-outline-secondary" onclick="location.href='templates/emotion_detector.html';">View</button>
    <button type="button" class="btn btn-sm btn-outline-secondary">Source</button>
  </div>
  <small class="text-muted"></small>
js/all-io.js (new file, 13 lines)

@@ -0,0 +1,13 @@
try {
    var ws = new WebSocket("ws://127.0.0.1:5680/")
    ws.onerror = function(evt) {
        notifyError(evt)
    };

} catch (e) {
    notifyError(e)
}

const sleep = (milliseconds) => {
    return new Promise(resolve => setTimeout(resolve, milliseconds))
}
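Note that this client connects to port 5680, matching the new SOCKET_PORT in gradio.py, while js/emotion-detector.js below still opens ws://127.0.0.1:5679/ — one of the two is presumably stale. The server side can also be exercised without a browser; a sketch using the same `websockets` package gradio.py depends on (the payload must be a canvas dataURL string, elided here):

```python
# Sketch: probe the running Interface.launch() server from Python.
# `data_url` must be a "data:image/png;base64,<...>" string, as the JS clients send.
import asyncio
import websockets

async def probe(data_url):
    async with websockets.connect("ws://127.0.0.1:5680/") as ws:  # matches SOCKET_PORT
        await ws.send(data_url)
        return await ws.recv()  # the post-processed label, e.g. "Happy"

# label = asyncio.get_event_loop().run_until_complete(probe(data_url))
```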
js/class-output.js (new file, 43 lines)

@@ -0,0 +1,43 @@
var predict_canvas = document.getElementById("predict_canvas");
var predict_ctx = predict_canvas.getContext("2d");


function notifyError(error) {
    $.notify({
        // options
        message: 'Not able to communicate with model (is python code still running?)'
    },{
        // settings
        type: 'danger',
        animate: {
            enter: 'animated fadeInDown',
            exit: 'animated fadeOutUp'
        },
        placement: {
            from: "bottom",
            align: "right"
        },
        delay: 5000

    });
}

try {
    ws.onerror = function(evt) {
        notifyError(evt)
    };

    ws.onmessage = function (event) {
        console.log(event.data);
        predict_ctx.clearRect(0, 0, 400, 400); // Clears the canvas
        predict_ctx.font = "60px Arial";
        predict_ctx.fillStyle = "white";
        sleep(300).then(() => {
            predict_ctx.textAlign = "center";
            predict_ctx.fillText(event.data, 200, 200);
        })

    }
} catch (e) {
    notifyError(e)
}
js/emotion-detector.js (new file, 180 lines)

@@ -0,0 +1,180 @@
videoWidth = 400;
videoHeight = 400;

function isAndroid() {
    return /Android/i.test(navigator.userAgent);
}

function isiOS() {
    return /iPhone|iPad|iPod/i.test(navigator.userAgent);
}

function isMobile() {
    return isAndroid() || isiOS();
}

var predict_canvas = document.getElementById("predict_canvas");
var predict_ctx = predict_canvas.getContext("2d");
var canvas = document.getElementById("canvas");
var ctx = canvas.getContext("2d");


const sleep = (milliseconds) => {
    return new Promise(resolve => setTimeout(resolve, milliseconds))
}

async function setupCamera() {
    if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
        throw new Error(
            'Browser API navigator.mediaDevices.getUserMedia not available');
    }

    const video = document.getElementById('video');
    video.width = videoWidth;
    video.height = videoHeight;


    const mobile = isMobile();
    const stream = await navigator.mediaDevices.getUserMedia({
        'audio': false,
        'video': {
            facingMode: 'user',
            width: mobile ? undefined : videoWidth,
            height: mobile ? undefined : videoHeight,
        },
    });

    video.srcObject = stream;

    return new Promise((resolve) => {
        video.onloadedmetadata = () => {
            resolve(video);
        };
    });

}

async function loadVideo() {
    const video = await setupCamera();
    video.play();

    return video;
}

function detectPoseInRealTime(video) {
    const flipHorizontal = true;
    async function poseDetectionFrame() {

        ctx.clearRect(0, 0, videoWidth, videoHeight);

        ctx.save();
        ctx.scale(-1, 1);
        ctx.translate(-videoWidth, 0);
        ctx.drawImage(video, 0, 0, videoWidth, videoHeight);
        ctx.restore();

        requestAnimationFrame(poseDetectionFrame);

    }

    poseDetectionFrame();
}

async function bindPage() {

    let video;

    try {
        video = await loadVideo();
    } catch (e) {
        let info = document.getElementById('info');
        info.textContent = 'this browser does not support video capture,' +
            'or this device does not have a camera';
        info.style.display = 'block';
        throw e;
    }

    detectPoseInRealTime(video);

}

navigator.getUserMedia = navigator.getUserMedia ||
    navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
// kick off the demo
bindPage();


function notifyError(error) {
    $.notify({
        // options
        message: 'Not able to communicate with model (is python code still running?)'
    },{
        // settings
        type: 'danger',
        animate: {
            enter: 'animated fadeInDown',
            exit: 'animated fadeOutUp'
        },
        placement: {
            from: "bottom",
            align: "right"
        },
        delay: 5000

    });
}

try {
    var ws = new WebSocket("ws://127.0.0.1:5679/")
    ws.onerror = function(evt) {
        notifyError(evt)
    };

    ws.onmessage = function (event) {
        var emotion_dict = {0: "Angry", 1: "Disgust", 2: "Fear", 3: "Happy", 4: "Sad", 5: "Surprise", 6: "Neutral"}
        console.log(event.data);
        predict_ctx.clearRect(0, 0, ctx.canvas.width, ctx.canvas.height); // Clears the canvas
        predict_ctx.font = "60px Arial";
        predict_ctx.fillStyle = "white";
        sleep(300).then(() => {
            // predict_ctx.fillText(emotion_dict[event.data], 110, 310);
            predict_ctx.textAlign = "center";
            predict_ctx.fillText(emotion_dict[event.data], 200, 200);
        })

    }
} catch (e) {
    notifyError(e)
}

$('#clear-button').click(function(e){
    context.clearRect(0, 0, context.canvas.width, context.canvas.height); // Clears the canvas
    clickX = new Array();
    clickY = new Array();
    clickDrag = new Array();
    context.fillStyle = "black";
    context.fillRect(0, 0, 400, 400);
    ctx.clearRect(0, 0, context.canvas.width, context.canvas.height); // Clears the canvas
})


$('#submit-button').click(function(e){
    var dataURL = canvas.toDataURL("image/png");
    ws.send(dataURL, function(e){
        notifyError(e)
    });

})

$('#capture-button').click(function(e){
    ctx.clearRect(0, 0, videoWidth, videoHeight);
    ctx.save();
    ctx.scale(-1, 1);
    ctx.translate(-videoWidth, 0);
    ctx.drawImage(video, 0, 0, videoWidth, videoHeight);
    ctx.restore();
    var dataURL = canvas.toDataURL("image/png");
    ws.send(dataURL, function(e){
        notifyError(e)
    });
})
js/sketchpad-input.js (new file, 100 lines)

@@ -0,0 +1,100 @@
var canvas = document.getElementById('canvas');
context = canvas.getContext("2d");
context.fillStyle = "black";
context.fillRect(0, 0, 400, 400);


function notifyError(error) {
    $.notify({
        // options
        message: 'Not able to communicate with model (is python code still running?)'
    },{
        // settings
        type: 'danger',
        animate: {
            enter: 'animated fadeInDown',
            exit: 'animated fadeOutUp'
        },
        placement: {
            from: "bottom",
            align: "right"
        },
        delay: 5000

    });
}


$('#canvas').mousedown(function(e){
    var mouseX = e.pageX - this.getBoundingClientRect().left + document.documentElement.scrollLeft;
    var mouseY = e.pageY - this.getBoundingClientRect().top + document.documentElement.scrollTop
    ;

    paint = true;
    addClick(mouseX, mouseY);
    redraw();
});
$('#canvas').mousemove(function(e){
    if(paint){
        addClick(e.pageX - this.getBoundingClientRect().left + document.documentElement.scrollLeft, e.pageY - this.getBoundingClientRect().top + document.documentElement.scrollTop, true);
        redraw();
    }
});
$('#canvas').mouseup(function(e){
    paint = false;
});
$('#canvas').mouseleave(function(e){
    paint = false;
});
var clickX = new Array();
var clickY = new Array();
var clickDrag = new Array();
var paint;

function addClick(x, y, dragging)
{
    clickX.push(x);
    clickY.push(y);
    clickDrag.push(dragging);
}
function redraw(){
    context.clearRect(0, 0, context.canvas.width, context.canvas.height); // Clears the canvas
    context.fillStyle = "black";
    context.fillRect(0, 0, 400, 400);

    context.strokeStyle = "#FFF";
    context.lineJoin = "round";
    context.lineWidth = 25;

    for(var i=0; i < clickX.length; i++) {
        context.beginPath();
        if(clickDrag[i] && i){
            context.moveTo(clickX[i-1], clickY[i-1]);
        }else{
            context.moveTo(clickX[i]-1, clickY[i]);
        }
        context.lineTo(clickX[i], clickY[i]);
        context.closePath();
        context.stroke();
    }
}

$('#clear-button').click(function(e){
    context.clearRect(0, 0, context.canvas.width, context.canvas.height); // Clears the canvas
    clickX = new Array();
    clickY = new Array();
    clickDrag = new Array();
    context.fillStyle = "black";
    context.fillRect(0, 0, 400, 400);
    ctx.clearRect(0, 0, context.canvas.width, context.canvas.height); // Clears the canvas
})


$('#submit-button').click(function(e){
    var dataURL = canvas.toDataURL("image/png");
    ws.send(dataURL, function(e){
        notifyError(e)
    });

})
152
js/webcam-input.js
Normal file
152
js/webcam-input.js
Normal file
@ -0,0 +1,152 @@
var videoWidth = 400;
var videoHeight = 400;

function isAndroid() {
  return /Android/i.test(navigator.userAgent);
}

function isiOS() {
  return /iPhone|iPad|iPod/i.test(navigator.userAgent);
}

function isMobile() {
  return isAndroid() || isiOS();
}

var canvas = document.getElementById("canvas");
var ctx = canvas.getContext("2d");

async function setupCamera() {
  if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
    throw new Error(
        'Browser API navigator.mediaDevices.getUserMedia not available');
  }

  const video = document.getElementById('video');
  video.width = videoWidth;
  video.height = videoHeight;

  const mobile = isMobile();
  const stream = await navigator.mediaDevices.getUserMedia({
    'audio': false,
    'video': {
      facingMode: 'user',
      // On mobile, leave the resolution undefined so the device picks its own.
      width: mobile ? undefined : videoWidth,
      height: mobile ? undefined : videoHeight,
    },
  });

  video.srcObject = stream;

  return new Promise((resolve) => {
    video.onloadedmetadata = () => {
      resolve(video);
    };
  });
}

async function loadVideo() {
  const video = await setupCamera();
  video.play();

  return video;
}

function detectPoseInRealTime(video) {
  const flipHorizontal = true;

  async function poseDetectionFrame() {
    // Draw the current video frame mirrored so the preview acts like a mirror.
    ctx.clearRect(0, 0, videoWidth, videoHeight);
    ctx.save();
    ctx.scale(-1, 1);
    ctx.translate(-videoWidth, 0);
    ctx.drawImage(video, 0, 0, videoWidth, videoHeight);
    ctx.restore();

    requestAnimationFrame(poseDetectionFrame);
  }

  poseDetectionFrame();
}

async function bindPage() {
  let video;

  try {
    video = await loadVideo();
  } catch (e) {
    let info = document.getElementById('info');
    info.textContent = 'this browser does not support video capture, ' +
        'or this device does not have a camera';
    info.style.display = 'block';
    throw e;
  }

  detectPoseInRealTime(video);
}

// Legacy fallback shim; the promise-based mediaDevices path above is what is used.
navigator.getUserMedia = navigator.getUserMedia ||
    navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
// kick off the demo
bindPage();

function notifyError(error) {
  $.notify({
    // options
    message: 'Not able to communicate with model (is python code still running?)'
  },{
    // settings
    type: 'danger',
    animate: {
      enter: 'animated fadeInDown',
      exit: 'animated fadeOutUp'
    },
    placement: {
      from: "bottom",
      align: "right"
    },
    delay: 5000
  });
}

$('#clear-button').click(function(e){
  // Clears the canvas and repaints the black background. This file's 2D
  // context is "ctx"; the sketchpad's "context" and click arrays do not
  // exist here.
  ctx.clearRect(0, 0, ctx.canvas.width, ctx.canvas.height);
  ctx.fillStyle = "black";
  ctx.fillRect(0, 0, 400, 400);
})

$('#submit-button').click(function(e){
  var dataURL = canvas.toDataURL("image/png");
  ws.send(dataURL, function(e){
    notifyError(e)
  });
})

$('#capture-button').click(function(e){
  // Freeze the current mirrored frame onto the canvas, then send it.
  ctx.clearRect(0, 0, videoWidth, videoHeight);
  ctx.save();
  ctx.scale(-1, 1);
  ctx.translate(-videoWidth, 0);
  ctx.drawImage(video, 0, 0, videoWidth, videoHeight);
  ctx.restore();
  var dataURL = canvas.toDataURL("image/png");
  ws.send(dataURL, function(e){
    notifyError(e)
  });
})
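The mirrored draw used in both the animation loop and the Capture handler above is worth isolating: scale(-1, 1) reflects the x-axis, which alone would paint the frame off-canvas to the left, and translate(-width, 0) shifts it back into view, so the saved frame matches the mirror-like preview. A minimal sketch of the same transform as a standalone helper (the function name is illustrative):

// Draw one video frame horizontally flipped onto a 2D context.
function drawMirroredFrame(ctx, video, width, height) {
  ctx.clearRect(0, 0, width, height);
  ctx.save();
  ctx.scale(-1, 1);        // reflect the x-axis
  ctx.translate(-width, 0); // shift the reflected image back into view
  ctx.drawImage(video, 0, 0, width, height);
  ctx.restore();
}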
12	script.py	Normal file
@@ -0,0 +1,12 @@
from bs4 import BeautifulSoup

# Splice the sketchpad input template into the "#input" placeholder of the
# combined page, then write the merged document out as a temporary file.
sketchpad_url = 'templates/sketchpad_input.html'
all_io_url = 'templates/all_io.html'
sketchpad_page = open(sketchpad_url)
all_io_page = open(all_io_url)
sketchpad_soup = BeautifulSoup(sketchpad_page.read(), "html.parser")  # explicit parser avoids a bs4 warning
all_io_soup = BeautifulSoup(all_io_page.read(), "html.parser")
input_tag = all_io_soup.find("div", {"id": "input"})
input_tag.replace_with(sketchpad_soup)
f = open("templates/tmp_html.html", "w")
f.write(all_io_soup.prettify())  # prettify() must be called, not written as a bare method
f.close()
94	templates/all_io.html	Normal file
@@ -0,0 +1,94 @@
<!doctype html>
<script src="../js/all-io.js"></script>
<script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha384-KJ3o2DKtIkvYIK3UENzmM7KCkRr/rE9/Qpg6aAZGJwFDMVNA/GpGFF93hXpG5KkN" crossorigin="anonymous"></script>
<script src="../js/bootstrap.min.js"></script>
<script src="../js/bootstrap-notify.min.js"></script>

<html lang="en">
<head>
  <meta charset="utf-8">
  <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
  <meta name="description" content="">
  <meta name="author" content="">

  <title>Gradio</title>

  <!-- Bootstrap core CSS -->
  <link href="../css/bootstrap.min.css" rel="stylesheet">
  <link href="../css/draw-a-digit.css" rel="stylesheet">
  <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css">
</head>

<body>

<nav class="navbar navbar-expand-md navbar-dark bg-dark fixed-top">
  <a class="navbar-brand" href="#">Gradio</a>
  <button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#navbarsExampleDefault" aria-controls="navbarsExampleDefault" aria-expanded="false" aria-label="Toggle navigation">
    <span class="navbar-toggler-icon"></span>
  </button>

  <div class="collapse navbar-collapse" id="navbarsExampleDefault">
    <ul class="navbar-nav mr-auto">
    </ul>
    <ul class="navbar-nav">
      <!--<li class="nav-item">-->
        <!--<a class="nav-link" href="#">Help</a>-->
      <!--</li>-->
      <li class="nav-item ">
        <a class="nav-link" href="http://www.siliconprep.com"><em>Gradio</em>, a tool by Silicon School</a>
      </li>
    </ul>
  </div>
</nav>

<main role="main" class="container starter-template">
  <div class="row">

    <!-- INPUT
    ====================================================================================================================================================== -->
    <div id="input"></div>
    <!-- OUTPUT
    ====================================================================================================================================================== -->
    <div id="output"></div>

  </div>
</main><!-- /.container -->

<footer class="footer">
  <div class="container" >
    <span class="text-muted">
      <!-- Add font awesome icons -->
      Gradio is open-source, help make it better
      <a href="https://github.com/abidlabs/gradiome" target="_blank" class="fa fa-github"></a>
    </span>

    <span class="text-muted pull-right">
      <!-- Add font awesome icons -->
      Found this useful? Kindly spread the word
      <a href="https://www.facebook.com/sharer/sharer.php?u=www.siliconprep.com" target="_blank" class="fa fa-facebook"></a>
      <a href="https://twitter.com/home?status=Check%20out%20Gradio%20tool%20at%20www.siliconprep.com" target="_blank" class="fa fa-twitter"></a>
    </span>
  </div>
</footer>

<!-- Bootstrap core JavaScript
================================================== -->
<!-- Placed at the end of the document so the pages load faster -->

</body>
</html>
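The empty #input and #output divs above are placeholders: script.py (above) splices a chosen input template into #input server-side with BeautifulSoup. Purely as an illustrative alternative, not the repo's method, the same splice could be done in the browser, assuming the templates are served next to the page:

// Illustrative sketch: fetch a template fragment and drop it into the
// #input placeholder. (The repo performs this substitution server-side.)
fetch("../templates/sketchpad_input.html")
  .then(function (response) { return response.text(); })
  .then(function (html) {
    document.getElementById("input").innerHTML = html;
  });

Note that script tags inserted via innerHTML do not execute, so the fragment's JavaScript (e.g. sketchpad-input.js) would still need to be loaded separately.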
6	templates/class_output.html	Normal file
@@ -0,0 +1,6 @@
<div class="col-6">
  <h5>Text Output:</h5>
  <canvas id="predict_canvas" width="400" height="400" style="background-color:black"></canvas><br>
</div>

<script src="../js/class-output.js"></script>
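class-output.js itself is not included in this diff, so its message protocol is unknown; as a hedged illustration of the role this template plays, a receiver along these lines could paint whatever label the Python process sends back onto #predict_canvas (the plain-text message format is an assumption):

// Illustrative only -- assumes the page's WebSocket ("ws") delivers a
// plain-text class label; the real handler lives in js/class-output.js.
var predictCanvas = document.getElementById("predict_canvas");
var predictCtx = predictCanvas.getContext("2d");

ws.onmessage = function (event) {
  predictCtx.clearRect(0, 0, predictCanvas.width, predictCanvas.height);
  predictCtx.fillStyle = "white";
  predictCtx.font = "48px sans-serif";
  predictCtx.fillText(event.data, 20, predictCanvas.height / 2);
};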
106	templates/emotion_detector.html	Normal file
@@ -0,0 +1,106 @@
<!doctype html>
<html lang="en">
<head>
  <meta charset="utf-8">
  <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
  <meta name="description" content="">
  <meta name="author" content="">

  <title>Gradio: Emotion Detector</title>

  <!-- Bootstrap core CSS -->
  <link href="../css/bootstrap.min.css" rel="stylesheet">
  <link href="../css/draw-a-digit.css" rel="stylesheet">
  <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css">
</head>

<body>

<nav class="navbar navbar-expand-md navbar-dark bg-dark fixed-top">
  <a class="navbar-brand" href="#">Emotion Detector</a>
  <button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#navbarsExampleDefault" aria-controls="navbarsExampleDefault" aria-expanded="false" aria-label="Toggle navigation">
    <span class="navbar-toggler-icon"></span>
  </button>

  <div class="collapse navbar-collapse" id="navbarsExampleDefault">
    <ul class="navbar-nav mr-auto">
    </ul>
    <ul class="navbar-nav">
      <!--<li class="nav-item">-->
        <!--<a class="nav-link" href="#">Help</a>-->
      <!--</li>-->
      <li class="nav-item ">
        <a class="nav-link" href="http://www.siliconprep.com"><em>Gradio</em>, a tool by Silicon School</a>
      </li>
    </ul>
  </div>
</nav>

<main role="main" class="container starter-template">

  <div class="row">
    <div class="col-6">
      <h5>See if we can guess what emotion you're expressing!</h5>
      <!-- The hidden <video> element sits inside the canvas as fallback
           content; frames are drawn onto the canvas from JavaScript. -->
      <canvas id="canvas" width="400" height="400" style="background-color:black">
        <video id="video" playsinline style=" -moz-transform: scaleX(-1);
          -o-transform: scaleX(-1);
          -webkit-transform: scaleX(-1);
          transform: scaleX(-1);
          display: none;
          ">
        </video>
      </canvas><br>
      <!-- <input type="file" onchange="previewFile()"><br>
      <img src="" height="200" alt="Image preview...">
      -->
      <div class="btn-group" role="group" aria-label="Basic example">
        <button type="button" class="btn btn-primary" id="capture-button">Capture</button>
        <!-- <button type="button" class="btn btn-primary" id="submit-button">Recognize</button> -->
        <button type="button" class="btn btn-secondary" id="clear-button">Clear</button>
      </div>
    </div>
    <div class="col-6">
      <h5>Predicted emotion appears here</h5>
      <canvas id="predict_canvas" width="400" height="400" style="background-color:black"></canvas><br>
    </div>
  </div>

</main><!-- /.container -->

<footer class="footer">
  <div class="container" >
    <span class="text-muted">
      <!-- Add font awesome icons -->
      Gradio is open-source, help make it better
      <a href="https://github.com/abidlabs/gradiome" target="_blank" class="fa fa-github"></a>
    </span>

    <span class="text-muted pull-right">
      <!-- Add font awesome icons -->
      Found this useful? Kindly spread the word
      <a href="https://www.facebook.com/sharer/sharer.php?u=www.siliconprep.com" target="_blank" class="fa fa-facebook"></a>
      <a href="https://twitter.com/home?status=Check%20out%20Gradio%20tool%20at%20www.siliconprep.com" target="_blank" class="fa fa-twitter"></a>
    </span>
  </div>
</footer>

<!-- Bootstrap core JavaScript
================================================== -->
<!-- Placed at the end of the document so the pages load faster -->
<script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha384-KJ3o2DKtIkvYIK3UENzmM7KCkRr/rE9/Qpg6aAZGJwFDMVNA/GpGFF93hXpG5KkN" crossorigin="anonymous"></script>
<script src="../js/bootstrap.min.js"></script>
<script src="../js/bootstrap-notify.min.js"></script>
<script src="../js/emotion-detector.js"></script>
</body>
</html>
10	templates/sketchpad_input.html	Normal file
@@ -0,0 +1,10 @@
<div class="col-6">
  <h5>Sketch Pad Input: (Use your cursor to draw)</h5>
  <canvas id="canvas" width="400" height="400"></canvas><br>
  <div class="btn-group" role="group" aria-label="Basic example">
    <button type="button" class="btn btn-primary" id="submit-button">Recognize</button>
    <button type="button" class="btn btn-secondary" id="clear-button">Clear</button>
  </div>
</div>

<script src="../js/sketchpad-input.js"></script>
86	templates/tmp_html.html	Normal file
@@ -0,0 +1,86 @@
<!DOCTYPE html>
<html><head><script src="../js/all-io.js"></script>
<script crossorigin="anonymous" integrity="sha384-KJ3o2DKtIkvYIK3UENzmM7KCkRr/rE9/Qpg6aAZGJwFDMVNA/GpGFF93hXpG5KkN" src="https://code.jquery.com/jquery-3.2.1.slim.min.js"></script>
<script src="../js/bootstrap.min.js"></script>
<script src="../js/bootstrap-notify.min.js"></script>
<meta charset="utf-8"/>
<meta content="width=device-width, initial-scale=1, shrink-to-fit=no" name="viewport"/>
<meta content="" name="description"/>
<meta content="" name="author"/>
<title>Gradio</title>
<!-- Bootstrap core CSS -->
<link href="../css/bootstrap.min.css" rel="stylesheet"/>
<link href="../css/draw-a-digit.css" rel="stylesheet"/>
<link href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css" rel="stylesheet"/>
</head><body>
<nav class="navbar navbar-expand-md navbar-dark bg-dark fixed-top">
<a class="navbar-brand" href="#">Gradio</a>
<button aria-controls="navbarsExampleDefault" aria-expanded="false" aria-label="Toggle navigation" class="navbar-toggler" data-target="#navbarsExampleDefault" data-toggle="collapse" type="button">
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse" id="navbarsExampleDefault">
<ul class="navbar-nav mr-auto">
</ul>
<ul class="navbar-nav">
<!--<li class="nav-item">-->
<!--<a class="nav-link" href="#">Help</a>-->
<!--</li>-->
<li class="nav-item ">
<a class="nav-link" href="http://www.siliconprep.com"><em>Gradio</em>, a tool by Silicon School</a>
</li>
</ul>
</div>
</nav>
<main class="container starter-template" role="main">
<div class="row">
<!-- INPUT
====================================================================================================================================================== -->
<html><body><div class="col-6">
<h5>Webcam Input:</h5>
<canvas height="400" id="canvas" style="background-color:black" width="400">
<video id="video" playsinline="" style=" -moz-transform: scaleX(-1);
-o-transform: scaleX(-1);
-webkit-transform: scaleX(-1);
transform: scaleX(-1);
display: none;
">
</video>
</canvas><br/>
<div aria-label="Basic example" class="btn-group" role="group">
<button class="btn btn-primary" id="capture-button" type="button">Capture</button>
<!-- <button type="button" class="btn btn-primary" id="submit-button">Recognize</button> -->
<button class="btn btn-secondary" id="clear-button" type="button">Clear</button>
</div>
</div>
<script src="../js/webcam-input.js"></script>
</body></html>
<!-- OUTPUT
====================================================================================================================================================== -->
<html><body><div class="col-6">
<h5>Text Output:</h5>
<canvas height="400" id="predict_canvas" style="background-color:black" width="400"></canvas><br/>
</div>
<script src="../js/class-output.js"></script>
</body></html>
</div>
</main><!-- /.container -->
<footer class="footer">
<div class="container">
<span class="text-muted">
<!-- Add font awesome icons -->
Gradio is open-source, help make it better
<a class="fa fa-github" href="https://github.com/abidlabs/gradiome" target="_blank"></a>
</span>
<span class="text-muted pull-right">
<!-- Add font awesome icons -->
Found this useful? Kindly spread the word
<a class="fa fa-facebook" href="https://www.facebook.com/sharer/sharer.php?u=www.siliconprep.com" target="_blank"></a>
<a class="fa fa-twitter" href="https://twitter.com/home?status=Check%20out%20Gradio%20tool%20at%20www.siliconprep.com" target="_blank"></a>
</span>
</div>
</footer>
<!-- Bootstrap core JavaScript
================================================== -->
<!-- Placed at the end of the document so the pages load faster -->
</body></html>
19	templates/webcam_input.html	Normal file
@@ -0,0 +1,19 @@
<div class="col-6">
  <h5>Webcam Input:</h5>
  <canvas id="canvas" width="400" height="400" style="background-color:black">
    <video id="video" playsinline style=" -moz-transform: scaleX(-1);
      -o-transform: scaleX(-1);
      -webkit-transform: scaleX(-1);
      transform: scaleX(-1);
      display: none;
      ">
    </video>
  </canvas><br>
  <div class="btn-group" role="group" aria-label="Basic example">
    <button type="button" class="btn btn-primary" id="capture-button">Capture</button>
    <!-- <button type="button" class="btn btn-primary" id="submit-button">Recognize</button> -->
    <button type="button" class="btn btn-secondary" id="clear-button">Clear</button>
  </div>
</div>

<script src="../js/webcam-input.js"></script>