mirror of
https://github.com/gradio-app/gradio.git
synced 2025-01-18 10:44:33 +08:00
separated inputs and outputs, added webcam interface
This commit is contained in:
parent c7e6b5857b
commit 2b68c57792
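In short, this commit splits the old single-purpose interface into separate input components (Sketchpad, Webcam) and output components (Class) that are looked up by name in a registry and wired to a model by gradio.Interface. A minimal usage sketch, assuming a Keras classifier saved as model.h5 as in the notebooks below (the model file itself is an assumption, not part of this commit):

# Sketch of the API introduced in this commit; the names are taken from the diff below.
import gradio
from keras.models import load_model

model = load_model('model.h5')  # any Keras classifier with a compatible input shape (assumption)

# 'webcam' and 'sketchpad' resolve to the new input classes and 'class' to the Class output
# through the registry defined in gradio.py.
iface = gradio.Interface(input='webcam', output='class', model_obj=model, model_type='keras')
iface.launch()  # builds the combined template, opens it in a browser, and starts the websocket server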
136
.ipynb_checkpoints/Emotion Detector-checkpoint.ipynb
Normal file
@ -0,0 +1,136 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Using TensorFlow backend.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"import tensorflow as tf\n",
|
||||
"import sklearn\n",
|
||||
"import gradio\n",
|
||||
"from keras.models import Sequential\n",
|
||||
"from keras.layers import Dense, Dropout, Activation, Flatten\n",
|
||||
"from keras.layers import Conv2D, MaxPooling2D, BatchNormalization\n",
|
||||
"from keras.losses import categorical_crossentropy\n",
|
||||
"from keras.optimizers import Adam\n",
|
||||
"from keras.regularizers import l2\n",
|
||||
"from keras.callbacks import ReduceLROnPlateau, TensorBoard, EarlyStopping, ModelCheckpoint\n",
|
||||
"from keras.models import load_model\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"%load_ext autoreload\n",
|
||||
"%autoreload 2"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\framework\\op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
|
||||
"Instructions for updating:\n",
|
||||
"Colocations handled automatically by placer.\n",
|
||||
"WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n",
|
||||
"Instructions for updating:\n",
|
||||
"Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n",
|
||||
"WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\ops\\math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
|
||||
"Instructions for updating:\n",
|
||||
"Use tf.cast instead.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"model = load_model('model.h5') # found random emotion detector model on github ''(its not very accurate)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py:191: UserWarning: No parser was explicitly specified, so I'm using the best available HTML parser for this system (\"lxml\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n",
|
||||
"\n",
|
||||
"The code that caused this warning is on line 191 of the file C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py. To get rid of this warning, pass the additional argument 'features=\"lxml\"' to the BeautifulSoup constructor.\n",
|
||||
"\n",
|
||||
" input_soup = BeautifulSoup(input_page.read())\n",
|
||||
"C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py:192: UserWarning: No parser was explicitly specified, so I'm using the best available HTML parser for this system (\"lxml\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n",
|
||||
"\n",
|
||||
"The code that caused this warning is on line 192 of the file C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py. To get rid of this warning, pass the additional argument 'features=\"lxml\"' to the BeautifulSoup constructor.\n",
|
||||
"\n",
|
||||
" output_soup = BeautifulSoup(output_page.read())\n",
|
||||
"C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py:196: UserWarning: No parser was explicitly specified, so I'm using the best available HTML parser for this system (\"lxml\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n",
|
||||
"\n",
|
||||
"The code that caused this warning is on line 196 of the file C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py. To get rid of this warning, pass the additional argument 'features=\"lxml\"' to the BeautifulSoup constructor.\n",
|
||||
"\n",
|
||||
" all_io_soup = BeautifulSoup(all_io_page.read())\n",
|
||||
"Error in connection handler\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
|
||||
" yield from self.ws_handler(self, path)\n",
|
||||
" File \"C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py\", line 223, in communicate\n",
|
||||
" processed_input = self.input_interface._pre_process(await websocket.recv())\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 434, in recv\n",
|
||||
" yield from self.ensure_open()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 646, in ensure_open\n",
|
||||
" ) from self.transfer_data_exc\n",
|
||||
"websockets.exceptions.ConnectionClosed: WebSocket connection is closed: code = 1001 (going away), no reason\n",
|
||||
"Error in connection handler\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
|
||||
" yield from self.ws_handler(self, path)\n",
|
||||
" File \"C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py\", line 223, in communicate\n",
|
||||
" processed_input = self.input_interface._pre_process(await websocket.recv())\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 434, in recv\n",
|
||||
" yield from self.ensure_open()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 646, in ensure_open\n",
|
||||
" ) from self.transfer_data_exc\n",
|
||||
"websockets.exceptions.ConnectionClosed: WebSocket connection is closed: code = 1001 (going away), no reason\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"iface = gradio.Interface(input='sketchpad',output='class',model_obj=model, model_type='keras')\n",
|
||||
"iface.launch()"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.7.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
125
Emotion Detector.ipynb
Normal file
@ -0,0 +1,125 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Using TensorFlow backend.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"import tensorflow as tf\n",
|
||||
"import sklearn\n",
|
||||
"import gradio\n",
|
||||
"from keras.models import Sequential\n",
|
||||
"from keras.layers import Dense, Dropout, Activation, Flatten\n",
|
||||
"from keras.layers import Conv2D, MaxPooling2D, BatchNormalization\n",
|
||||
"from keras.losses import categorical_crossentropy\n",
|
||||
"from keras.optimizers import Adam\n",
|
||||
"from keras.regularizers import l2\n",
|
||||
"from keras.callbacks import ReduceLROnPlateau, TensorBoard, EarlyStopping, ModelCheckpoint\n",
|
||||
"from keras.models import load_model\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"%load_ext autoreload\n",
|
||||
"%autoreload 2"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\framework\\op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
|
||||
"Instructions for updating:\n",
|
||||
"Colocations handled automatically by placer.\n",
|
||||
"WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n",
|
||||
"Instructions for updating:\n",
|
||||
"Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n",
|
||||
"WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\ops\\math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
|
||||
"Instructions for updating:\n",
|
||||
"Use tf.cast instead.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"model = load_model('model.h5') # found random emotion detector model on github ''(its not very accurate)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py:191: UserWarning: No parser was explicitly specified, so I'm using the best available HTML parser for this system (\"lxml\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n",
|
||||
"\n",
|
||||
"The code that caused this warning is on line 191 of the file C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py. To get rid of this warning, pass the additional argument 'features=\"lxml\"' to the BeautifulSoup constructor.\n",
|
||||
"\n",
|
||||
" input_soup = BeautifulSoup(input_page.read())\n",
|
||||
"C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py:192: UserWarning: No parser was explicitly specified, so I'm using the best available HTML parser for this system (\"lxml\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n",
|
||||
"\n",
|
||||
"The code that caused this warning is on line 192 of the file C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py. To get rid of this warning, pass the additional argument 'features=\"lxml\"' to the BeautifulSoup constructor.\n",
|
||||
"\n",
|
||||
" output_soup = BeautifulSoup(output_page.read())\n",
|
||||
"C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py:196: UserWarning: No parser was explicitly specified, so I'm using the best available HTML parser for this system (\"lxml\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n",
|
||||
"\n",
|
||||
"The code that caused this warning is on line 196 of the file C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py. To get rid of this warning, pass the additional argument 'features=\"lxml\"' to the BeautifulSoup constructor.\n",
|
||||
"\n",
|
||||
" all_io_soup = BeautifulSoup(all_io_page.read())\n",
|
||||
"Error in connection handler\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
|
||||
" yield from self.ws_handler(self, path)\n",
|
||||
" File \"C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py\", line 223, in communicate\n",
|
||||
" processed_input = self.input_interface._pre_process(await websocket.recv())\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 434, in recv\n",
|
||||
" yield from self.ensure_open()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 646, in ensure_open\n",
|
||||
" ) from self.transfer_data_exc\n",
|
||||
"websockets.exceptions.ConnectionClosed: WebSocket connection is closed: code = 1001 (going away), no reason\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"iface = gradio.Interface(input='webcam',output='class',model_obj=model, model_type='keras')\n",
|
||||
"iface.launch()"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.7.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
219
Usage.ipynb
@ -16,7 +16,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
@ -38,15 +38,22 @@
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Epoch 1/1\n",
|
||||
"60000/60000 [==============================] - 25s 417us/step - loss: 0.2184 - acc: 0.9352\n",
|
||||
"10000/10000 [==============================] - 1s 89us/step\n"
|
||||
"Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz\n",
|
||||
"11493376/11490434 [==============================] - 7s 1us/step\n",
|
||||
"WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\ops\\resource_variable_ops.py:435: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
|
||||
"Instructions for updating:\n",
|
||||
"Colocations handled automatically by placer.\n",
|
||||
"WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\keras\\layers\\core.py:143: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n",
|
||||
"Instructions for updating:\n",
|
||||
"Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n",
|
||||
"60000/60000 [==============================] - 25s 413us/sample - loss: 0.2209 - acc: 0.9343\n",
|
||||
"10000/10000 [==============================] - 1s 72us/sample - loss: 0.0958 - acc: 0.9712\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"[0.10298147459961474, 0.9698]"
|
||||
"[0.09579291099403053, 0.9712]"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
@ -83,15 +90,203 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"ename": "OSError",
|
||||
"evalue": "[Errno 10048] error while attempting to bind on address ('127.0.0.1', 5679): only one usage of each socket address (protocol/network address/port) is normally permitted",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[1;31mOSError\u001b[0m Traceback (most recent call last)",
|
||||
"\u001b[1;32m<ipython-input-6-5cd3a5d36036>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mgradio\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mDrawADigit\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmodel_obj\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mmodel\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmodel_type\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m'keras'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mstart\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
|
||||
"\u001b[1;32m~\\Desktop\\gradiome\\gradio.py\u001b[0m in \u001b[0;36mstart\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 124\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 125\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mstart\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 126\u001b[1;33m \u001b[0msuper\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mstart\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 127\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 128\u001b[0m \u001b[1;32masync\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mcommunicate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mwebsocket\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mpath\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[1;32m~\\Desktop\\gradiome\\gradio.py\u001b[0m in \u001b[0;36mstart\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 90\u001b[0m \u001b[0mwebbrowser\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mopen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'file://'\u001b[0m \u001b[1;33m+\u001b[0m \u001b[0mos\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpath\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrealpath\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_get_template_path\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 91\u001b[0m \u001b[0mstart_server\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mwebsockets\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mserve\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcommunicate\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mLOCALHOST_IP\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mSOCKET_PORT\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 92\u001b[1;33m \u001b[0masyncio\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget_event_loop\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrun_until_complete\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mstart_server\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 93\u001b[0m \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 94\u001b[0m \u001b[0masyncio\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget_event_loop\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrun_forever\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[1;32m~\\Anaconda3\\lib\\site-packages\\nest_asyncio.py\u001b[0m in \u001b[0;36mrun_until_complete\u001b[1;34m(self, future)\u001b[0m\n\u001b[0;32m 59\u001b[0m \u001b[1;32mwhile\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[0mf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdone\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 60\u001b[0m \u001b[0mrun_once\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 61\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mresult\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 62\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 63\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_run_until_complete_orig\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfuture\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[1;32m~\\Anaconda3\\lib\\asyncio\\futures.py\u001b[0m in \u001b[0;36mresult\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 176\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__log_traceback\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;32mFalse\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 177\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_exception\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 178\u001b[1;33m \u001b[1;32mraise\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_exception\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 179\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_result\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 180\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[1;32m~\\Anaconda3\\lib\\asyncio\\tasks.py\u001b[0m in \u001b[0;36m__step\u001b[1;34m(***failed resolving arguments***)\u001b[0m\n\u001b[0;32m 221\u001b[0m \u001b[1;31m# We use the `send` method directly, because coroutines\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 222\u001b[0m \u001b[1;31m# don't have `__iter__` and `__next__` methods.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 223\u001b[1;33m \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mcoro\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msend\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;32mNone\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 224\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 225\u001b[0m \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mcoro\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mthrow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mexc\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[1;32m~\\Anaconda3\\lib\\asyncio\\tasks.py\u001b[0m in \u001b[0;36m_wrap_awaitable\u001b[1;34m(awaitable)\u001b[0m\n\u001b[0;32m 601\u001b[0m \u001b[0mthat\u001b[0m \u001b[0mwill\u001b[0m \u001b[0mlater\u001b[0m \u001b[0mbe\u001b[0m \u001b[0mwrapped\u001b[0m \u001b[1;32min\u001b[0m \u001b[0ma\u001b[0m \u001b[0mTask\u001b[0m \u001b[0mby\u001b[0m \u001b[0mensure_future\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 602\u001b[0m \"\"\"\n\u001b[1;32m--> 603\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[1;33m(\u001b[0m\u001b[1;32myield\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mawaitable\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__await__\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 604\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 605\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[1;32m~\\Anaconda3\\lib\\site-packages\\websockets\\py35\\server.py\u001b[0m in \u001b[0;36m__await_impl__\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 11\u001b[0m \u001b[1;31m# Duplicated with __iter__ because Python 3.7 requires an async function\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 12\u001b[0m \u001b[1;31m# (as explained in __await__ below) which Python 3.4 doesn't support.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 13\u001b[1;33m \u001b[0mserver\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;32mawait\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_creating_server\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 14\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mws_server\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwrap\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mserver\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 15\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mws_server\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[1;32m~\\Anaconda3\\lib\\asyncio\\base_events.py\u001b[0m in \u001b[0;36mcreate_server\u001b[1;34m(self, protocol_factory, host, port, family, flags, sock, backlog, ssl, reuse_address, reuse_port, ssl_handshake_timeout, start_serving)\u001b[0m\n\u001b[0;32m 1365\u001b[0m raise OSError(err.errno, 'error while attempting '\n\u001b[0;32m 1366\u001b[0m \u001b[1;34m'to bind on address %r: %s'\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1367\u001b[1;33m % (sa, err.strerror.lower())) from None\n\u001b[0m\u001b[0;32m 1368\u001b[0m \u001b[0mcompleted\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;32mTrue\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1369\u001b[0m \u001b[1;32mfinally\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[1;31mOSError\u001b[0m: [Errno 10048] error while attempting to bind on address ('127.0.0.1', 5679): only one usage of each socket address (protocol/network address/port) is normally permitted"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Using keras model\n",
|
||||
"Using keras model\n"
|
||||
"Error in connection handler\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
|
||||
" yield from self.ws_handler(self, path)\n",
|
||||
" File \"C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py\", line 135, in communicate\n",
|
||||
" imgstring = await websocket.recv()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 434, in recv\n",
|
||||
" yield from self.ensure_open()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 658, in ensure_open\n",
|
||||
" ) from self.transfer_data_exc\n",
|
||||
"websockets.exceptions.ConnectionClosed: WebSocket connection is closed: code = 1001 (going away), no reason\n",
|
||||
"Error in connection handler\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
|
||||
" yield from self.ws_handler(self, path)\n",
|
||||
" File \"C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py\", line 135, in communicate\n",
|
||||
" imgstring = await websocket.recv()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 434, in recv\n",
|
||||
" yield from self.ensure_open()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 646, in ensure_open\n",
|
||||
" ) from self.transfer_data_exc\n",
|
||||
"websockets.exceptions.ConnectionClosed: WebSocket connection is closed: code = 1001 (going away), no reason\n",
|
||||
"Error in connection handler\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
|
||||
" yield from self.ws_handler(self, path)\n",
|
||||
" File \"C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py\", line 135, in communicate\n",
|
||||
" imgstring = await websocket.recv()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 434, in recv\n",
|
||||
" yield from self.ensure_open()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 646, in ensure_open\n",
|
||||
" ) from self.transfer_data_exc\n",
|
||||
"websockets.exceptions.ConnectionClosed: WebSocket connection is closed: code = 1001 (going away), no reason\n",
|
||||
"Error in connection handler\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
|
||||
" yield from self.ws_handler(self, path)\n",
|
||||
" File \"C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py\", line 135, in communicate\n",
|
||||
" imgstring = await websocket.recv()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 434, in recv\n",
|
||||
" yield from self.ensure_open()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 646, in ensure_open\n",
|
||||
" ) from self.transfer_data_exc\n",
|
||||
"websockets.exceptions.ConnectionClosed: WebSocket connection is closed: code = 1001 (going away), no reason\n",
|
||||
"Error in connection handler\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
|
||||
" yield from self.ws_handler(self, path)\n",
|
||||
" File \"C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py\", line 135, in communicate\n",
|
||||
" imgstring = await websocket.recv()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 434, in recv\n",
|
||||
" yield from self.ensure_open()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 646, in ensure_open\n",
|
||||
" ) from self.transfer_data_exc\n",
|
||||
"websockets.exceptions.ConnectionClosed: WebSocket connection is closed: code = 1001 (going away), no reason\n",
|
||||
"Error in connection handler\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
|
||||
" yield from self.ws_handler(self, path)\n",
|
||||
" File \"C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py\", line 135, in communicate\n",
|
||||
" imgstring = await websocket.recv()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 434, in recv\n",
|
||||
" yield from self.ensure_open()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 646, in ensure_open\n",
|
||||
" ) from self.transfer_data_exc\n",
|
||||
"websockets.exceptions.ConnectionClosed: WebSocket connection is closed: code = 1001 (going away), no reason\n",
|
||||
"Error in connection handler\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
|
||||
" yield from self.ws_handler(self, path)\n",
|
||||
" File \"C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py\", line 135, in communicate\n",
|
||||
" imgstring = await websocket.recv()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 434, in recv\n",
|
||||
" yield from self.ensure_open()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 658, in ensure_open\n",
|
||||
" ) from self.transfer_data_exc\n",
|
||||
"websockets.exceptions.ConnectionClosed: WebSocket connection is closed: code = 1001 (going away), no reason\n",
|
||||
"Error in connection handler\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
|
||||
" yield from self.ws_handler(self, path)\n",
|
||||
" File \"C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py\", line 135, in communicate\n",
|
||||
" imgstring = await websocket.recv()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 434, in recv\n",
|
||||
" yield from self.ensure_open()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 646, in ensure_open\n",
|
||||
" ) from self.transfer_data_exc\n",
|
||||
"websockets.exceptions.ConnectionClosed: WebSocket connection is closed: code = 1001 (going away), no reason\n",
|
||||
"Error in connection handler\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
|
||||
" yield from self.ws_handler(self, path)\n",
|
||||
" File \"C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py\", line 135, in communicate\n",
|
||||
" imgstring = await websocket.recv()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 434, in recv\n",
|
||||
" yield from self.ensure_open()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 646, in ensure_open\n",
|
||||
" ) from self.transfer_data_exc\n",
|
||||
"websockets.exceptions.ConnectionClosed: WebSocket connection is closed: code = 1001 (going away), no reason\n",
|
||||
"Error in connection handler\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
|
||||
" yield from self.ws_handler(self, path)\n",
|
||||
" File \"C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py\", line 135, in communicate\n",
|
||||
" imgstring = await websocket.recv()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 434, in recv\n",
|
||||
" yield from self.ensure_open()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 646, in ensure_open\n",
|
||||
" ) from self.transfer_data_exc\n",
|
||||
"websockets.exceptions.ConnectionClosed: WebSocket connection is closed: code = 1001 (going away), no reason\n",
|
||||
"Error in connection handler\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
|
||||
" yield from self.ws_handler(self, path)\n",
|
||||
" File \"C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py\", line 135, in communicate\n",
|
||||
" imgstring = await websocket.recv()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 434, in recv\n",
|
||||
" yield from self.ensure_open()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 646, in ensure_open\n",
|
||||
" ) from self.transfer_data_exc\n",
|
||||
"websockets.exceptions.ConnectionClosed: WebSocket connection is closed: code = 1001 (going away), no reason\n",
|
||||
"Error in connection handler\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
|
||||
" yield from self.ws_handler(self, path)\n",
|
||||
" File \"C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py\", line 135, in communicate\n",
|
||||
" imgstring = await websocket.recv()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 434, in recv\n",
|
||||
" yield from self.ensure_open()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 646, in ensure_open\n",
|
||||
" ) from self.transfer_data_exc\n",
|
||||
"websockets.exceptions.ConnectionClosed: WebSocket connection is closed: code = 1001 (going away), no reason\n",
|
||||
"Error in connection handler\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
|
||||
" yield from self.ws_handler(self, path)\n",
|
||||
" File \"C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py\", line 135, in communicate\n",
|
||||
" imgstring = await websocket.recv()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 434, in recv\n",
|
||||
" yield from self.ensure_open()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 658, in ensure_open\n",
|
||||
" ) from self.transfer_data_exc\n",
|
||||
"websockets.exceptions.ConnectionClosed: WebSocket connection is closed: code = 1001 (going away), no reason\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Error in connection handler\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
|
||||
" yield from self.ws_handler(self, path)\n",
|
||||
" File \"C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py\", line 135, in communicate\n",
|
||||
" imgstring = await websocket.recv()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 434, in recv\n",
|
||||
" yield from self.ensure_open()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 658, in ensure_open\n",
|
||||
" ) from self.transfer_data_exc\n",
|
||||
"websockets.exceptions.ConnectionClosed: WebSocket connection is closed: code = 1001 (going away), no reason\n",
|
||||
"Error in connection handler\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
|
||||
" yield from self.ws_handler(self, path)\n",
|
||||
" File \"C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py\", line 135, in communicate\n",
|
||||
" imgstring = await websocket.recv()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 434, in recv\n",
|
||||
" yield from self.ensure_open()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\protocol.py\", line 646, in ensure_open\n",
|
||||
" ) from self.transfer_data_exc\n",
|
||||
"websockets.exceptions.ConnectionClosed: WebSocket connection is closed: code = 1001 (going away), no reason\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
@ -102,9 +297,9 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.6 (tensorflow)",
"display_name": "Python 3",
"language": "python",
"name": "tensorflow"
"name": "python3"
},
"language_info": {
"codemirror_mode": {
@ -116,7 +311,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
"version": "3.7.1"
}
},
"nbformat": 4,
171
gradio.py
@ -8,11 +8,13 @@ from io import BytesIO
import numpy as np
import os
import webbrowser
from bs4 import BeautifulSoup

nest_asyncio.apply()

LOCALHOST_IP = '127.0.0.1'
SOCKET_PORT = 5679
SOCKET_PORT = 5680


def resize_and_crop(img, size, crop_type='top'):
@ -67,33 +69,38 @@ def resize_and_crop(img, size, crop_type='top'):
|
||||
return img
|
||||
|
||||
|
||||
class AbstractInterface(ABC):
|
||||
class AbstractInput(ABC):
|
||||
"""
|
||||
An abstract class for defining the methods that all gradio interfaces should have.
|
||||
An abstract class for defining the methods that all gradio inputs should have.
|
||||
"""
|
||||
|
||||
def __init__(self, model_type, model_obj, **model_params):
|
||||
"""
|
||||
:param model_type: what kind of trained model, can be 'keras' or 'sklearn'.
|
||||
:param model_obj: the model object, such as a sklearn classifier or keras model.
|
||||
:param model_params: additional model parameters.
|
||||
"""
|
||||
self.model_type = model_type
|
||||
self.model_obj = model_obj
|
||||
self.model_params = model_params
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
|
||||
def start(self):
|
||||
|
||||
@abstractmethod
|
||||
def _get_template_path(self):
|
||||
"""
|
||||
Standard method shared by interfaces that launches a websocket at a specified IP address.
|
||||
All interfaces should define a method that returns the path to its template.
|
||||
"""
|
||||
webbrowser.open('file://' + os.path.realpath(self._get_template_path()))
|
||||
start_server = websockets.serve(self.communicate, LOCALHOST_IP, SOCKET_PORT)
|
||||
asyncio.get_event_loop().run_until_complete(start_server)
|
||||
try:
|
||||
asyncio.get_event_loop().run_forever()
|
||||
except RuntimeError: # Runtime errors are thrown in jupyter notebooks because of async.
|
||||
pass
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def _pre_process(self):
|
||||
"""
|
||||
All interfaces should define a method that returns the path to its template.
|
||||
"""
|
||||
pass
|
||||
|
||||
class AbstractOutput(ABC):
|
||||
"""
|
||||
An abstract class for defining the methods that all gradio outputs should have.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
"""
|
||||
"""
|
||||
super().__init__()
|
||||
|
||||
@abstractmethod
|
||||
def _get_template_path(self):
|
||||
@ -103,14 +110,100 @@ class AbstractInterface(ABC):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def communicate(self, websocket, path):
|
||||
def _post_process(self):
|
||||
"""
|
||||
All interfaces should define a custom method that defines how they communicate with the websocket.
|
||||
All interfaces should define a method that returns the path to its template.
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
class DrawADigit(AbstractInterface):
|
||||
class Sketchpad(AbstractInput):
|
||||
|
||||
def _get_template_path(self):
|
||||
return 'templates/sketchpad_input.html'
|
||||
|
||||
def _pre_process(self, imgstring):
|
||||
"""
|
||||
"""
|
||||
content = imgstring.split(';')[1]
|
||||
image_encoded = content.split(',')[1]
|
||||
body = base64.decodebytes(image_encoded.encode('utf-8'))
|
||||
im = Image.open(BytesIO(base64.b64decode(image_encoded))).convert('L')
|
||||
im = resize_and_crop(im, (28, 28))
|
||||
array = np.array(im).flatten().reshape(1, -1)
|
||||
return array
|
||||
|
||||
|
||||
class Webcam(AbstractInput):
|
||||
|
||||
def _get_template_path(self):
|
||||
return 'templates/webcam_input.html'
|
||||
|
||||
def _pre_process(self, imgstring):
|
||||
"""
|
||||
"""
|
||||
content = imgstring.split(';')[1]
|
||||
image_encoded = content.split(',')[1]
|
||||
body = base64.decodebytes(image_encoded.encode('utf-8'))
|
||||
im = Image.open(BytesIO(base64.b64decode(image_encoded))).convert('L')
|
||||
im = resize_and_crop(im, (48, 48))
|
||||
array = np.array(im).flatten().reshape(1,48,48,1)
|
||||
return array
|
||||
|
||||
class Class(AbstractOutput):
|
||||
|
||||
def _get_template_path(self):
|
||||
return 'templates/class_output.html'
|
||||
|
||||
def _post_process(self, prediction):
|
||||
"""
|
||||
"""
|
||||
emotion_dict = {0: "Angry", 1: "Disgust", 2: "Fear", 3: "Happy", 4: "Sad", 5: "Surprise", 6: "Neutral"}
|
||||
return emotion_dict[prediction]
|
||||
|
||||
registry = {
|
||||
'webcam':Webcam,
|
||||
'sketchpad' :Sketchpad,
|
||||
'class' :Class,
|
||||
}
|
||||
|
||||
class Interface():
|
||||
"""
|
||||
"""
|
||||
|
||||
def __init__(self, input, output, model_obj, model_type, **model_params):
|
||||
"""
|
||||
:param model_type: what kind of trained model, can be 'keras' or 'sklearn'.
|
||||
:param model_obj: the model object, such as a sklearn classifier or keras model.
|
||||
:param model_params: additional model parameters.
|
||||
"""
|
||||
self.input_interface = registry[input]()
|
||||
self.output_interface = registry[output]()
|
||||
self.model_type = model_type
|
||||
self.model_obj = model_obj
|
||||
self.model_params = model_params
|
||||
|
||||
def _build_template(self):
|
||||
input_template_path = self.input_interface._get_template_path()
|
||||
output_template_path = self.output_interface._get_template_path()
|
||||
input_page = open(input_template_path)
|
||||
output_page = open(output_template_path)
|
||||
input_soup = BeautifulSoup(input_page.read())
|
||||
output_soup = BeautifulSoup(output_page.read())
|
||||
|
||||
all_io_url = 'templates/all_io.html'
|
||||
all_io_page = open(all_io_url)
|
||||
all_io_soup = BeautifulSoup(all_io_page.read())
|
||||
input_tag = all_io_soup.find("div", {"id": "input"})
|
||||
output_tag = all_io_soup.find("div", {"id": "output"})
|
||||
|
||||
input_tag.replace_with(input_soup)
|
||||
output_tag.replace_with(output_soup)
|
||||
|
||||
f = open("templates/tmp_html.html", "w")
|
||||
f.write(str(all_io_soup.prettify))
|
||||
return 'templates/tmp_html.html'
|
||||
|
||||
def predict(self, array):
|
||||
if self.model_type=='sklearn':
|
||||
return self.model_obj.predict(array)[0]
|
||||
@ -119,11 +212,6 @@ class DrawADigit(AbstractInterface):
|
||||
else:
|
||||
raise ValueError('model_type must be sklearn.')
|
||||
|
||||
def _get_template_path(self):
|
||||
return 'templates/draw_a_digit.html'
|
||||
|
||||
def start(self):
|
||||
super().start()
|
||||
|
||||
async def communicate(self, websocket, path):
|
||||
"""
|
||||
@ -132,12 +220,19 @@ class DrawADigit(AbstractInterface):
|
||||
:param path: ignored
|
||||
"""
|
||||
while True:
|
||||
imgstring = await websocket.recv()
|
||||
content = imgstring.split(';')[1]
|
||||
image_encoded = content.split(',')[1]
|
||||
body = base64.decodebytes(image_encoded.encode('utf-8'))
|
||||
im = Image.open(BytesIO(base64.b64decode(image_encoded))).convert('L')
|
||||
im = resize_and_crop(im, (28, 28))
|
||||
array = np.array(im).flatten().reshape(1, -1)
|
||||
prediction = self.predict(array)
|
||||
await websocket.send(str(prediction))
|
||||
processed_input = self.input_interface._pre_process(await websocket.recv())
|
||||
prediction = self.predict(processed_input)
|
||||
processed_output = self.output_interface._post_process(prediction)
|
||||
await websocket.send(str(processed_output))
|
||||
|
||||
def launch(self):
|
||||
"""
|
||||
Standard method shared by interfaces that launches a websocket at a specified IP address.
|
||||
"""
|
||||
webbrowser.open('file://' + os.path.realpath(self._build_template()))
|
||||
start_server = websockets.serve(self.communicate, LOCALHOST_IP, SOCKET_PORT)
|
||||
asyncio.get_event_loop().run_until_complete(start_server)
|
||||
try:
|
||||
asyncio.get_event_loop().run_forever()
|
||||
except RuntimeError: # Runtime errors are thrown in jupyter notebooks because of async.
|
||||
pass
|
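The gradio.py diff above establishes the new contract: an input class implements _get_template_path() and _pre_process(), an output class implements _get_template_path() and _post_process(), and Interface looks both up by name in the registry. A hedged sketch of what a third input type could look like under that contract — the ImageUpload name, template path, and 64x64 target size are illustrative assumptions, not code from this commit:

# Hypothetical example only; follows the AbstractInput contract from the diff above.
class ImageUpload(AbstractInput):

    def _get_template_path(self):
        return 'templates/image_upload_input.html'  # assumed template name

    def _pre_process(self, imgstring):
        # same base64 data-URL decoding used by Sketchpad and Webcam in this commit
        content = imgstring.split(';')[1]
        image_encoded = content.split(',')[1]
        im = Image.open(BytesIO(base64.b64decode(image_encoded))).convert('L')
        im = resize_and_crop(im, (64, 64))
        return np.array(im).flatten().reshape(1, 64, 64, 1)

# registry['image_upload'] = ImageUpload would then make it addressable as
# gradio.Interface(input='image_upload', output='class', model_obj=model, model_type='keras')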
@ -176,7 +176,7 @@
<p class="card-text"><strong>Emotion Detector</strong>: allow users to make emotions through their webcam and detect their emotion with your model.</p>
<div class="d-flex justify-content-between align-items-center">
<div class="btn-group">
<button type="button" class="btn btn-sm btn-outline-secondary">View</button>
<button type="button" class="btn btn-sm btn-outline-secondary" onclick="location.href='templates/emotion_detector.html';">View</button>
<button type="button" class="btn btn-sm btn-outline-secondary">Source</button>
</div>
<small class="text-muted"></small>
13
js/all-io.js
Normal file
@ -0,0 +1,13 @@
try {
    var ws = new WebSocket("ws://127.0.0.1:5680/")
    ws.onerror = function(evt) {
        notifyError(evt)
    };

} catch (e) {
    notifyError(e)
}

const sleep = (milliseconds) => {
    return new Promise(resolve => setTimeout(resolve, milliseconds))
}
43
js/class-output.js
Normal file
@ -0,0 +1,43 @@
|
||||
var predict_canvas = document.getElementById("predict_canvas");
|
||||
var predict_ctx = predict_canvas.getContext("2d");
|
||||
|
||||
|
||||
function notifyError(error) {
|
||||
$.notify({
|
||||
// options
|
||||
message: 'Not able to communicate with model (is python code still running?)'
|
||||
},{
|
||||
// settings
|
||||
type: 'danger',
|
||||
animate: {
|
||||
enter: 'animated fadeInDown',
|
||||
exit: 'animated fadeOutUp'
|
||||
},
|
||||
placement: {
|
||||
from: "bottom",
|
||||
align: "right"
|
||||
},
|
||||
delay: 5000
|
||||
|
||||
});
|
||||
}
|
||||
|
||||
try {
|
||||
ws.onerror = function(evt) {
|
||||
notifyError(evt)
|
||||
};
|
||||
|
||||
ws.onmessage = function (event) {
|
||||
console.log(event.data);
|
||||
predict_ctx.clearRect(0, 0, 400, 400); // Clears the canvas
|
||||
predict_ctx.font = "60px Arial";
|
||||
predict_ctx.fillStyle = "white";
|
||||
sleep(300).then(() => {
|
||||
predict_ctx.textAlign = "center";
|
||||
predict_ctx.fillText(event.data, 200, 200);
|
||||
})
|
||||
|
||||
}
|
||||
} catch (e) {
|
||||
notifyError(e)
|
||||
}
|
180
js/emotion-detector.js
Normal file
@ -0,0 +1,180 @@
|
||||
videoWidth = 400;
|
||||
videoHeight = 400;
|
||||
|
||||
function isAndroid() {
|
||||
return /Android/i.test(navigator.userAgent);
|
||||
}
|
||||
|
||||
function isiOS() {
|
||||
return /iPhone|iPad|iPod/i.test(navigator.userAgent);
|
||||
}
|
||||
|
||||
function isMobile() {
|
||||
return isAndroid() || isiOS();
|
||||
}
|
||||
|
||||
var predict_canvas = document.getElementById("predict_canvas");
|
||||
var predict_ctx = predict_canvas.getContext("2d");
|
||||
var canvas = document.getElementById("canvas");
|
||||
var ctx = canvas.getContext("2d");
|
||||
|
||||
|
||||
const sleep = (milliseconds) => {
|
||||
return new Promise(resolve => setTimeout(resolve, milliseconds))
|
||||
}
|
||||
|
||||
async function setupCamera() {
|
||||
if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
|
||||
throw new Error(
|
||||
'Browser API navigator.mediaDevices.getUserMedia not available');
|
||||
}
|
||||
|
||||
const video = document.getElementById('video');
|
||||
video.width = videoWidth;
|
||||
video.height = videoHeight;
|
||||
|
||||
|
||||
const mobile = isMobile();
|
||||
const stream = await navigator.mediaDevices.getUserMedia({
|
||||
'audio': false,
|
||||
'video': {
|
||||
facingMode: 'user',
|
||||
width: mobile ? undefined : videoWidth,
|
||||
height: mobile ? undefined : videoHeight,
|
||||
},
|
||||
});
|
||||
|
||||
video.srcObject = stream;
|
||||
|
||||
return new Promise((resolve) => {
|
||||
video.onloadedmetadata = () => {
|
||||
resolve(video);
|
||||
};
|
||||
});
|
||||
|
||||
}
|
||||
|
||||
async function loadVideo() {
|
||||
const video = await setupCamera();
|
||||
video.play();
|
||||
|
||||
return video;
|
||||
}
|
||||
|
||||
function detectPoseInRealTime(video) {
|
||||
const flipHorizontal = true;
|
||||
async function poseDetectionFrame() {
|
||||
|
||||
ctx.clearRect(0, 0, videoWidth, videoHeight);
|
||||
|
||||
ctx.save();
|
||||
ctx.scale(-1, 1);
|
||||
ctx.translate(-videoWidth, 0);
|
||||
ctx.drawImage(video, 0, 0, videoWidth, videoHeight);
|
||||
ctx.restore();
|
||||
|
||||
requestAnimationFrame(poseDetectionFrame);
|
||||
|
||||
}
|
||||
|
||||
poseDetectionFrame();
|
||||
}
|
||||
|
||||
async function bindPage() {
|
||||
|
||||
let video;
|
||||
|
||||
try {
|
||||
video = await loadVideo();
|
||||
} catch (e) {
|
||||
let info = document.getElementById('info');
|
||||
info.textContent = 'this browser does not support video capture,' +
|
||||
'or this device does not have a camera';
|
||||
info.style.display = 'block';
|
||||
throw e;
|
||||
}
|
||||
|
||||
detectPoseInRealTime(video);
|
||||
|
||||
}
|
||||
|
||||
navigator.getUserMedia = navigator.getUserMedia ||
|
||||
navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
|
||||
// kick off the demo
|
||||
bindPage();
|
||||
|
||||
|
||||
function notifyError(error) {
|
||||
$.notify({
|
||||
// options
|
||||
message: 'Not able to communicate with model (is python code still running?)'
|
||||
},{
|
||||
// settings
|
||||
type: 'danger',
|
||||
animate: {
|
||||
enter: 'animated fadeInDown',
|
||||
exit: 'animated fadeOutUp'
|
||||
},
|
||||
placement: {
|
||||
from: "bottom",
|
||||
align: "right"
|
||||
},
|
||||
delay: 5000
|
||||
|
||||
});
|
||||
}
|
||||
|
||||
try {
|
||||
var ws = new WebSocket("ws://127.0.0.1:5679/")
|
||||
ws.onerror = function(evt) {
|
||||
notifyError(evt)
|
||||
};
|
||||
|
||||
ws.onmessage = function (event) {
|
||||
var emotion_dict = {0: "Angry", 1: "Disgust", 2: "Fear", 3: "Happy", 4: "Sad", 5: "Surprise", 6: "Neutral"}
|
||||
console.log(event.data);
|
||||
predict_ctx.clearRect(0, 0, ctx.canvas.width, ctx.canvas.height); // Clears the canvas
|
||||
predict_ctx.font = "60px Arial";
|
||||
predict_ctx.fillStyle = "white";
|
||||
sleep(300).then(() => {
|
||||
// predict_ctx.fillText(emotion_dict[event.data], 110, 310);
|
||||
predict_ctx.textAlign = "center";
|
||||
predict_ctx.fillText(emotion_dict[event.data], 200, 200);
|
||||
})
|
||||
|
||||
}
|
||||
} catch (e) {
|
||||
notifyError(e)
|
||||
}
|
||||
|
||||
$('#clear-button').click(function(e){
|
||||
context.clearRect(0, 0, context.canvas.width, context.canvas.height); // Clears the canvas
|
||||
clickX = new Array();
|
||||
clickY = new Array();
|
||||
clickDrag = new Array();
|
||||
context.fillStyle = "black";
|
||||
context.fillRect(0, 0, 400, 400);
|
||||
ctx.clearRect(0, 0, context.canvas.width, context.canvas.height); // Clears the canvas
|
||||
})
|
||||
|
||||
|
||||
$('#submit-button').click(function(e){
|
||||
var dataURL = canvas.toDataURL("image/png");
|
||||
ws.send(dataURL, function(e){
|
||||
notifyError(e)
|
||||
});
|
||||
|
||||
})
|
||||
|
||||
$('#capture-button').click(function(e){
|
||||
ctx.clearRect(0, 0, videoWidth, videoHeight);
|
||||
ctx.save();
|
||||
ctx.scale(-1, 1);
|
||||
ctx.translate(-videoWidth, 0);
|
||||
ctx.drawImage(video, 0, 0, videoWidth, videoHeight);
|
||||
ctx.restore();
|
||||
var dataURL = canvas.toDataURL("image/png");
|
||||
ws.send(dataURL, function(e){
|
||||
notifyError(e)
|
||||
});
|
||||
})
|
100
js/sketchpad-input.js
Normal file
@ -0,0 +1,100 @@
|
||||
var canvas = document.getElementById('canvas');
context = canvas.getContext("2d");
context.fillStyle = "black";
context.fillRect(0, 0, 400, 400);


function notifyError(error) {
    $.notify({
        // options
        message: 'Not able to communicate with model (is python code still running?)'
    },{
        // settings
        type: 'danger',
        animate: {
            enter: 'animated fadeInDown',
            exit: 'animated fadeOutUp'
        },
        placement: {
            from: "bottom",
            align: "right"
        },
        delay: 5000
    });
}


$('#canvas').mousedown(function(e){
    var mouseX = e.pageX - this.getBoundingClientRect().left + document.documentElement.scrollLeft;
    var mouseY = e.pageY - this.getBoundingClientRect().top + document.documentElement.scrollTop;

    paint = true;
    addClick(mouseX, mouseY);
    redraw();
});
$('#canvas').mousemove(function(e){
    if(paint){
        addClick(e.pageX - this.getBoundingClientRect().left + document.documentElement.scrollLeft, e.pageY - this.getBoundingClientRect().top + document.documentElement.scrollTop, true);
        redraw();
    }
});
$('#canvas').mouseup(function(e){
    paint = false;
});
$('#canvas').mouseleave(function(e){
    paint = false;
});
var clickX = new Array();
var clickY = new Array();
var clickDrag = new Array();
var paint;

function addClick(x, y, dragging)
{
    clickX.push(x);
    clickY.push(y);
    clickDrag.push(dragging);
}
function redraw(){
    context.clearRect(0, 0, context.canvas.width, context.canvas.height); // Clears the canvas
    context.fillStyle = "black";
    context.fillRect(0, 0, 400, 400);

    context.strokeStyle = "#FFF";
    context.lineJoin = "round";
    context.lineWidth = 25;

    for(var i=0; i < clickX.length; i++) {
        context.beginPath();
        if(clickDrag[i] && i){
            context.moveTo(clickX[i-1], clickY[i-1]);
        }else{
            context.moveTo(clickX[i]-1, clickY[i]);
        }
        context.lineTo(clickX[i], clickY[i]);
        context.closePath();
        context.stroke();
    }
}

$('#clear-button').click(function(e){
    context.clearRect(0, 0, context.canvas.width, context.canvas.height); // Clears the canvas
    clickX = new Array();
    clickY = new Array();
    clickDrag = new Array();
    context.fillStyle = "black";
    context.fillRect(0, 0, 400, 400);
    ctx.clearRect(0, 0, context.canvas.width, context.canvas.height); // Clears the canvas
})


$('#submit-button').click(function(e){
    var dataURL = canvas.toDataURL("image/png");
    ws.send(dataURL, function(e){
        notifyError(e)
    });
})
152
js/webcam-input.js
Normal file
@ -0,0 +1,152 @@
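// webcam-input.js: opens the user's camera with getUserMedia, continuously
// mirrors the video feed onto the 400x400 canvas, and on Capture sends the
// current frame as a PNG data URL over the page's `ws` WebSocket (assumed to
// be created elsewhere, presumably in all-io.js).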
videoWidth = 400;
videoHeight = 400;

function isAndroid() {
    return /Android/i.test(navigator.userAgent);
}

function isiOS() {
    return /iPhone|iPad|iPod/i.test(navigator.userAgent);
}

function isMobile() {
    return isAndroid() || isiOS();
}

var canvas = document.getElementById("canvas");
var ctx = canvas.getContext("2d");


async function setupCamera() {
    if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
        throw new Error(
            'Browser API navigator.mediaDevices.getUserMedia not available');
    }

    const video = document.getElementById('video');
    video.width = videoWidth;
    video.height = videoHeight;

    const mobile = isMobile();
    const stream = await navigator.mediaDevices.getUserMedia({
        'audio': false,
        'video': {
            facingMode: 'user',
            width: mobile ? undefined : videoWidth,
            height: mobile ? undefined : videoHeight,
        },
    });

    video.srcObject = stream;

    return new Promise((resolve) => {
        video.onloadedmetadata = () => {
            resolve(video);
        };
    });
}

async function loadVideo() {
    const video = await setupCamera();
    video.play();

    return video;
}

function detectPoseInRealTime(video) {
    const flipHorizontal = true;
    async function poseDetectionFrame() {

        ctx.clearRect(0, 0, videoWidth, videoHeight);

        ctx.save();
        ctx.scale(-1, 1);
        ctx.translate(-videoWidth, 0);
        ctx.drawImage(video, 0, 0, videoWidth, videoHeight);
        ctx.restore();

        requestAnimationFrame(poseDetectionFrame);
    }

    poseDetectionFrame();
}

async function bindPage() {
    let video;

    try {
        video = await loadVideo();
    } catch (e) {
        let info = document.getElementById('info');
        info.textContent = 'this browser does not support video capture,' +
            'or this device does not have a camera';
        info.style.display = 'block';
        throw e;
    }

    detectPoseInRealTime(video);
}

navigator.getUserMedia = navigator.getUserMedia ||
    navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
// kick off the demo
bindPage();


function notifyError(error) {
    $.notify({
        // options
        message: 'Not able to communicate with model (is python code still running?)'
    },{
        // settings
        type: 'danger',
        animate: {
            enter: 'animated fadeInDown',
            exit: 'animated fadeOutUp'
        },
        placement: {
            from: "bottom",
            align: "right"
        },
        delay: 5000
    });
}

$('#clear-button').click(function(e){
    // This file has no sketchpad state, so clearing only touches the webcam canvas.
    ctx.clearRect(0, 0, ctx.canvas.width, ctx.canvas.height); // Clears the canvas
    ctx.fillStyle = "black";
    ctx.fillRect(0, 0, 400, 400);
})

$('#submit-button').click(function(e){
    var dataURL = canvas.toDataURL("image/png");
    ws.send(dataURL, function(e){
        notifyError(e)
    });
})

$('#capture-button').click(function(e){
    ctx.clearRect(0, 0, videoWidth, videoHeight);
    ctx.save();
    ctx.scale(-1, 1);
    ctx.translate(-videoWidth, 0);
    ctx.drawImage(video, 0, 0, videoWidth, videoHeight);
    ctx.restore();
    var dataURL = canvas.toDataURL("image/png");
    ws.send(dataURL, function(e){
        notifyError(e)
    });
})
12
script.py
Normal file
@ -0,0 +1,12 @@
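# script.py: builds templates/tmp_html.html by splicing the sketchpad input
# template into the #input placeholder of templates/all_io.html.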
from bs4 import BeautifulSoup

sketchpad_url = 'templates/sketchpad_input.html'
all_io_url = 'templates/all_io.html'
sketchpad_page = open(sketchpad_url)
all_io_page = open(all_io_url)
sketchpad_soup = BeautifulSoup(sketchpad_page.read(), "html.parser")  # explicit parser avoids the BeautifulSoup warning
all_io_soup = BeautifulSoup(all_io_page.read(), "html.parser")
input_tag = all_io_soup.find("div", {"id": "input"})
input_tag.replace_with(sketchpad_soup)
f = open("templates/tmp_html.html", "w")
f.write(all_io_soup.prettify())  # call prettify() so HTML is written, not the bound method's repr
f.close()
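The committed templates/tmp_html.html further down was evidently generated with the webcam input template rather than the sketchpad one, so in practice this splicing step gets rerun with different input templates. A minimal sketch of how it could be factored into a reusable helper; the function name and default paths are illustrative assumptions, not part of this commit:

from bs4 import BeautifulSoup

def build_page(input_template, base_template="templates/all_io.html",
               output_path="templates/tmp_html.html"):
    # Hypothetical helper: splice an input template into the #input
    # placeholder of the base page and write the merged HTML.
    with open(base_template) as f:
        page = BeautifulSoup(f.read(), "html.parser")
    with open(input_template) as f:
        widget = BeautifulSoup(f.read(), "html.parser")
    page.find("div", {"id": "input"}).replace_with(widget)
    with open(output_path, "w") as f:
        f.write(page.prettify())
    return output_path

# e.g. build_page("templates/webcam_input.html")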
94
templates/all_io.html
Normal file
@ -0,0 +1,94 @@
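<!-- all_io.html: page skeleton; the empty #input and #output divs below are
     placeholders that script.py (or a similar step) replaces with a concrete
     input template (sketchpad/webcam) and an output template. -->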

<!doctype html>
<script src="../js/all-io.js"></script>
<script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha384-KJ3o2DKtIkvYIK3UENzmM7KCkRr/rE9/Qpg6aAZGJwFDMVNA/GpGFF93hXpG5KkN" crossorigin="anonymous"></script>
<script src="../js/bootstrap.min.js"></script>
<script src="../js/bootstrap-notify.min.js"></script>


<html lang="en">
<head>
    <meta charset="utf-8">
    <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
    <meta name="description" content="">
    <meta name="author" content="">

    <title>Gradio</title>

    <!-- Bootstrap core CSS -->
    <link href="../css/bootstrap.min.css" rel="stylesheet">
    <link href="../css/draw-a-digit.css" rel="stylesheet">
    <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css">
</head>

<body>

<nav class="navbar navbar-expand-md navbar-dark bg-dark fixed-top">
    <a class="navbar-brand" href="#">Gradio</a>
    <button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#navbarsExampleDefault" aria-controls="navbarsExampleDefault" aria-expanded="false" aria-label="Toggle navigation">
        <span class="navbar-toggler-icon"></span>
    </button>

    <div class="collapse navbar-collapse" id="navbarsExampleDefault">
        <ul class="navbar-nav mr-auto">
        </ul>
        <ul class="navbar-nav">
            <!--<li class="nav-item">-->
            <!--<a class="nav-link" href="#">Help</a>-->
            <!--</li>-->
            <li class="nav-item ">
                <a class="nav-link" href="http://www.siliconprep.com"><em>Gradio</em>, a tool by Silicon School</a>
            </li>
        </ul>
    </div>
</nav>
<main role="main" class="container starter-template">
    <div class="row">

        <!-- INPUT
        ====================================================================================================================================================== -->
        <div id="input"></div>
        <!-- OUTPUT
        ====================================================================================================================================================== -->
        <div id="output"></div>

    </div>
    </div>
</main><!-- /.container -->

<footer class="footer">
    <div class="container" >
        <span class="text-muted">

            <!-- Add font awesome icons -->
            Gradio is open-source, help make it better
            <a href="https://github.com/abidlabs/gradiome" target="_blank" class="fa fa-github"></a>

        </span>

        <span class="text-muted pull-right">

            <!-- Add font awesome icons -->
            Found this useful? Kindly spread the word
            <a href="https://www.facebook.com/sharer/sharer.php?u=www.siliconprep.com" target="_blank" class="fa fa-facebook"></a>
            <a href="https://twitter.com/home?status=Check%20out%20Gradio%20tool%20at%20www.siliconprep.com" target="_blank" class="fa fa-twitter"></a>


        </span>
    </div>
</footer>

<!-- Bootstrap core JavaScript
================================================== -->
<!-- Placed at the end of the document so the pages load faster -->

</body>
</html>
6
templates/class_output.html
Normal file
@ -0,0 +1,6 @@
<div class="col-6">
    <h5>Text Output:</h5>
    <canvas id="predict_canvas" width="400" height="400" style="background-color:black"></canvas><br>
</div>

<script src="../js/class-output.js"></script>
106
templates/emotion_detector.html
Normal file
@ -0,0 +1,106 @@

<!doctype html>
<html lang="en">
<head>
    <meta charset="utf-8">
    <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
    <meta name="description" content="">
    <meta name="author" content="">

    <title>Gradio: Emotion Detector</title>

    <!-- Bootstrap core CSS -->
    <link href="../css/bootstrap.min.css" rel="stylesheet">
    <link href="../css/draw-a-digit.css" rel="stylesheet">
    <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css">
</head>

<body>

<nav class="navbar navbar-expand-md navbar-dark bg-dark fixed-top">
    <a class="navbar-brand" href="#">Emotion Detector</a>
    <button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#navbarsExampleDefault" aria-controls="navbarsExampleDefault" aria-expanded="false" aria-label="Toggle navigation">
        <span class="navbar-toggler-icon"></span>
    </button>

    <div class="collapse navbar-collapse" id="navbarsExampleDefault">
        <ul class="navbar-nav mr-auto">
        </ul>
        <ul class="navbar-nav">
            <!--<li class="nav-item">-->
            <!--<a class="nav-link" href="#">Help</a>-->
            <!--</li>-->
            <li class="nav-item ">
                <a class="nav-link" href="http://www.siliconprep.com"><em>Gradio</em>, a tool by Silicon School</a>
            </li>
        </ul>
    </div>
</nav>

<main role="main" class="container starter-template">


    <div class="row">
        <div class="col-6">
            <h5>See if we can guess what emotion you're expressing!</h5>
            <canvas id="canvas" width="400" height="400" style="background-color:black">
                <video id="video" playsinline style=" -moz-transform: scaleX(-1);
                    -o-transform: scaleX(-1);
                    -webkit-transform: scaleX(-1);
                    transform: scaleX(-1);
                    display: none;
                ">
                </video>
            </canvas><br>
            <!-- <input type="file" onchange="previewFile()"><br>
            <img src="" height="200" alt="Image preview...">
            -->
            <div class="btn-group" role="group" aria-label="Basic example">
                <button type="button" class="btn btn-primary" id="capture-button">Capture</button>
                <!-- <button type="button" class="btn btn-primary" id="submit-button">Recognize</button> -->
                <button type="button" class="btn btn-secondary" id="clear-button">Clear</button>
            </div>
        </div>
        <div class="col-6">
            <h5>Predicted emotion appears here</h5>
            <canvas id="predict_canvas" width="400" height="400" style="background-color:black"></canvas><br>
        </div>
    </div>



    </div>

</main><!-- /.container -->

<footer class="footer">
    <div class="container" >
        <span class="text-muted">

            <!-- Add font awesome icons -->
            Gradio is open-source, help make it better
            <a href="https://github.com/abidlabs/gradiome" target="_blank" class="fa fa-github"></a>

        </span>

        <span class="text-muted pull-right">

            <!-- Add font awesome icons -->
            Found this useful? Kindly spread the word
            <a href="https://www.facebook.com/sharer/sharer.php?u=www.siliconprep.com" target="_blank" class="fa fa-facebook"></a>
            <a href="https://twitter.com/home?status=Check%20out%20Gradio%20tool%20at%20www.siliconprep.com" target="_blank" class="fa fa-twitter"></a>


        </span>
    </div>
</footer>

<!-- Bootstrap core JavaScript
================================================== -->
<!-- Placed at the end of the document so the pages load faster -->
<script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha384-KJ3o2DKtIkvYIK3UENzmM7KCkRr/rE9/Qpg6aAZGJwFDMVNA/GpGFF93hXpG5KkN" crossorigin="anonymous"></script>
<script src="../js/bootstrap.min.js"></script>
<script src="../js/bootstrap-notify.min.js"></script>
<script src="../js/emotion-detector.js"></script>
</body>
</html>
10
templates/sketchpad_input.html
Normal file
@ -0,0 +1,10 @@
<div class="col-6">
    <h5>Sketch Pad Input: (Use your cursor to draw)</h5>
    <canvas id="canvas" width="400" height="400"></canvas><br>
    <div class="btn-group" role="group" aria-label="Basic example">
        <button type="button" class="btn btn-primary" id="submit-button">Recognize</button>
        <button type="button" class="btn btn-secondary" id="clear-button">Clear</button>
    </div>
</div>

<script src="../js/sketchpad-input.js"></script>
86
templates/tmp_html.html
Normal file
@ -0,0 +1,86 @@
<bound method Tag.prettify of <!DOCTYPE html>
<html><head><script src="../js/all-io.js"></script>
<script crossorigin="anonymous" integrity="sha384-KJ3o2DKtIkvYIK3UENzmM7KCkRr/rE9/Qpg6aAZGJwFDMVNA/GpGFF93hXpG5KkN" src="https://code.jquery.com/jquery-3.2.1.slim.min.js"></script>
<script src="../js/bootstrap.min.js"></script>
<script src="../js/bootstrap-notify.min.js"></script>
<meta charset="utf-8"/>
<meta content="width=device-width, initial-scale=1, shrink-to-fit=no" name="viewport"/>
<meta content="" name="description"/>
<meta content="" name="author"/>
<title>Gradio</title>
<!-- Bootstrap core CSS -->
<link href="../css/bootstrap.min.css" rel="stylesheet"/>
<link href="../css/draw-a-digit.css" rel="stylesheet"/>
<link href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css" rel="stylesheet"/>
</head><body>
<nav class="navbar navbar-expand-md navbar-dark bg-dark fixed-top">
<a class="navbar-brand" href="#">Gradio</a>
<button aria-controls="navbarsExampleDefault" aria-expanded="false" aria-label="Toggle navigation" class="navbar-toggler" data-target="#navbarsExampleDefault" data-toggle="collapse" type="button">
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse" id="navbarsExampleDefault">
<ul class="navbar-nav mr-auto">
</ul>
<ul class="navbar-nav">
<!--<li class="nav-item">-->
<!--<a class="nav-link" href="#">Help</a>-->
<!--</li>-->
<li class="nav-item ">
<a class="nav-link" href="http://www.siliconprep.com"><em>Gradio</em>, a tool by Silicon School</a>
</li>
</ul>
</div>
</nav>
<main class="container starter-template" role="main">
<div class="row">
<!-- INPUT
====================================================================================================================================================== -->
<html><body><div class="col-6">
<h5>Webcam Input:</h5>
<canvas height="400" id="canvas" style="background-color:black" width="400">
<video id="video" playsinline="" style=" -moz-transform: scaleX(-1);
-o-transform: scaleX(-1);
-webkit-transform: scaleX(-1);
transform: scaleX(-1);
display: none;
">
</video>
</canvas><br/>
<div aria-label="Basic example" class="btn-group" role="group">
<button class="btn btn-primary" id="capture-button" type="button">Capture</button>
<!-- <button type="button" class="btn btn-primary" id="submit-button">Recognize</button> -->
<button class="btn btn-secondary" id="clear-button" type="button">Clear</button>
</div>
</div>
<script src="../js/webcam-input.js"></script>
</body></html>
<!-- OUTPUT
====================================================================================================================================================== -->
<html><body><div class="col-6">
<h5>Text Output:</h5>
<canvas height="400" id="predict_canvas" style="background-color:black" width="400"></canvas><br/>
</div>
<script src="../js/class-output.js"></script>
</body></html>
</div>
</main><!-- /.container -->
<footer class="footer">
<div class="container">
<span class="text-muted">
<!-- Add font awesome icons -->
Gradio is open-source, help make it better
<a class="fa fa-github" href="https://github.com/abidlabs/gradiome" target="_blank"></a>
</span>
<span class="text-muted pull-right">
<!-- Add font awesome icons -->
Found this useful? Kindly spread the word
<a class="fa fa-facebook" href="https://www.facebook.com/sharer/sharer.php?u=www.siliconprep.com" target="_blank"></a>
<a class="fa fa-twitter" href="https://twitter.com/home?status=Check%20out%20Gradio%20tool%20at%20www.siliconprep.com" target="_blank"></a>
</span>
</div>
</footer>
<!-- Bootstrap core JavaScript
================================================== -->
<!-- Placed at the end of the document so the pages load faster -->
</body></html>
>
19
templates/webcam_input.html
Normal file
@ -0,0 +1,19 @@
<div class="col-6">
    <h5>Webcam Input:</h5>
    <canvas id="canvas" width="400" height="400" style="background-color:black">
        <video id="video" playsinline style=" -moz-transform: scaleX(-1);
            -o-transform: scaleX(-1);
            -webkit-transform: scaleX(-1);
            transform: scaleX(-1);
            display: none;
        ">
        </video>
    </canvas><br>
    <div class="btn-group" role="group" aria-label="Basic example">
        <button type="button" class="btn btn-primary" id="capture-button">Capture</button>
        <!-- <button type="button" class="btn btn-primary" id="submit-button">Recognize</button> -->
        <button type="button" class="btn btn-secondary" id="clear-button">Clear</button>
    </div>
</div>

<script src="../js/webcam-input.js"></script>