just removed notebooks

Abubakar Abid 2019-04-28 23:41:31 -07:00
commit a9ab5e4317
44 changed files with 235 additions and 3132 deletions

.gitignore (vendored): 1 addition

@@ -4,6 +4,7 @@ staticfiles
.env
*.sqlite3
.idea/*
*.ipynb
.ipynb_checkpoints/*
models/*
.models/*


@@ -1,476 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"x_train shape: (60000, 28, 28, 1)\n",
"60000 train samples\n",
"10000 test samples\n",
"Train on 60000 samples, validate on 10000 samples\n",
"Epoch 1/12\n"
]
}
],
"source": [
"'''Trains a simple convnet on the MNIST dataset.\n",
"Gets to 99.25% test accuracy after 12 epochs\n",
"(there is still a lot of margin for parameter tuning).\n",
"16 seconds per epoch on a GRID K520 GPU.\n",
"'''\n",
"\n",
"from __future__ import print_function\n",
"import keras\n",
"from keras.datasets import mnist\n",
"from keras.models import Sequential\n",
"from keras.layers import Dense, Dropout, Flatten\n",
"from keras.layers import Conv2D, MaxPooling2D\n",
"from keras import backend as K\n",
"\n",
"batch_size = 128\n",
"num_classes = 10\n",
"epochs = 12\n",
"\n",
"# input image dimensions\n",
"img_rows, img_cols = 28, 28\n",
"\n",
"# the data, split between train and test sets\n",
"(x_train, y_train), (x_test, y_test) = mnist.load_data()\n",
"\n",
"if K.image_data_format() == 'channels_first':\n",
" x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n",
" x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n",
" input_shape = (1, img_rows, img_cols)\n",
"else:\n",
" x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n",
" x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n",
" input_shape = (img_rows, img_cols, 1)\n",
"\n",
"x_train = x_train.astype('float32')\n",
"x_test = x_test.astype('float32')\n",
"x_train /= 255\n",
"x_test /= 255\n",
"print('x_train shape:', x_train.shape)\n",
"print(x_train.shape[0], 'train samples')\n",
"print(x_test.shape[0], 'test samples')\n",
"\n",
"# convert class vectors to binary class matrices\n",
"y_train = keras.utils.to_categorical(y_train, num_classes)\n",
"y_test = keras.utils.to_categorical(y_test, num_classes)\n",
"\n",
"model = Sequential()\n",
"model.add(Conv2D(32, kernel_size=(3, 3),\n",
" activation='relu',\n",
" input_shape=input_shape))\n",
"model.add(Conv2D(64, (3, 3), activation='relu'))\n",
"model.add(MaxPooling2D(pool_size=(2, 2)))\n",
"model.add(Dropout(0.25))\n",
"model.add(Flatten())\n",
"model.add(Dense(128, activation='relu'))\n",
"model.add(Dropout(0.5))\n",
"model.add(Dense(num_classes, activation='softmax'))\n",
"\n",
"model.compile(loss=keras.losses.categorical_crossentropy,\n",
" optimizer=keras.optimizers.Adadelta(),\n",
" metrics=['accuracy'])\n",
"\n",
"model.fit(x_train, y_train,\n",
" batch_size=batch_size,\n",
" epochs=epochs,\n",
" verbose=1,\n",
" validation_data=(x_test, y_test))\n",
"score = model.evaluate(x_test, y_test, verbose=0)\n",
"print('Test loss:', score[0])\n",
"print('Test accuracy:', score[1])\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The autoreload extension is already loaded. To reload it, use:\n",
" %reload_ext autoreload\n"
]
}
],
"source": [
"import numpy as np\n",
"import base64\n",
"from PIL import Image\n",
"import tensorflow as tf\n",
"import sklearn\n",
"import gradio\n",
"from io import BytesIO\n",
"\n",
"from keras.models import Sequential\n",
"from keras.layers import Dense, Dropout, Activation, Flatten\n",
"from keras.layers import Conv2D, MaxPooling2D, BatchNormalization\n",
"from keras.losses import categorical_crossentropy\n",
"from keras.optimizers import Adam\n",
"from keras.regularizers import l2\n",
"from keras.callbacks import ReduceLROnPlateau, TensorBoard, EarlyStopping, ModelCheckpoint\n",
"from keras.models import load_model\n",
"\n",
"%load_ext autoreload\n",
"%autoreload 2"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"model = load_model('model.h5') # found random emotion detector model on github ''(its not very accurate)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"def postprocessing(prediction):\n",
" \"\"\"\n",
" \"\"\"\n",
" emotion_dict = {0: \"DANGEROUS\", 1: \"DANGEROUS\", 2: \"DANGEROUS\", 3: \"DANGEROUS\", 4: \"DANGEROUS\", 5: \"DANGEROUS\", 6: \"DANGEROUS\"}\n",
" return emotion_dict[prediction] "
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [],
"source": [
"\n",
"def resize_and_crop(img, size, crop_type='top'):\n",
" \"\"\"\n",
" Resize and crop an image to fit the specified size.\n",
" args:\n",
" img_path: path for the image to resize.\n",
" modified_path: path to store the modified image.\n",
" size: `(width, height)` tuple.\n",
" crop_type: can be 'top', 'middle' or 'bottom', depending on this\n",
" value, the image will cropped getting the 'top/left', 'midle' or\n",
" 'bottom/rigth' of the image to fit the size.\n",
" raises:\n",
" Exception: if can not open the file in img_path of there is problems\n",
" to save the image.\n",
" ValueError: if an invalid `crop_type` is provided.\n",
" \"\"\"\n",
" # Get current and desired ratio for the images\n",
" img_ratio = img.size[0] / float(img.size[1])\n",
" ratio = size[0] / float(size[1])\n",
" # The image is scaled/cropped vertically or horizontally depending on the ratio\n",
" if ratio > img_ratio:\n",
" img = img.resize((size[0], size[0] * img.size[1] / img.size[0]),\n",
" Image.ANTIALIAS)\n",
" # Crop in the top, middle or bottom\n",
" if crop_type == 'top':\n",
" box = (0, 0, img.size[0], size[1])\n",
" elif crop_type == 'middle':\n",
" box = (0, (img.size[1] - size[1]) / 2, img.size[0], (img.size[1] + size[1]) / 2)\n",
" elif crop_type == 'bottom':\n",
" box = (0, img.size[1] - size[1], img.size[0], img.size[1])\n",
" else:\n",
" raise ValueError('ERROR: invalid value for crop_type')\n",
" img = img.crop(box)\n",
" elif ratio < img_ratio:\n",
" img = img.resize((size[1] * img.size[0] / img.size[1], size[1]),\n",
" Image.ANTIALIAS)\n",
" # Crop in the top, middle or bottom\n",
" if crop_type == 'top':\n",
" box = (0, 0, size[0], img.size[1])\n",
" elif crop_type == 'middle':\n",
" box = ((img.size[0] - size[0]) / 2, 0, (img.size[0] + size[0]) / 2, img.size[1])\n",
" elif crop_type == 'bottom':\n",
" box = (img.size[0] - size[0], 0, img.size[0], img.size[1])\n",
" else:\n",
" raise ValueError('ERROR: invalid value for crop_type')\n",
" img = img.crop(box)\n",
" else:\n",
" img = img.resize((size[0], size[1]),\n",
" Image.ANTIALIAS)\n",
" # If the scale is the same, we do not need to crop\n",
" return img\n"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {},
"outputs": [],
"source": [
"def pre_p(imgstring): \n",
" content = imgstring.split(';')[1]\n",
" image_encoded = content.split(',')[1]\n",
" body = base64.decodebytes(image_encoded.encode('utf-8'))\n",
" im = Image.open(BytesIO(base64.b64decode(image_encoded))).convert('L')\n",
" im = resize_and_crop(im, (28, 28))\n",
" array = np.array(im).flatten().reshape(1, -1)\n",
" return array \n"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {},
"outputs": [
{
"ename": "OSError",
"evalue": "[Errno 10048] error while attempting to bind on address ('127.0.0.1', 5680): only one usage of each socket address (protocol/network address/port) is normally permitted",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mOSError\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m<ipython-input-19-121213b76e06>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[0miface\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mgradio\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mInterface\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m'webcam'\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0moutput\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m'class'\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mmodel_obj\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mmodel\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmodel_type\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m'keras'\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mpreprocessing_fn\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mpre_p\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mpostprocessing_fn\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mpostprocessing\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 2\u001b[1;33m \u001b[0miface\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlaunch\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[1;32m~\\Desktop\\gradiome\\gradio.py\u001b[0m in \u001b[0;36mlaunch\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 241\u001b[0m \u001b[0mwebbrowser\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mopen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'file://'\u001b[0m \u001b[1;33m+\u001b[0m \u001b[0mos\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpath\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrealpath\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_build_template\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 242\u001b[0m \u001b[0mstart_server\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mwebsockets\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mserve\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcommunicate\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mLOCALHOST_IP\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mSOCKET_PORT\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 243\u001b[1;33m \u001b[0masyncio\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget_event_loop\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrun_until_complete\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mstart_server\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 244\u001b[0m \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 245\u001b[0m \u001b[0masyncio\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget_event_loop\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrun_forever\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\Anaconda3\\lib\\site-packages\\nest_asyncio.py\u001b[0m in \u001b[0;36mrun_until_complete\u001b[1;34m(self, future)\u001b[0m\n\u001b[0;32m 59\u001b[0m \u001b[1;32mwhile\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[0mf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdone\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 60\u001b[0m \u001b[0mrun_once\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 61\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mresult\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 62\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 63\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_run_until_complete_orig\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfuture\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\Anaconda3\\lib\\asyncio\\futures.py\u001b[0m in \u001b[0;36mresult\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 176\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__log_traceback\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;32mFalse\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 177\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_exception\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 178\u001b[1;33m \u001b[1;32mraise\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_exception\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 179\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_result\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 180\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\Anaconda3\\lib\\asyncio\\tasks.py\u001b[0m in \u001b[0;36m__step\u001b[1;34m(***failed resolving arguments***)\u001b[0m\n\u001b[0;32m 221\u001b[0m \u001b[1;31m# We use the `send` method directly, because coroutines\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 222\u001b[0m \u001b[1;31m# don't have `__iter__` and `__next__` methods.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 223\u001b[1;33m \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mcoro\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msend\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;32mNone\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 224\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 225\u001b[0m \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mcoro\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mthrow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mexc\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\Anaconda3\\lib\\asyncio\\tasks.py\u001b[0m in \u001b[0;36m_wrap_awaitable\u001b[1;34m(awaitable)\u001b[0m\n\u001b[0;32m 601\u001b[0m \u001b[0mthat\u001b[0m \u001b[0mwill\u001b[0m \u001b[0mlater\u001b[0m \u001b[0mbe\u001b[0m \u001b[0mwrapped\u001b[0m \u001b[1;32min\u001b[0m \u001b[0ma\u001b[0m \u001b[0mTask\u001b[0m \u001b[0mby\u001b[0m \u001b[0mensure_future\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 602\u001b[0m \"\"\"\n\u001b[1;32m--> 603\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[1;33m(\u001b[0m\u001b[1;32myield\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mawaitable\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__await__\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 604\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 605\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\Anaconda3\\lib\\site-packages\\websockets\\py35\\server.py\u001b[0m in \u001b[0;36m__await_impl__\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 11\u001b[0m \u001b[1;31m# Duplicated with __iter__ because Python 3.7 requires an async function\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 12\u001b[0m \u001b[1;31m# (as explained in __await__ below) which Python 3.4 doesn't support.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 13\u001b[1;33m \u001b[0mserver\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;32mawait\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_creating_server\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 14\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mws_server\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwrap\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mserver\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 15\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mws_server\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\Anaconda3\\lib\\asyncio\\base_events.py\u001b[0m in \u001b[0;36mcreate_server\u001b[1;34m(self, protocol_factory, host, port, family, flags, sock, backlog, ssl, reuse_address, reuse_port, ssl_handshake_timeout, start_serving)\u001b[0m\n\u001b[0;32m 1365\u001b[0m raise OSError(err.errno, 'error while attempting '\n\u001b[0;32m 1366\u001b[0m \u001b[1;34m'to bind on address %r: %s'\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1367\u001b[1;33m % (sa, err.strerror.lower())) from None\n\u001b[0m\u001b[0;32m 1368\u001b[0m \u001b[0mcompleted\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;32mTrue\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1369\u001b[0m \u001b[1;32mfinally\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31mOSError\u001b[0m: [Errno 10048] error while attempting to bind on address ('127.0.0.1', 5680): only one usage of each socket address (protocol/network address/port) is normally permitted"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Error in connection handler\n",
"Traceback (most recent call last):\n",
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
" yield from self.ws_handler(self, path)\n",
" File \"C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py\", line 228, in communicate\n",
" prediction = self.predict(processed_input)\n",
" File \"C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py\", line 211, in predict\n",
" return self.model_obj.predict(array)[0].argmax()\n",
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\keras\\engine\\training.py\", line 1149, in predict\n",
" x, _, _ = self._standardize_user_data(x)\n",
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\keras\\engine\\training.py\", line 751, in _standardize_user_data\n",
" exception_prefix='input')\n",
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\keras\\engine\\training_utils.py\", line 128, in standardize_input_data\n",
" 'with shape ' + str(data_shape))\n",
"ValueError: Error when checking input: expected conv2d_1_input to have 4 dimensions, but got array with shape (1, 784)\n"
]
}
],
"source": [
"iface = gradio.Interface(input='webcam', output='class', model_obj=model, model_type='keras', postprocessing_fn=postprocessing)\n",
"iface.launch()"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"serving at port 7000\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Exception in thread Thread-6:\n",
"Traceback (most recent call last):\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\threading.py\", line 916, in _bootstrap_inner\n",
" self.run()\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\threading.py\", line 864, in run\n",
" self._target(*self._args, **self._kwargs)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\socketserver.py\", line 236, in serve_forever\n",
" ready = selector.select(poll_interval)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\selectors.py\", line 323, in select\n",
" r, w, _ = self._select(self._readers, self._writers, [], timeout)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\selectors.py\", line 314, in _select\n",
" r, w, x = select.select(r, w, w, timeout)\n",
"OSError: [WinError 10038] An operation was attempted on something that is not a socket\n",
"\n"
]
}
],
"source": [
"import http.server\n",
"import socketserver\n",
"import os\n",
"import threading\n",
"\n",
"TEMPLATE_DIRECTORY = 'templates'\n",
"PORT = 7000\n",
"\n",
"\n",
"web_dir = os.path.realpath(TEMPLATE_DIRECTORY)\n",
"os.chdir(web_dir)\n",
"Handler = http.server.SimpleHTTPRequestHandler\n",
"\n",
"with socketserver.TCPServer((\"\", PORT), Handler) as httpd:\n",
" print(\"serving at port\", PORT)\n",
" t = threading.Thread(target=httpd.serve_forever)\n",
" t.start()\n",
"\n",
"path_to_server = 'localhost:{}/'.format(PORT)\n",
"os.chdir('..') # TODO(abidlabs): make this better\n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"serving at port 7000\n"
]
}
],
"source": [
"import http.server\n",
"import socketserver\n",
"import os\n",
"import threading\n",
"\n",
"TEMPLATE_DIRECTORY = 'templates'\n",
"PORT = 7000\n",
"\n",
"\n",
"web_dir = os.path.realpath(TEMPLATE_DIRECTORY)\n",
"os.chdir(web_dir)\n",
"Handler = http.server.SimpleHTTPRequestHandler\n",
"\n",
"with socketserver.TCPServer((\"\", PORT), Handler) as httpd:\n",
" print(\"serving at port\", PORT)\n",
" httpd.serve_forever()\n",
"# t = threading.Thread(target=httpd.serve_forever)\n",
"# t.start()\n",
"\n",
"path_to_server = 'localhost:{}/'.format(PORT)\n",
"os.chdir('..') # TODO(abidlabs): make this better "
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import subprocess"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"<subprocess.Popen at 0x16a3f8aa390>"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"subprocess.Popen(['python', '-m', 'http.server', '6001'])"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.6 (tensorflow)",
"language": "python",
"name": "tensorflow"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
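
The ValueError recorded in this notebook ("expected conv2d_1_input to have 4 dimensions, but got array with shape (1, 784)") comes from pre_p flattening the 28x28 image while the convnet wants a 4-D batch. A minimal sketch of a fixed pre-processing step, reusing the notebook's resize_and_crop helper and imports; the 28x28 grayscale target and the pre_p_conv name are assumptions, not gradio API:

import base64
from io import BytesIO

import numpy as np
from PIL import Image

def pre_p_conv(imgstring):
    # Strip the data-URL prefix and decode to a grayscale PIL image
    image_encoded = imgstring.split(',')[1]
    im = Image.open(BytesIO(base64.b64decode(image_encoded))).convert('L')
    im = resize_and_crop(im, (28, 28))  # helper defined in the notebook above
    # (28, 28) -> (1, 28, 28, 1): add batch and channel axes instead of flattening
    return np.array(im).reshape(1, 28, 28, 1).astype('float32') / 255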


@@ -1,145 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Load the Model"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"%load_ext autoreload\n",
"%autoreload 2\n",
"\n",
"import numpy as np\n",
"import tensorflow as tf\n",
"import gradio"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"model = tf.keras.applications.MobileNet()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"inp = gradio.inputs.ImageUpload()\n",
"out = gradio.outputs.Label(label_names='imagenet1000', max_label_length=8)\n",
"\n",
"io = gradio.Interface(inputs=inp, \n",
" outputs=out,\n",
" model=model, \n",
" model_type='keras')"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"scrolled": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Validating samples: 6/6 [======]\n",
"\n",
"Validation passed successfully!\n",
"Closing existing server...\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Exception in thread Thread-6:\n",
"Traceback (most recent call last):\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\threading.py\", line 916, in _bootstrap_inner\n",
" self.run()\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\threading.py\", line 864, in run\n",
" self._target(*self._args, **self._kwargs)\n",
" File \"C:\\Users\\islam\\Repos\\gradio\\gradio\\networking.py\", line 253, in serve_forever\n",
" httpd.serve_forever()\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\socketserver.py\", line 236, in serve_forever\n",
" ready = selector.select(poll_interval)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\selectors.py\", line 323, in select\n",
" r, w, _ = self._select(self._readers, self._writers, [], timeout)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\selectors.py\", line 314, in _select\n",
" r, w, x = select.select(r, w, w, timeout)\n",
"OSError: [WinError 10038] An operation was attempted on something that is not a socket\n",
"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"NOTE: Gradio is in beta stage, please report all bugs to: gradio.app@gmail.com\n",
"Model is running locally at: http://localhost:7863/\n",
"Model available publicly at: https://21378.gradio.app\n"
]
},
{
"data": {
"text/html": [
"\n",
" <iframe\n",
" width=\"1000\"\n",
" height=\"500\"\n",
" src=\"http://localhost:7863/\"\n",
" frameborder=\"0\"\n",
" allowfullscreen\n",
" ></iframe>\n",
" "
],
"text/plain": [
"<IPython.lib.display.IFrame at 0x18fcd228550>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"io.launch(inline=True, share=True, validate=True);"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.6 (tensorflow)",
"language": "python",
"name": "tensorflow"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
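
For reference, the prediction step that the Interface above wraps can be reproduced with plain tf.keras calls. The 224x224 RGB input size and top-3 decoding are standard Keras applications API; `img` (a PIL image) and the `classify` name are illustrative assumptions:

import numpy as np
import tensorflow as tf

model = tf.keras.applications.MobileNet()

def classify(img):
    # Resize to MobileNet's default input and scale pixels to [-1, 1]
    x = np.array(img.resize((224, 224)), dtype='float32')[np.newaxis]
    x = tf.keras.applications.mobilenet.preprocess_input(x)
    preds = model.predict(x)
    # Map the 1000-way softmax onto human-readable ImageNet labels
    return tf.keras.applications.mobilenet.decode_predictions(preds, top=3)[0]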


@@ -1,180 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Using TensorFlow backend.\n"
]
}
],
"source": [
"import numpy as np\n",
"import sklearn\n",
"import gradio\n",
"from keras.models import load_model\n",
"\n",
"%load_ext autoreload\n",
"%autoreload 2"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Load an Facial Emotion Detector Model"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\framework\\op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Colocations handled automatically by placer.\n",
"WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n",
"WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\ops\\math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Use tf.cast instead.\n"
]
}
],
"source": [
"model = load_model('.models/emotion.h5')"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"def post_p(prediction): \n",
" emotional_dict = {0: \"Angry\", 1: \"Disgusted\", 2: \"Fearful\", 3: \"Happy\", 4: \"Neutral\", 5: \"Sad\", 6: \"Surprised\"}\n",
" return emotional_dict[prediction.squeeze().argmax()]"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Validating samples: 1/6 [=.....]\r\n",
"----------\n",
"Validation failed, likely due to incompatible pre-processing and model input. See below:\n",
"\n",
"Traceback (most recent call last):\n",
" File \"C:\\Users\\ALI\\Desktop\\gradiome\\gradio\\interface.py\", line 162, in validate\n",
" prediction = self.predict(processed_input)\n",
" File \"C:\\Users\\ALI\\Desktop\\gradiome\\gradio\\interface.py\", line 131, in predict\n",
" return self.model_obj.predict(preprocessed_input)\n",
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\keras\\engine\\training.py\", line 1149, in predict\n",
" x, _, _ = self._standardize_user_data(x)\n",
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\keras\\engine\\training.py\", line 751, in _standardize_user_data\n",
" exception_prefix='input')\n",
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\keras\\engine\\training_utils.py\", line 138, in standardize_input_data\n",
" str(data_shape))\n",
"ValueError: Error when checking input: expected conv2d_1_input to have shape (48, 48, 1) but got array with shape (224, 224, 3)\n",
"\n"
]
},
{
"ename": "RuntimeError",
"evalue": "Validation did not pass",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mRuntimeError\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m<ipython-input-4-617bd3537cc5>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[0miface\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mgradio\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mInterface\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minputs\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m'imageupload'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0moutputs\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m'label'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmodel\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mmodel\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmodel_type\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m'keras'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 2\u001b[1;33m \u001b[0miface\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlaunch\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mshare\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mTrue\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[1;32m~\\Desktop\\gradiome\\gradio\\interface.py\u001b[0m in \u001b[0;36mlaunch\u001b[1;34m(self, inline, inbrowser, share, validate)\u001b[0m\n\u001b[0;32m 192\u001b[0m \"\"\"\n\u001b[0;32m 193\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mvalidate\u001b[0m \u001b[1;32mand\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mvalidate_flag\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 194\u001b[1;33m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mvalidate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 195\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 196\u001b[0m \u001b[1;31m# If an existing interface is running with this instance, close it.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;32m~\\Desktop\\gradiome\\gradio\\interface.py\u001b[0m in \u001b[0;36mvalidate\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 181\u001b[0m \u001b[0mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"\\n\\nValidation passed successfully!\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 182\u001b[0m \u001b[1;32mreturn\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 183\u001b[1;33m \u001b[1;32mraise\u001b[0m \u001b[0mRuntimeError\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"Validation did not pass\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 184\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 185\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mlaunch\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minline\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mNone\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minbrowser\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mNone\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mshare\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mFalse\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mvalidate\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mTrue\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31mRuntimeError\u001b[0m: Validation did not pass"
]
}
],
"source": [
"iface = gradio.Interface(inputs='imageupload', outputs='label', model=model, model_type='keras')\n",
"iface.launch(share=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"open('faewpfjw/fefe')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from IPython.display import IFrame\n",
"display(IFrame('http://www.mcabayarea.org', width=1000, height=500))\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
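
The validation failure above is a shape mismatch: the emotion model expects (48, 48, 1) grayscale input while ImageUpload delivers a 224x224x3 array. A sketch of a pre-processing function that would reconcile the two; the function name and the exact wiring into gradio are assumptions:

import numpy as np
from PIL import Image

def to_model_input(rgb_array):
    # Collapse RGB to grayscale and shrink to the 48x48 the model was trained on
    im = Image.fromarray(rgb_array.astype('uint8')).convert('L').resize((48, 48))
    # Add batch and channel axes: (48, 48) -> (1, 48, 48, 1)
    return np.array(im, dtype='float32').reshape(1, 48, 48, 1) / 255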


@@ -1,188 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import tensorflow as tf\n",
"import gradio"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"(x_train, y_train),(x_test, y_test) = tf.keras.datasets.mnist.load_data()\n",
"x_train, x_test = x_train / 255.0, x_test / 255.0"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"model = tf.keras.models.Sequential([\n",
" tf.keras.layers.Flatten(),\n",
" tf.keras.layers.Dense(512, activation=tf.nn.relu),\n",
" tf.keras.layers.Dropout(0.2),\n",
" tf.keras.layers.Dense(10, activation=tf.nn.softmax)\n",
"])\n",
"\n",
"model.compile(optimizer='adam',\n",
" loss='sparse_categorical_crossentropy',\n",
" metrics=['accuracy'])"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/5\n",
"60000/60000 [==============================] - 28s 463us/step - loss: 0.2181 - acc: 0.9353\n",
"Epoch 2/5\n",
"60000/60000 [==============================] - 29s 481us/step - loss: 0.0972 - acc: 0.9704\n",
"Epoch 3/5\n",
"60000/60000 [==============================] - 29s 476us/step - loss: 0.0676 - acc: 0.97861s - los\n",
"Epoch 4/5\n",
"60000/60000 [==============================] - 29s 488us/step - loss: 0.0538 - acc: 0.9827\n",
"Epoch 5/5\n",
"60000/60000 [==============================] - 26s 433us/step - loss: 0.0431 - acc: 0.9857\n"
]
},
{
"data": {
"text/plain": [
"<tensorflow.python.keras.callbacks.History at 0x20075164dd8>"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"model.fit(x_train, y_train, epochs=5)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"iface = gradio.Interface(inputs=\"sketchpad\", outputs=\"label\", model=model, model_type='keras')"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Closing existing server...\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Exception in thread Thread-6:\n",
"Traceback (most recent call last):\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\threading.py\", line 916, in _bootstrap_inner\n",
" self.run()\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\threading.py\", line 864, in run\n",
" self._target(*self._args, **self._kwargs)\n",
" File \"C:\\Users\\islam\\Repos\\gradio\\gradio\\networking.py\", line 253, in serve_forever\n",
" httpd.serve_forever()\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\socketserver.py\", line 236, in serve_forever\n",
" ready = selector.select(poll_interval)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\selectors.py\", line 323, in select\n",
" r, w, _ = self._select(self._readers, self._writers, [], timeout)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\selectors.py\", line 314, in _select\n",
" r, w, x = select.select(r, w, w, timeout)\n",
"OSError: [WinError 10038] An operation was attempted on something that is not a socket\n",
"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"NOTE: Gradio is in beta stage, please report all bugs to: gradio.app@gmail.com\n",
"Model is running locally at: http://localhost:7861/\n",
"Model available publicly at: https://30540.gradio.app (may take up to a minute for link to be usable)\n"
]
},
{
"data": {
"text/html": [
"\n",
" <iframe\n",
" width=\"1000\"\n",
" height=\"500\"\n",
" src=\"http://localhost:7861/\"\n",
" frameborder=\"0\"\n",
" allowfullscreen\n",
" ></iframe>\n",
" "
],
"text/plain": [
"<IPython.lib.display.IFrame at 0x2007506d7b8>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": [
"(<gradio.networking.serve_files_in_background.<locals>.HTTPServer at 0x2006e4d6e10>,\n",
" 'http://localhost:7861/',\n",
" 'https://30540.gradio.app')"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"iface.launch(inline=True, share=True)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.6 (tensorflow)",
"language": "python",
"name": "tensorflow"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
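
A quick way to sanity-check the wrapped model outside gradio is to push one held-out digit through the dense network trained above and read off the argmax, which is essentially what the "label" output does; `model`, `x_test`, and `y_test` are the notebook's own variables:

import numpy as np

probs = model.predict(x_test[:1])   # shape (1, 10): softmax over the ten digits
print('predicted:', int(np.argmax(probs[0])), 'actual:', y_test[0])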


@@ -1,140 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Load the Model"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"%load_ext autoreload\n",
"%autoreload 2\n",
"\n",
"import numpy as np\n",
"import tensorflow as tf\n",
"import gradio"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"model = tf.keras.applications.MobileNet()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"inp = gradio.inputs.ImageUpload()\n",
"out = gradio.outputs.Label(label_names='imagenet1000', max_label_length=8)\n",
"\n",
"io = gradio.Interface(inputs=inp, \n",
" outputs=out,\n",
" model=model, \n",
" model_type='keras')"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"scrolled": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Closing existing server...\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Exception in thread Thread-7:\n",
"Traceback (most recent call last):\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\threading.py\", line 916, in _bootstrap_inner\n",
" self.run()\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\threading.py\", line 864, in run\n",
" self._target(*self._args, **self._kwargs)\n",
" File \"C:\\Users\\islam\\Repos\\gradio\\gradio\\networking.py\", line 253, in serve_forever\n",
" httpd.serve_forever()\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\socketserver.py\", line 236, in serve_forever\n",
" ready = selector.select(poll_interval)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\selectors.py\", line 323, in select\n",
" r, w, _ = self._select(self._readers, self._writers, [], timeout)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\selectors.py\", line 314, in _select\n",
" r, w, x = select.select(r, w, w, timeout)\n",
"OSError: [WinError 10038] An operation was attempted on something that is not a socket\n",
"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"NOTE: Gradio is in beta stage, please report all bugs to: gradio.app@gmail.com\n",
"Model is running locally at: http://localhost:7860/\n",
"Model available publicly at: https://53279.gradio.app (may take up to a minute for link to be usable)\n"
]
},
{
"data": {
"text/html": [
"\n",
" <iframe\n",
" width=\"1000\"\n",
" height=\"500\"\n",
" src=\"http://localhost:7860/\"\n",
" frameborder=\"0\"\n",
" allowfullscreen\n",
" ></iframe>\n",
" "
],
"text/plain": [
"<IPython.lib.display.IFrame at 0x16d5fc0be80>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"io.launch(inline=True, validate=False, share=True);"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.6 (tensorflow)",
"language": "python",
"name": "tensorflow"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
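
The recurring "OSError: [WinError 10038]" tracebacks in these notebooks happen when a server socket is closed while a background thread is still select()-ing on it, for example when a `with socketserver.TCPServer(...)` block exits. A stdlib-only sketch of the clean shutdown order, assuming port 7860 and plain SimpleHTTPRequestHandler rather than gradio's networking module:

import http.server
import socketserver
import threading

httpd = socketserver.TCPServer(('', 7860), http.server.SimpleHTTPRequestHandler)
t = threading.Thread(target=httpd.serve_forever, daemon=True)
t.start()

# ...later, before rebinding the same port:
httpd.shutdown()      # stop the serve_forever() loop from another thread
httpd.server_close()  # release the listening socket
t.join()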


@@ -1,345 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"ename": "ModuleNotFoundError",
"evalue": "No module named 'torch'",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m<ipython-input-1-800f83bb710d>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 2\u001b[0m \u001b[0mget_ipython\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrun_line_magic\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'autoreload'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'2'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 3\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 4\u001b[1;33m \u001b[1;32mimport\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 5\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mnn\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mnn\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 6\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mtorchvision\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31mModuleNotFoundError\u001b[0m: No module named 'torch'"
]
}
],
"source": [
"%load_ext autoreload\n",
"%autoreload 2\n",
"\n",
"import torch\n",
"import torch.nn as nn\n",
"import torchvision\n",
"import torchvision.transforms as transforms\n",
"import gradio"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"ename": "NameError",
"evalue": "name 'torch' is not defined",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m<ipython-input-2-aa6a81b0840f>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[1;31m# Device configuration\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 2\u001b[1;33m \u001b[0mdevice\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdevice\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'cpu'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 3\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 4\u001b[0m \u001b[1;31m# Hyper-parameters\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 5\u001b[0m \u001b[0minput_size\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;36m784\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31mNameError\u001b[0m: name 'torch' is not defined"
]
}
],
"source": [
"# Device configuration\n",
"device = torch.device('cpu')\n",
"\n",
"# Hyper-parameters \n",
"input_size = 784\n",
"hidden_size = 500\n",
"num_classes = 10\n",
"num_epochs = 2\n",
"batch_size = 100\n",
"learning_rate = 0.001\n",
"\n",
"# MNIST dataset \n",
"train_dataset = torchvision.datasets.MNIST(root='../../data', train=True, transform=transforms.ToTensor(), download=True)\n",
"test_dataset = torchvision.datasets.MNIST(root='../../data',train=False, transform=transforms.ToTensor())\n",
"train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size,shuffle=True)\n",
"test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"ename": "NameError",
"evalue": "name 'nn' is not defined",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m<ipython-input-3-b0dc3fab7f79>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[1;31m# Fully connected neural network with one hidden layer\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 2\u001b[1;33m \u001b[1;32mclass\u001b[0m \u001b[0mNeuralNet\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mnn\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mModule\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 3\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0m__init__\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minput_size\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhidden_size\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mnum_classes\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 4\u001b[0m \u001b[0msuper\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mNeuralNet\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__init__\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 5\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfc1\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnn\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mLinear\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minput_size\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhidden_size\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31mNameError\u001b[0m: name 'nn' is not defined"
]
}
],
"source": [
"# Fully connected neural network with one hidden layer\n",
"class NeuralNet(nn.Module):\n",
" def __init__(self, input_size, hidden_size, num_classes):\n",
" super(NeuralNet, self).__init__()\n",
" self.fc1 = nn.Linear(input_size, hidden_size) \n",
" self.relu = nn.ReLU()\n",
" self.fc2 = nn.Linear(hidden_size, num_classes) \n",
" \n",
" def forward(self, x):\n",
" out = self.fc1(x)\n",
" out = self.relu(out)\n",
" out = self.fc2(out)\n",
" return out\n",
"\n",
"model = NeuralNet(input_size, hidden_size, num_classes).to(device)\n",
"\n",
"# Loss and optimizer\n",
"criterion = nn.CrossEntropyLoss()\n",
"optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) \n",
"\n",
"# Train the model\n",
"total_step = len(train_loader)\n",
"for epoch in range(num_epochs):\n",
" for i, (images, labels) in enumerate(train_loader): \n",
" # Move tensors to the configured device\n",
" images = images.reshape(-1, 28*28).to(device)\n",
" labels = labels.to(device)\n",
" \n",
" # Forward pass\n",
" outputs = model(images)\n",
" loss = criterion(outputs, labels)\n",
" \n",
" # Backward and optimize\n",
" optimizer.zero_grad()\n",
" loss.backward()\n",
" optimizer.step()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"ename": "NameError",
"evalue": "name 'torch' is not defined",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m<ipython-input-4-91667d2d5612>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[1;31m# Test the model\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2\u001b[0m \u001b[1;31m# In test phase, we don't need to compute gradients (for memory efficiency)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 3\u001b[1;33m \u001b[1;32mwith\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mno_grad\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 4\u001b[0m \u001b[0mcorrect\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;36m0\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 5\u001b[0m \u001b[0mtotal\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;36m0\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31mNameError\u001b[0m: name 'torch' is not defined"
]
}
],
"source": [
"# Test the model\n",
"# In test phase, we don't need to compute gradients (for memory efficiency)\n",
"with torch.no_grad():\n",
" correct = 0\n",
" total = 0\n",
" for images, labels in test_loader:\n",
" images = images.reshape(-1, 28*28).to(device)\n",
" labels = labels.to(device)\n",
" outputs = model(images)\n",
" _, predicted = torch.max(outputs.data, 1)\n",
" total += labels.size(0)\n",
" correct += (predicted == labels).sum().item()\n",
"\n",
" print('Accuracy of the network on the 10000 test images: {} %'.format(100 * correct / total))"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"ename": "NameError",
"evalue": "name 'torch' is not defined",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m<ipython-input-5-f12a632f31c3>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mvalue\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfrom_numpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mimages\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mnumpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 2\u001b[0m \u001b[0mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mvalue\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdtype\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 3\u001b[0m \u001b[0mvalue\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mautograd\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mVariable\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mvalue\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 4\u001b[0m \u001b[0mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mvalue\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdtype\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 5\u001b[0m \u001b[0mprediction\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mmodel\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mvalue\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31mNameError\u001b[0m: name 'torch' is not defined"
]
}
],
"source": [
"value = torch.from_numpy(images.numpy())\n",
"print(value.dtype)\n",
"value = torch.autograd.Variable(value)\n",
"print(value.dtype)\n",
"prediction = model(value)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"ename": "NameError",
"evalue": "name 'images' is not defined",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m<ipython-input-6-3125682ab905>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mimages\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mnumpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mastype\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'float64'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdtype\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[1;31mNameError\u001b[0m: name 'images' is not defined"
]
}
],
"source": [
"images.numpy().astype('float64').dtype"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"ename": "NameError",
"evalue": "name 'prediction' is not defined",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m<ipython-input-7-ab875561c356>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mprediction\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mnumpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[1;31mNameError\u001b[0m: name 'prediction' is not defined"
]
}
],
"source": [
"prediction.data.numpy().shape"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"ename": "NameError",
"evalue": "name 'prediction' is not defined",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m<ipython-input-8-177468ca29d9>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mprediction\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mnumpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[1;31mNameError\u001b[0m: name 'prediction' is not defined"
]
}
],
"source": [
"prediction.data.numpy()"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"ename": "NameError",
"evalue": "name 'gradio' is not defined",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m<ipython-input-9-bf0f365c764e>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0minp\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mgradio\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0minputs\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mSketchpad\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mflatten\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mTrue\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mscale\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m/\u001b[0m\u001b[1;36m255\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m'float32'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 2\u001b[0m \u001b[0mio\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mgradio\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mInterface\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minputs\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0minp\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0moutputs\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m\"label\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmodel_type\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m\"pytorch\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmodel\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mmodel\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31mNameError\u001b[0m: name 'gradio' is not defined"
]
}
],
"source": [
"inp = gradio.inputs.Sketchpad(flatten=True, scale=1/255, dtype='float32')\n",
"io = gradio.Interface(inputs=inp, outputs=\"label\", model_type=\"pytorch\", model=model)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"ename": "NameError",
"evalue": "name 'io' is not defined",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m<ipython-input-10-137b131e2f9c>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mio\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlaunch\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[1;31mNameError\u001b[0m: name 'io' is not defined"
]
}
],
"source": [
"io.launch()"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"ename": "NameError",
"evalue": "name 'model' is not defined",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m<ipython-input-11-1f8a688cae5d>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mmodel\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[1;31mNameError\u001b[0m: name 'model' is not defined"
]
}
],
"source": [
"model"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
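The notebook above records `NameError`s for `torch`, `images`, and `model` because those cells ran without their setup cells. A minimal sketch of the setup they assume, with a torchvision MNIST loader standing in for the missing cells; `model` would still have to come from a (hypothetical) training cell:

import torch
import torchvision
import torchvision.transforms as transforms

# Hypothetical setup; names mirror the failing cells above.
loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST(root='./data', train=False, download=True,
                               transform=transforms.ToTensor()),
    batch_size=64)
images, _ = next(iter(loader))            # float32 batch, shape (64, 1, 28, 28)

value = torch.from_numpy(images.numpy())  # round-trips through numpy as float32
value = torch.autograd.Variable(value)    # Variable is a legacy wrapper here
prediction = model(value)                 # `model` must be defined by a training cell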

View File

@@ -1,178 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"%load_ext autoreload\n",
"%autoreload 2\n",
"\n",
"from sklearn import datasets, svm\n",
"import gradio\n",
"import matplotlib.pyplot as plt\n",
"\n",
"# The digits dataset\n",
"digits = datasets.load_digits()"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,\n",
" decision_function_shape='ovr', degree=3, gamma=0.001, kernel='rbf',\n",
" max_iter=-1, probability=False, random_state=None, shrinking=True,\n",
" tol=0.001, verbose=False)"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# To apply a classifier on this data, we need to flatten the image, to\n",
"# turn the data in a (samples, feature) matrix:\n",
"n_samples = len(digits.images)\n",
"data = digits.images.reshape((n_samples, -1))\n",
"\n",
"# Create a classifier: a support vector classifier\n",
"classifier = svm.SVC(gamma=0.001)\n",
"\n",
"# We learn the digits on the first half of the digits\n",
"classifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"16.0"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"data.max()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAWoAAAB4CAYAAADbsbjHAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAACUZJREFUeJzt3V2MXVUZxvHnkYrEFDptlAsQMq1cYIy2aQkJ0UgbaYJB7RClJkJiMdIm3thoSHuBBJTENkEtmmgGvxqDGlovaCAx2BpahQjS6jQRjZq2E6x8JFCmfDVo7evFPpUJlNnrTPc55z27/1/SZE7nPXuteTvznD377NXliBAAIK+3DXoCAICZEdQAkBxBDQDJEdQAkBxBDQDJEdQAkNxQBrXts2y/bPviJmtBb3uJ3vZO23vbl6DuNOXknxO2j017fH23x4uI/0bE3Ih4ssnaJti+2fYzto/a/qHts3s83hnRW9uLbf/a9vO2j/d6vM6YZ0pvP2/7j7ZftH3Y9jdsn9XjMc+U3l5v+2+dPHjW9k9sz+36OP1e8GJ7UtIXImLXDDVzIqIvP4xNsn2NpB9JWiHpWUk7JO2JiFv6NP6k2tvb90m6QtKUpG0RMafP40+qvb39oqT9kh6XdL6kByTdExF39mn8SbW3txdLejUinrN9rqQfSHoqIr7czXFSXPqwfYfte23/wvZLkm6wfYXtR21P2X7a9ndsv71TP8d22B7tPL6n8/lf2X7J9u9tL+y2tvP5j9n+e+cV8Lu2H7G9pvBL+ZykuyPirxFxRNIdkkqf2xNt6W2npz+W9JcG23NaWtTb70XEIxHx74g4LOnnkj7UXKe616LePhkRz037qxOSLum2HymCuuNaVd8g8yTdK+m4pC9Jepeqb5qrJa2b4fmflfRVSQskPSnp693W2j5f0jZJN3fGPSTp8pNPsr2w801ywVsc9/2qzkxO2i/pQtvzZphLP7Sht1m1sbcfkfREYW0vtaK3tq+0fVTSi5I+KWnLDPM4pUxB/XBE3B8RJyLiWEQ8HhGPRcTxiDgo6W5JV87w/F9GxN6I+I+kn0laMovaj0uaiIgdnc99W9L/Xw0j4lBEjETEU29x3LmSjk57fPLjc2eYSz+0obdZtaq3tm+S9EFJ36qr7YNW9DYi9kTEPEkXSbpT1QtBV/p6na/GP6c/sH2ppG9KWibpnarm+tgMz39m2sevqgrNbmsvmD6PiAjbh2tn/rqXJZ037fF50/5+kNrQ26xa01vbn1J1JvnRzqW7QWtNbzvPPWx7l6rfEi6vq58u0xn1G9/VHJf0Z0mXRMR5km6V5B7P4WlJ7zn5wLYlXdjF85+QtHja48WS/hURU81Mb9ba0NusWtFbV2+Ef1/SNRGR4bKH1JLevsEcSe/t9kmZgvqNzlV16eAVV+/4z3QtqikPSFpq+xO256i6HvbuLp7/U0k32b7U9gJJt0ja2vw0T9vQ9daVcySd3Xl8jnt86+MsDWNvV6r63r02Ivb1aI5NGMbe3mD7os7Ho6p+Y/lNt5PIHNRfUXUXxUuqXknv7fWAEfGspM+ouj73vKpXvj9Jek2SbC9ydZ/nKd84iIgHVF3D+q2kSUn/kPS1Xs97Foaut536Y6reoD2r83GaO0CmGcbe3qrqDbsH/fq9zPf3et6zMIy9/YCkR22/IulhVb91d/0C0/f7qIeJq5v+n5L06Yj43aDn0yb0tnfobe8MqreZz6gHwvbVtufZfoeq23WOS/rDgKfVCvS2d+ht72ToLUH9Zh+WdFDVLThXSxqLiNcGO6XWoLe9Q297Z+C95dIHACTHGTUAJEdQA0ByvVqZ2Mj1lO3bt9fWbNiwobZm5cqVReNt2rSptmb+/PlFxyow2xv1+3atavny5bU1U1Nla3luv/322ppVq1YVHatA+t7u3r27tmZsbKzoWEuWzLQyuny8QqezwKSR/m7evLm2ZuPGjbU1CxcurK2RpH376m8t73UucEYNAMkR1ACQHEENAMkR1ACQHEENAMkR1ACQHEENAMkR1ACQXKatuN6kZDHLoUOHamteeOGFovEWLFhQW7Nt27bamuuuu65ovOxGRkZqa/bs2VN0rIceeqi2psEFLwM1MTFRW7NixYramnnzyvZEnpycLKobBiULVUp+BsfHx2tr1q0r+2+hSxa8XHXVVUXHmi3OqAEgOYIaAJIjqAEgOYIaAJIjqAEgOYIaAJIjqAEgOYIaAJIb2IKXkpvISxazHDhwoLZm0aJFRXMq2QmmZN7DsOClZFFGg7uCFO1C0hb33Xdfbc3ixYtra0p3eCnZPWdYrF27tramZCHcsmXLamtKd3jp9WKWEpxRA0ByBDUAJEdQA0ByBDUAJEdQA0ByBDUAJEdQA0ByBDUAJDewBS8lu64sXbq0tqZ0MUuJkpvkh8GWLVtqa2677bbamqNHjzYwm8ry5csbO1Z269evr60ZHR1t5DhSe3bGkcp+ng8ePFhbU7JYrnQhS0lWzZ8/v+hYs8UZNQAkR1ADQHIENQAkR1ADQHIENQAkR1ADQHIENQAkR1ADQHKpF7yU7LjSpAw3tjehZKHEmjVramua/FqnpqYaO9YglXwdJQuOSnaBKbV169bGjjUMShbFHDlypLamdMFLSd2uXbtqa07n54kzagBIjqAGgOQIagBIjqAGgOQIagBIjqAGgOQIagBIjqAGgOQIagBIbmArE0tW6ezbt6+RsUpWHErS3r17a2tWr159utM5I01MTNTWLFmypA8zOT0lW5jdddddjYxVunpxZGSkkfHapCRfSlYTStK6detqazZv3lxbs2nTpqLxToUzagBIjqAGgOQIagBIjqAGgOQIagBIjqAGgOQIagBIjqAGgOQGtuClZDudkgUo27dvb6Sm1IYNGxo7FoZPyRZmu3fvrq3Zv39/bc3Y2FjBjKRVq1bV1tx4442NHCeDjRs31taUbJ9VuhBu586dtTW9XgjHGTUAJEdQA0ByBDUAJEdQA0ByBDUAJEdQA0ByBDUAJEdQA0ByqRe8lOyaULIA5bLLLiuaU1M7ygyDkl1BShZA7Nixo2i8kkUgJYtJBq1kF5qS3WxKakp2k5HK/g1GR0dra4ZlwUvJ7i1r165tbLySxSzj4+ONjXcqnFEDQHIENQAkR1ADQHIENQAkR1ADQHIENQAkR1ADQHIENQAk54gY9BwAADPgjBoAkiOoASA5ghoAkiOoASA5ghoAkiOoASA5ghoAkiOoASA5ghoAkiOoASA5ghoAkiOoASA5ghoAkiOoASA5ghoAkiOoASA5ghoAkiOoASA5ghoAkiOoASA5ghoAkiOoASA5ghoAkvsf2PN/nyaodHgAAAAASUVORK5CYII=\n",
"text/plain": [
"<Figure size 432x288 with 4 Axes>"
]
},
"metadata": {
"needs_background": "light"
},
"output_type": "display_data"
}
],
"source": [
"images_and_labels = list(zip(digits.images, digits.target))\n",
"for index, (image, label) in enumerate(images_and_labels[:4]):\n",
" plt.subplot(2, 4, index + 1)\n",
" plt.axis('off')\n",
" plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')\n",
" plt.title('Training: %i' % label)\n"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"ename": "TypeError",
"evalue": "predict() missing 1 required positional argument: 'X'",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mTypeError\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m<ipython-input-5-043f82e0ca32>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mclassifier\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpredict\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[1;31mTypeError\u001b[0m: predict() missing 1 required positional argument: 'X'"
]
}
],
"source": [
"classifier.predict()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"expected = digits.target[n_samples // 2:]\n",
"predicted = classifier.predict(data[n_samples // 2:])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"inp = gradio.inputs.Sketchpad(shape=(8, 8), flatten=True, scale=16/255, invert_colors=False)\n",
"io = gradio.Interface(inputs=inp, outputs=\"label\", model_type=\"sklearn\", model=classifier)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"io.launch()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
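Two notes on the cells above. `classifier.predict()` raises `TypeError` only because the feature matrix was omitted; the following cell passes `data[n_samples // 2:]` correctly. And since `data.max()` is 16.0, the `scale=16/255` given to the Sketchpad maps the canvas's 0-255 pixel values onto the 0-16 range the digits dataset uses. A sketch of scoring the held-out half, using only names the notebook already defines:

from sklearn import metrics

expected = digits.target[n_samples // 2:]
predicted = classifier.predict(data[n_samples // 2:])
print(metrics.classification_report(expected, predicted))  # per-digit precision/recall
print(metrics.confusion_matrix(expected, predicted))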

View File

@@ -1,201 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"%load_ext autoreload\n",
"%autoreload 2\n",
"\n",
"import tensorflow as tf\n",
"import gradio"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"n_classes = 10\n",
"(x_train, y_train),(x_test, y_test) = tf.keras.datasets.mnist.load_data()\n",
"x_train, x_test = x_train.reshape(-1, 784) / 255.0, x_test.reshape(-1, 784) / 255.0\n",
"y_train = tf.keras.utils.to_categorical(y_train, n_classes).astype(float)\n",
"y_test = tf.keras.utils.to_categorical(y_test, n_classes).astype(float)\n",
"\n",
"learning_rate = 0.5\n",
"epochs = 5\n",
"batch_size = 100"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\framework\\op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Colocations handled automatically by placer.\n"
]
}
],
"source": [
"x = tf.placeholder(tf.float32, [None, 784], name=\"x\")\n",
"y = tf.placeholder(tf.float32, [None, 10], name=\"y\")\n",
"\n",
"W1 = tf.Variable(tf.random_normal([784, 300], stddev=0.03), name='W1')\n",
"b1 = tf.Variable(tf.random_normal([300]), name='b1')\n",
"W2 = tf.Variable(tf.random_normal([300, 10], stddev=0.03), name='W2')\n",
"hidden_out = tf.add(tf.matmul(x, W1), b1)\n",
"hidden_out = tf.nn.relu(hidden_out)\n",
"y_ = tf.matmul(hidden_out, W2)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"probs = tf.nn.softmax(y_)\n",
"cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=y_, labels=y))\n",
"optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cross_entropy)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"init_op = tf.global_variables_initializer()\n",
"correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
"accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"sess = tf.Session()\n",
"sess.run(init_op)\n",
"total_batch = int(len(y_train) / batch_size)\n",
"for epoch in range(epochs):\n",
" avg_cost = 0\n",
" for start, end in zip(range(0, len(y_train), batch_size), range(batch_size, len(y_train)+1, batch_size)): \n",
" batch_x = x_train[start: end]\n",
" batch_y = y_train[start: end]\n",
" _, c = sess.run([optimizer, cross_entropy], feed_dict={x: batch_x, y: batch_y})\n",
" avg_cost += c / total_batch"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"def predict(inp):\n",
" return sess.run(probs, feed_dict={x:inp})"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"inp = gradio.inputs.Sketchpad(flatten=True)\n",
"io = gradio.Interface(inputs=inp, outputs=\"label\", model_type=\"pyfunc\", model=predict)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"No validation samples for this interface... skipping validation.\n",
"NOTE: Gradio is in beta stage, please report all bugs to: a12d@stanford.edu\n",
"Model is running locally at: http://localhost:7860/interface.html\n",
"Model available publicly for 8 hours at: https://share.gradio.app/25d5d472\n"
]
},
{
"data": {
"text/html": [
"\n",
" <iframe\n",
" width=\"1000\"\n",
" height=\"500\"\n",
" src=\"http://localhost:7860/interface.html\"\n",
" frameborder=\"0\"\n",
" allowfullscreen\n",
" ></iframe>\n",
" "
],
"text/plain": [
"<IPython.lib.display.IFrame at 0x229f9dade80>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": [
"(<gradio.networking.serve_files_in_background.<locals>.HTTPServer at 0x229f97553c8>,\n",
" 'http://localhost:7860/',\n",
" 'http://25d5d472.ngrok.io')"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"io.launch(share=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
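The training loop above accumulates `avg_cost` but never evaluates the `accuracy` op the notebook defines. A one-line evaluation sketch, reusing the session and placeholders from the earlier cells:

# Evaluate the graph's accuracy node on the held-out set.
test_acc = sess.run(accuracy, feed_dict={x: x_test, y: y_test})
print("test accuracy: {:.4f}".format(test_acc))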

Binary file not shown.

Binary file not shown.

View File

@@ -1,101 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Using TensorFlow backend.\n"
]
}
],
"source": [
"import numpy as np\n",
"import sklearn\n",
"import gradio\n",
"from keras.models import load_model\n",
"\n",
"%load_ext autoreload\n",
"%autoreload 2"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Load an Facial Emotion Detector Model"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\framework\\op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Colocations handled automatically by placer.\n",
"WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n",
"WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\ops\\math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Use tf.cast instead.\n"
]
}
],
"source": [
"model = load_model('../.models/emotion.h5')"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"def post_p(prediction): \n",
" emotional_dict = {0: \"Angry\", 1: \"Disgusted\", 2: \"Fearful\", 3: \"Happy\", 4: \"Neutral\", 5: \"Sad\", 6: \"Surprised\"}\n",
" return emotional_dict[prediction.squeeze().argmax()]"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"iface = gradio.Interface(inputs='imageupload', outputs='label', model=model, model_type='keras',postprocessing_fn=post_p)\n",
"iface.launch(share=False)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.0"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
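For reference, `post_p` simply maps the argmax of the 7-way prediction onto its label; with illustrative values:

import numpy as np

pred = np.array([[0.02, 0.01, 0.02, 0.88, 0.03, 0.02, 0.02]])
post_p(pred)  # -> 'Happy' (index 3 has the largest score)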

View File

@@ -1,95 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Using TensorFlow backend.\n"
]
}
],
"source": [
"import numpy as np\n",
"import sklearn\n",
"import gradio\n",
"from keras.models import load_model\n",
"\n",
"%load_ext autoreload\n",
"%autoreload 2"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Load an Facial Emotion Detector Model"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"model = load_model('../.models/emotion.h5')"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"def post_p(prediction): \n",
" emotional_dict = {0: \"Angry\", 1: \"Disgusted\", 2: \"Fearful\", 3: \"Happy\", 4: \"Neutral\", 5: \"Sad\", 6: \"Surprised\"}\n",
" return emotional_dict[prediction.squeeze().argmax()]"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"NOTE: Gradio is in beta stage, please report all bugs to: a12d@stanford.edu\n",
"Model available locally at: http://localhost:7865/interface.html\n",
"Model available publicly for 8 hours at: http://f7ae4c9b.ngrok.io/interface.html\n"
]
}
],
"source": [
"iface = gradio.Interface(input='webcam', output='class', model=model, model_type='keras',postprocessing_fn=post_p)\n",
"iface.launch()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.6 (tensorflow)",
"language": "python",
"name": "tensorflow"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -1,82 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import sklearn\n",
"import gradio\n",
"from keras.models import load_model\n",
"import sys\n",
"import warnings\n",
"\n",
"if not sys.warnoptions:\n",
" warnings.simplefilter(\"ignore\")\n",
"\n",
"%load_ext autoreload\n",
"%autoreload 2"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Load MNIST Model"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"model = load_model('../.models/mnist-cnn.h5')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Run Interface"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"iface = gradio.Interface(input='sketchpad', output='class', model=model, model_type='keras')\n",
"iface.launch(share_link=False)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.6 (tensorflow)",
"language": "python",
"name": "tensorflow"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -1,253 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Using TensorFlow backend.\n"
]
}
],
"source": [
"import numpy as np\n",
"import sklearn\n",
"import gradio\n",
"from keras.models import load_model\n",
"from keras.datasets import imdb\n",
"from keras.preprocessing import sequence\n",
"\n",
"\n",
"%load_ext autoreload\n",
"%autoreload 2"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"model = load_model('../.models/sentiment.h5')"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"from keras.preprocessing.text import one_hot\n",
"from keras.preprocessing.text import text_to_word_sequence\n",
"from keras.preprocessing import sequence\n",
"\n",
"def preprocessing(text): \n",
" word2id = imdb.get_word_index()\n",
" integers = [word2id.get(i,None) for i in text.split(\" \") if word2id.get(i,None)] \n",
" padded = np.array(integers + [0]*(500-len(integers))).reshape(1,-1)\n",
" return padded"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"def postprocessing(prediction):\n",
" if prediction[0] > 0.5:\n",
" return \"Good\"\n",
" else:\n",
" return \"Bad\""
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"NOTE: Gradio is in beta stage, please report all bugs to: a12d@stanford.edu\n",
"Model available locally at: http://localhost:7862/interface.html\n",
"Model available publicly for 8 hours at: http://8756b8ce.ngrok.io/interface.html\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Error in connection handler\n",
"Traceback (most recent call last):\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
" yield from self.ws_handler(self, path)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\gradio\\__init__.py\", line 116, in communicate\n",
" prediction = self.predict(processed_input)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\gradio\\__init__.py\", line 100, in predict\n",
" return self.model_obj.predict(array)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\engine\\training.py\", line 1169, in predict\n",
" steps=steps)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\engine\\training_arrays.py\", line 294, in predict_loop\n",
" batch_outs = f(ins_batch)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py\", line 2715, in __call__\n",
" return self._call(inputs)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py\", line 2675, in _call\n",
" fetched = self._callable_fn(*array_vals)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\python\\client\\session.py\", line 1382, in __call__\n",
" run_metadata_ptr)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\python\\framework\\errors_impl.py\", line 519, in __exit__\n",
" c_api.TF_GetCode(self.status.status))\n",
"tensorflow.python.framework.errors_impl.InvalidArgumentError: indices[0,3] = 5872 is not in [0, 5000)\n",
"\t [[Node: embedding_1/embedding_lookup = GatherV2[Taxis=DT_INT32, Tindices=DT_INT32, Tparams=DT_FLOAT, _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"](embedding_1/embeddings/read, embedding_1/Cast, embedding_1/embedding_lookup/axis)]]\n",
"Error in connection handler\n",
"Traceback (most recent call last):\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
" yield from self.ws_handler(self, path)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\gradio\\__init__.py\", line 116, in communicate\n",
" prediction = self.predict(processed_input)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\gradio\\__init__.py\", line 100, in predict\n",
" return self.model_obj.predict(array)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\engine\\training.py\", line 1169, in predict\n",
" steps=steps)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\engine\\training_arrays.py\", line 294, in predict_loop\n",
" batch_outs = f(ins_batch)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py\", line 2715, in __call__\n",
" return self._call(inputs)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py\", line 2675, in _call\n",
" fetched = self._callable_fn(*array_vals)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\python\\client\\session.py\", line 1382, in __call__\n",
" run_metadata_ptr)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\python\\framework\\errors_impl.py\", line 519, in __exit__\n",
" c_api.TF_GetCode(self.status.status))\n",
"tensorflow.python.framework.errors_impl.InvalidArgumentError: indices[0,3] = 5872 is not in [0, 5000)\n",
"\t [[Node: embedding_1/embedding_lookup = GatherV2[Taxis=DT_INT32, Tindices=DT_INT32, Tparams=DT_FLOAT, _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"](embedding_1/embeddings/read, embedding_1/Cast, embedding_1/embedding_lookup/axis)]]\n",
"Error in connection handler\n",
"Traceback (most recent call last):\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
" yield from self.ws_handler(self, path)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\gradio\\__init__.py\", line 116, in communicate\n",
" prediction = self.predict(processed_input)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\gradio\\__init__.py\", line 100, in predict\n",
" return self.model_obj.predict(array)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\engine\\training.py\", line 1169, in predict\n",
" steps=steps)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\engine\\training_arrays.py\", line 294, in predict_loop\n",
" batch_outs = f(ins_batch)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py\", line 2715, in __call__\n",
" return self._call(inputs)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py\", line 2675, in _call\n",
" fetched = self._callable_fn(*array_vals)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\python\\client\\session.py\", line 1382, in __call__\n",
" run_metadata_ptr)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\python\\framework\\errors_impl.py\", line 519, in __exit__\n",
" c_api.TF_GetCode(self.status.status))\n",
"tensorflow.python.framework.errors_impl.InvalidArgumentError: indices[0,4] = 7768 is not in [0, 5000)\n",
"\t [[Node: embedding_1/embedding_lookup = GatherV2[Taxis=DT_INT32, Tindices=DT_INT32, Tparams=DT_FLOAT, _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"](embedding_1/embeddings/read, embedding_1/Cast, embedding_1/embedding_lookup/axis)]]\n",
"Error in connection handler\n",
"Traceback (most recent call last):\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
" yield from self.ws_handler(self, path)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\gradio\\__init__.py\", line 116, in communicate\n",
" prediction = self.predict(processed_input)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\gradio\\__init__.py\", line 100, in predict\n",
" return self.model_obj.predict(array)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\engine\\training.py\", line 1169, in predict\n",
" steps=steps)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\engine\\training_arrays.py\", line 294, in predict_loop\n",
" batch_outs = f(ins_batch)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py\", line 2715, in __call__\n",
" return self._call(inputs)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py\", line 2675, in _call\n",
" fetched = self._callable_fn(*array_vals)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\python\\client\\session.py\", line 1382, in __call__\n",
" run_metadata_ptr)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\python\\framework\\errors_impl.py\", line 519, in __exit__\n",
" c_api.TF_GetCode(self.status.status))\n",
"tensorflow.python.framework.errors_impl.InvalidArgumentError: indices[0,1] = 7768 is not in [0, 5000)\n",
"\t [[Node: embedding_1/embedding_lookup = GatherV2[Taxis=DT_INT32, Tindices=DT_INT32, Tparams=DT_FLOAT, _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"](embedding_1/embeddings/read, embedding_1/Cast, embedding_1/embedding_lookup/axis)]]\n",
"Error in connection handler\n",
"Traceback (most recent call last):\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
" yield from self.ws_handler(self, path)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\gradio\\__init__.py\", line 116, in communicate\n",
" prediction = self.predict(processed_input)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\gradio\\__init__.py\", line 100, in predict\n",
" return self.model_obj.predict(array)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\engine\\training.py\", line 1169, in predict\n",
" steps=steps)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\engine\\training_arrays.py\", line 294, in predict_loop\n",
" batch_outs = f(ins_batch)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py\", line 2715, in __call__\n",
" return self._call(inputs)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py\", line 2675, in _call\n",
" fetched = self._callable_fn(*array_vals)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\python\\client\\session.py\", line 1382, in __call__\n",
" run_metadata_ptr)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\python\\framework\\errors_impl.py\", line 519, in __exit__\n",
" c_api.TF_GetCode(self.status.status))\n",
"tensorflow.python.framework.errors_impl.InvalidArgumentError: indices[0,0] = 5872 is not in [0, 5000)\n",
"\t [[Node: embedding_1/embedding_lookup = GatherV2[Taxis=DT_INT32, Tindices=DT_INT32, Tparams=DT_FLOAT, _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"](embedding_1/embeddings/read, embedding_1/Cast, embedding_1/embedding_lookup/axis)]]\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Error in connection handler\n",
"Traceback (most recent call last):\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
" yield from self.ws_handler(self, path)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\gradio\\__init__.py\", line 116, in communicate\n",
" prediction = self.predict(processed_input)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\gradio\\__init__.py\", line 100, in predict\n",
" return self.model_obj.predict(array)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\engine\\training.py\", line 1169, in predict\n",
" steps=steps)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\engine\\training_arrays.py\", line 294, in predict_loop\n",
" batch_outs = f(ins_batch)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py\", line 2715, in __call__\n",
" return self._call(inputs)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py\", line 2675, in _call\n",
" fetched = self._callable_fn(*array_vals)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\python\\client\\session.py\", line 1382, in __call__\n",
" run_metadata_ptr)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\python\\framework\\errors_impl.py\", line 519, in __exit__\n",
" c_api.TF_GetCode(self.status.status))\n",
"tensorflow.python.framework.errors_impl.InvalidArgumentError: indices[0,2] = 7768 is not in [0, 5000)\n",
"\t [[Node: embedding_1/embedding_lookup = GatherV2[Taxis=DT_INT32, Tindices=DT_INT32, Tparams=DT_FLOAT, _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"](embedding_1/embeddings/read, embedding_1/Cast, embedding_1/embedding_lookup/axis)]]\n"
]
}
],
"source": [
"gradio.Interface(input='textbox', output='class', model=model, model_type='keras', preprocessing_fn=preprocessing, postprocessing_fn=postprocessing).launch()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.6 (tensorflow)",
"language": "python",
"name": "tensorflow"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
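The repeated `InvalidArgumentError: indices ... is not in [0, 5000)` above means the embedding layer was trained on a 5,000-word vocabulary, while `preprocessing` passes raw `imdb.get_word_index()` ids that range far higher. A hedged fix, assuming the vocabulary size really is 5000 as the error messages imply:

def preprocessing(text, vocab_size=5000, maxlen=500):
    # Drop out-of-vocabulary ids so the embedding lookup stays in range.
    word2id = imdb.get_word_index()
    integers = [word2id[w] for w in text.split(" ")
                if w in word2id and word2id[w] < vocab_size]
    integers = integers[:maxlen]  # also guard against overlong reviews
    return np.array(integers + [0] * (maxlen - len(integers))).reshape(1, -1)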

View File

@@ -1,72 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import gradio"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"def capitalize_each_word(s):\n",
" return \" \".join(w.capitalize() for w in s.split())"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"iface = gradio.Interface(input=\"textbox\", output=\"textbox\", model=capitalize_each_word, model_type='func')"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"NOTE: Gradio is in beta stage, please report all bugs to: a12d@stanford.edu\n",
"Model available locally at: http://localhost:7860/interface.html\n",
"Model available publicly for 8 hours at: http://a27185eb.ngrok.io/interface.html\n"
]
}
],
"source": [
"iface.launch()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.6 (tensorflow)",
"language": "python",
"name": "tensorflow"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -1,151 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Using TensorFlow backend.\n"
]
}
],
"source": [
"import numpy as np\n",
"import sklearn\n",
"import gradio\n",
"from keras.models import load_model\n",
"\n",
"%load_ext autoreload\n",
"%autoreload 2"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Load an Facial Emotion Detector Model"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"model = load_model('../.models/emotion.h5')"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"def post_p(prediction): \n",
" emotional_dict = {0: \"Angry\", 1: \"Disgusted\", 2: \"Fearful\", 3: \"Happy\", 4: \"Neutral\", 5: \"Sad\", 6: \"Surprised\"}\n",
" return emotional_dict[prediction.squeeze().argmax()]"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"NOTE: Gradio is in beta stage, please report all bugs to: a12d@stanford.edu\n",
"Model is running locally at: http://localhost:7860/interface.html\n",
"To create a public link, set `share=True` in the argument to `launch()`\n"
]
},
{
"data": {
"text/html": [
"\n",
" <iframe\n",
" width=\"1000\"\n",
" height=\"500\"\n",
" src=\"http://localhost:7860/interface.html\"\n",
" frameborder=\"0\"\n",
" allowfullscreen\n",
" ></iframe>\n",
" "
],
"text/plain": [
"<IPython.lib.display.IFrame at 0x2725311ca20>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": [
"('http://localhost:7860/interface.html', None)"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"127.0.0.1 - - [06/Mar/2019 12:13:52] \"GET /interface.html HTTP/1.1\" 200 -\n",
"127.0.0.1 - - [06/Mar/2019 12:13:52] \"GET /static/css/gradio.css HTTP/1.1\" 200 -\n",
"127.0.0.1 - - [06/Mar/2019 12:13:52] \"GET /static/js/all-io.js HTTP/1.1\" 200 -\n",
"127.0.0.1 - - [06/Mar/2019 12:13:52] \"GET /static/js/image-upload-input.js HTTP/1.1\" 200 -\n",
"127.0.0.1 - - [06/Mar/2019 12:13:52] \"GET /static/js/class-output.js HTTP/1.1\" 200 -\n",
"Error in connection handler\n",
"Traceback (most recent call last):\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
" yield from self.ws_handler(self, path)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\gradio\\interface.py\", line 106, in communicate\n",
" prediction = self.predict(processed_input)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\gradio\\interface.py\", line 122, in predict\n",
" return self.model_obj.predict(preprocessed_input)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\engine\\training.py\", line 1149, in predict\n",
" x, _, _ = self._standardize_user_data(x)\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\engine\\training.py\", line 751, in _standardize_user_data\n",
" exception_prefix='input')\n",
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\engine\\training_utils.py\", line 138, in standardize_input_data\n",
" str(data_shape))\n",
"ValueError: Error when checking input: expected conv2d_1_input to have shape (48, 48, 1) but got array with shape (224, 224, 3)\n"
]
}
],
"source": [
"iface = gradio.Interface(inputs='imageupload', outputs='label', model=model, model_type='keras')\n",
"iface.launch(share=False)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.6 (tensorflow)",
"language": "python",
"name": "tensorflow"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
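The `ValueError` logged above is an input-shape mismatch: the emotion model expects `(48, 48, 1)` grayscale crops while the uploaded image arrives as `(224, 224, 3)`. A sketch of a preprocessing step that would reconcile the two, passed through the `preprocessing_fn` hook these notebooks use elsewhere (the resize and grayscale choices here are assumptions, not part of the original):

import numpy as np
from PIL import Image

def pre_p(img_array):
    # Downscale to 48x48 and collapse to a single grayscale channel.
    img = Image.fromarray(np.uint8(img_array)).convert("L").resize((48, 48))
    arr = np.asarray(img, dtype="float32") / 255.0
    return arr.reshape(1, 48, 48, 1)

iface = gradio.Interface(inputs='imageupload', outputs='label', model=model,
                         model_type='keras', preprocessing_fn=pre_p)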

View File

@@ -1,111 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Using TensorFlow backend.\n"
]
}
],
"source": [
"import numpy as np\n",
"import sklearn\n",
"import gradio\n",
"from keras.models import load_model\n",
"\n",
"%load_ext autoreload\n",
"%autoreload 2"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Load an Facial Emotion Detector Model"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\framework\\op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Colocations handled automatically by placer.\n",
"WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n",
"WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\ops\\math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Use tf.cast instead.\n"
]
}
],
"source": [
"model = load_model('../.models/emotion.h5')"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"def post_p(prediction): \n",
" emotional_dict = {0: \"Angry\", 1: \"Disgusted\", 2: \"Fearful\", 3: \"Happy\", 4: \"Neutral\", 5: \"Sad\", 6: \"Surprised\"}\n",
" return emotional_dict[prediction.squeeze().argmax()]"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"NOTE: Gradio is in beta stage, please report all bugs to: a12d@stanford.edu\n",
"Model available locally at: http://localhost:7870/interface.html\n",
"Model available publicly for 8 hours at: http://43693632.ngrok.io/interface.html\n"
]
}
],
"source": [
"iface = gradio.Interface(input='webcam', output='class', model=model, model_type='keras',postprocessing_fn=post_p)\n",
"iface.launch()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -1,90 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Using TensorFlow backend.\n"
]
}
],
"source": [
"import numpy as np\n",
"import sklearn\n",
"import gradio\n",
"from keras.models import load_model\n",
"import sys\n",
"import warnings\n",
"\n",
"if not sys.warnoptions:\n",
" warnings.simplefilter(\"ignore\")\n",
"\n",
"%load_ext autoreload\n",
"%autoreload 2"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Load MNIST Model"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"model = load_model('../.models/mnist-cnn.h5')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Run Interface"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"iface = gradio.Interface(input='sketchpad', output='class', model=model, model_type='keras')\n",
"iface.launch(share_link=False)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.6 (tensorflow)",
"language": "python",
"name": "tensorflow"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -1,99 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Using TensorFlow backend.\n"
]
}
],
"source": [
"import numpy as np\n",
"import sklearn\n",
"import gradio\n",
"from keras.models import load_model\n",
"from keras.datasets import imdb\n",
"from keras.preprocessing import sequence\n",
"\n",
"\n",
"%load_ext autoreload\n",
"%autoreload 2"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"model = load_model('../.models/sentiment.h5')"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"from keras.preprocessing.text import one_hot\n",
"from keras.preprocessing.text import text_to_word_sequence\n",
"from keras.preprocessing import sequence\n",
"\n",
"def preprocessing(text): \n",
" word2id = imdb.get_word_index()\n",
" integers = [word2id.get(i,None) for i in text.split(\" \") if word2id.get(i,None)] \n",
" padded = np.array(integers + [0]*(500-len(integers))).reshape(1,-1)\n",
" return padded"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"def postprocessing(prediction):\n",
" if prediction[0] > 0.5:\n",
" return \"Positive\"\n",
" else:\n",
" return \"Negative\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"gradio.Interface(input='textbox', output='class', model=model, model_type='keras', preprocessing_fn=preprocessing, postprocessing_fn=postprocessing).launch()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.6 (tensorflow)",
"language": "python",
"name": "tensorflow"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -1,111 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Collecting gradio\n",
" Downloading https://files.pythonhosted.org/packages/3a/ca/62d769859ce82ff93bc1f8c01002a912e0e55ddaf702a880a9f30aaf9303/gradio-0.1.7-py3-none-any.whl (725kB)\n",
"Requirement already satisfied: psutil in c:\\users\\islam\\anaconda3\\lib\\site-packages (from gradio) (5.4.7)\n",
"Requirement already satisfied: requests in c:\\users\\islam\\anaconda3\\lib\\site-packages (from gradio) (2.19.1)\n",
"Requirement already satisfied: Pillow in c:\\users\\islam\\anaconda3\\lib\\site-packages (from gradio) (5.2.0)\n",
"Requirement already satisfied: nest-asyncio in c:\\users\\islam\\anaconda3\\lib\\site-packages (from gradio) (0.9.10)\n",
"Requirement already satisfied: websockets in c:\\users\\islam\\anaconda3\\lib\\site-packages (from gradio) (7.0)\n",
"Requirement already satisfied: numpy in c:\\users\\islam\\anaconda3\\lib\\site-packages (from gradio) (1.15.1)\n",
"Requirement already satisfied: beautifulsoup4 in c:\\users\\islam\\anaconda3\\lib\\site-packages (from gradio) (4.6.3)\n",
"Requirement already satisfied: urllib3<1.24,>=1.21.1 in c:\\users\\islam\\anaconda3\\lib\\site-packages (from requests->gradio) (1.23)\n",
"Requirement already satisfied: certifi>=2017.4.17 in c:\\users\\islam\\anaconda3\\lib\\site-packages (from requests->gradio) (2018.8.24)\n",
"Requirement already satisfied: chardet<3.1.0,>=3.0.2 in c:\\users\\islam\\anaconda3\\lib\\site-packages (from requests->gradio) (3.0.4)\n",
"Requirement already satisfied: idna<2.8,>=2.5 in c:\\users\\islam\\anaconda3\\lib\\site-packages (from requests->gradio) (2.7)\n",
"Installing collected packages: gradio\n",
"Successfully installed gradio-0.1.7\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are using pip version 18.1, however version 19.0.2 is available.\n",
"You should consider upgrading via the 'python -m pip install --upgrade pip' command.\n"
]
}
],
"source": [
"!pip install gradio"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import gradio"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"def capitalize_each_word(s):\n",
" return \" \".join(w.capitalize() for w in s.split())"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"iface = gradio.Interface(input=\"textbox\", output=\"textbox\", model=capitalize_each_word, model_type='func')"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"NOTE: Gradio is in beta stage, please report all bugs to: a12d@stanford.edu\n",
"Model available locally at: http://localhost:7860/interface.html\n",
"Model available publicly for 8 hours at: http://aed68955.ngrok.io/interface.html\n"
]
}
],
"source": [
"iface.launch()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.0"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

BIN
examples/css/.DS_Store vendored

Binary file not shown.

View File

@@ -232,7 +232,6 @@ class ImageUpload(AbstractInput):
class CSV(AbstractInput):
def get_name(self):
# return 'templates/input/csv.html'
return 'csv'
def preprocess(self, inp):
@@ -248,6 +247,24 @@ class CSV(AbstractInput):
inp = msg['data']['inp']
return json.loads(inp)
class Microphone(AbstractInput):
def get_name(self):
return 'microphone'
def preprocess(self, inp):
"""
By default, no pre-processing is applied to a microphone input file (TODO:aliabid94 fix this)
"""
return inp
def rebuild_flagged(self, dir, msg):
"""
Default rebuild method for microphone input (currently mirrors the CSV handler)
"""
inp = msg['data']['inp']
return json.loads(inp)
# Automatically adds all subclasses of AbstractInput into a dictionary (keyed by class name) for easy referencing.
registry = {cls.__name__.lower(): cls for cls in AbstractInput.__subclasses__()}
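Because `registry` is keyed by lowercased subclass name, the new `Microphone` class becomes addressable as the string `'microphone'` with no extra wiring; this is the lookup `Interface(inputs='microphone', ...)` performs:

# String-to-class resolution driven by the registry above.
mic_cls = gradio.inputs.registry['microphone']
print(mic_cls.__name__)  # Microphone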

View File

@@ -45,6 +45,7 @@ class Interface:
preprocessing_fns=None,
postprocessing_fns=None,
verbose=True,
saliency=None,
):
"""
:param inputs: a string or `AbstractInput` representing the input interface.
@@ -54,6 +55,8 @@
provided.
:param preprocessing_fns: an optional function that overrides the preprocessing function of the input interface.
:param postprocessing_fns: an optional function that overrides the postprocessing fn of the output interface.
:param saliency: an optional function that takes the model and the processed input and returns a 2-d array
"""
if isinstance(inputs, str):
self.input_interface = gradio.inputs.registry[inputs.lower()](
@@ -94,6 +97,7 @@
self.validate_flag = False
self.simple_server = None
self.hash = random.getrandbits(32)
self.saliency = saliency
@staticmethod
def _infer_model_type(model):
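The new `saliency` hook receives the model object plus the already-preprocessed input and must return a 2-d array (it is serialized with `.tolist()` in the networking change below). A sketch of a gradient-based implementation for a Keras image model; the function and its internals are illustrative, not part of this commit:

import numpy as np
from keras import backend as K

def keras_saliency(model, processed_input):
    # Gradient of the top class score with respect to the input pixels.
    inp = np.asarray(processed_input)
    top_class = model.predict(inp).argmax()
    grad = K.gradients(model.output[0, top_class], model.input)[0]
    grad_fn = K.function([model.input], [grad])
    return np.abs(grad_fn([inp])[0]).squeeze()  # 2-d map for a grayscale image

io = gradio.Interface(inputs='sketchpad', outputs='label', model=model,
                      model_type='keras', saliency=keras_saliency)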

View File

@@ -7,7 +7,6 @@ import socket
import threading
from http.server import HTTPServer as BaseHTTPServer, SimpleHTTPRequestHandler
import pkg_resources
from bs4 import BeautifulSoup
from distutils import dir_util
from gradio import inputs, outputs
import json
@@ -43,46 +42,12 @@ FLAGGING_FILENAME = 'gradio-flagged.txt'
def build_template(temp_dir, input_interface, output_interface):
"""
Builds a complete HTML template with supporting JS and CSS files in a given directory.
:param temp_dir: string with path to temp directory in which the template should be built
:param input_interface: an AbstractInput object which includes is used to get the input template
:param output_interface: an AbstractInput object which includes is used to get the input template
Create HTML file with supporting JS and CSS files in a given directory.
:param temp_dir: string with path to temp directory in which the html file should be built
"""
input_template_path = pkg_resources.resource_filename(
"gradio",
inputs.BASE_INPUT_INTERFACE_TEMPLATE_PATH.format(input_interface.get_name()),
)
output_template_path = pkg_resources.resource_filename(
"gradio",
outputs.BASE_OUTPUT_INTERFACE_TEMPLATE_PATH.format(output_interface.get_name()),
)
input_page = open(input_template_path)
output_page = open(output_template_path)
input_soup = BeautifulSoup(
render_string_or_list_with_tags(
input_page.read(), input_interface.get_template_context()
),
features="html.parser",
)
output_soup = BeautifulSoup(
render_string_or_list_with_tags(
output_page.read(), output_interface.get_template_context()
),
features="html.parser",
)
copyfile(BASE_TEMPLATE, os.path.join(temp_dir, TEMPLATE_TEMP));
dir_util.copy_tree(STATIC_PATH_LIB, os.path.join(temp_dir, STATIC_PATH_TEMP))
all_io_page = open(BASE_TEMPLATE)
all_io_soup = BeautifulSoup(all_io_page.read(), features="html.parser")
input_tag = all_io_soup.find("div", {"id": "input"})
output_tag = all_io_soup.find("div", {"id": "output"})
# input_tag.replace_with(input_soup)
# output_tag.replace_with(output_soup)
f = open(os.path.join(temp_dir, TEMPLATE_TEMP), "w")
f.write(str(all_io_soup))
copy_files(STATIC_PATH_LIB, os.path.join(temp_dir, STATIC_PATH_TEMP))
# Move association file to root of temporary directory.
copyfile(os.path.join(temp_dir, ASSOCIATION_PATH_IN_STATIC),
os.path.join(temp_dir, ASSOCIATION_PATH_IN_ROOT))
@@ -103,15 +68,6 @@ def build_template(temp_dir, input_interface, output_interface):
)
def copy_files(src_dir, dest_dir):
"""
Copies all the files from one directory to another
:param src_dir: string path to source directory
:param dest_dir: string path to destination directory
"""
dir_util.copy_tree(src_dir, dest_dir)
def render_template_with_tags(template_path, context):
"""
Combines the given template with a given context dictionary by replacing all of the occurrences of tags (enclosed
@@ -215,8 +171,11 @@ def serve_files_in_background(interface, port, directory_to_serve=None):
processed_input = interface.input_interface.preprocess(msg["data"])
prediction = interface.predict(processed_input)
processed_output = interface.output_interface.postprocess(prediction)
output = {"action": "output", "data": processed_output}
if interface.saliency is not None:
saliency = interface.saliency(interface.model_obj, processed_input)
output = {"action": "output", "data": processed_output, "saliency": saliency.tolist()}
else:
output = {"action": "output", "data": processed_output}
# Prepare return json dictionary.
self.wfile.write(json.dumps(output).encode())
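
To make the new branch concrete, the JSON written back to the client carries an extra "saliency" key whenever a saliency function is set. A sketch of the payload, with made-up values:

# Illustrative only: "data" holds whatever postprocess() returned, and
# "saliency" is the 2-D numpy array converted to nested Python lists.
output = {
    "action": "output",
    "data": "3",
    "saliency": [[0.0, 0.1], [0.7, 0.2]],
}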

Binary file not shown.

View File

@@ -74,6 +74,11 @@
justify-content: center;
text-align: center;
line-height: 1.5em;
flex-flow: column;
}
.upload_zone img {
height: 120px;
}
.drop_zone {
border: dashed 8px #DDD;

View File

@@ -0,0 +1,52 @@
.hidden {
display: none !important;
}
.recording.input_caption {
color: #C00000;
}
.volume_display {
width: 100%;
display: flex;
}
.volume {
flex-grow: 1;
display: flex;
align-items: center;
}
.volume_left {
justify-content: flex-end;
}
.volume_bar {
height: 12px;
background-color: #C00000;
transition: width 0.1s;
}
.volume_left .volume_bar {
border-radius: 4px 0 0 4px;
}
.volume_right .volume_bar {
border-radius: 0 4px 4px 0;
}
.player {
display: flex;
width: 100%;
height: 100%;
flex-flow: column;
align-items: center;
justify-content: center;
}
.waveform {
width: 100%;
}
.waveform > wave {
overflow: visible !important;
}
.waveform canvas {
border: none !important;
}
.playpause {
margin-top: 26px;
font-size: 20px;
padding: 4px;
border-radius: 2px;
}

Binary file not shown.

New image file added  |  Size: 1.4 KiB

View File

@@ -0,0 +1,95 @@
const microphone = {
html: `
<div class="upload_zone">
<img class="not_recording" src="/static/img/mic.png" />
<div class="recording hidden volume_display">
<div class="volume volume_left">
<div class="volume_bar"></div>
</div>
<img src="/static/img/mic_recording.png" />
<div class="volume volume_right">
<div class="volume_bar"></div>
</div>
</div>
<div class="not_recording input_caption">Click to Record from Microphone</div>
<div class="recording hidden input_caption">Click to Stop Recording</div>
</div>
<div class="player hidden">
<div class="waveform"></div>
<button class="playpause primary">Play / Pause</button>
</div>
`,
state: "NO_AUDIO",
init: function() {
var io = this;
this.wavesurfer = WaveSurfer.create({
container: '.waveform',
waveColor: '#888888',
progressColor: '#EEA45D',
barWidth: 3,
hideScrollbar: true
});
this.target.find(".upload_zone").click(function() {
if (io.state == "NO_AUDIO") {
if (!has_audio_loaded) {
loadAudio();
io.mic = new p5.AudioIn();
}
io.recorder = new p5.SoundRecorder();
io.soundFile = new p5.SoundFile();
io.recorder.setInput(io.mic);
io.target.find(".recording").removeClass("hidden");
io.target.find(".not_recording").hide();
io.state = "RECORDING";
io.mic.start();
io.recorder.record(io.soundFile);
                // Poll the mic level every 100 ms to animate the volume bars.
                // Stored on `io` so the stop handler below can clear it; a
                // plain `var` here would not be visible in that handler.
                io.interval_id = window.setInterval(function () {
                    var volume = Math.floor(100 * io.mic.getLevel());
                    io.target.find(".volume_bar").width(`${(volume > 0 ? 10 : 0) + Math.round(2 * Math.sqrt(10 * volume))}px`)
                }, 100)
}
});
this.target.find(".upload_zone").mousedown(function() {
if (io.state == "RECORDING" || io.state == "STOP_RECORDING") {
io.target.find(".upload_zone").hide();
io.recorder.stop();
var blob = io.soundFile.getBlob();
var reader = new window.FileReader();
reader.readAsDataURL(blob);
reader.onloadend = function() {
io.audio_data = reader.result;
io.target.find(".player").removeClass("hidden");
io.wavesurfer.load(io.audio_data);
if (io.state == "STOP_RECORDING") {
io.state = "RECORDED";
io.submit();
}
io.state = "RECORDED";
}
                window.clearInterval(io.interval_id);  // stop the volume-meter polling
}
})
this.target.find(".playpause").click(function () {
io.wavesurfer.playPause();
})
},
submit: function() {
if (this.state == "RECORDED") {
this.io_master.input(this.id, this.audio_data);
} else if (this.state == "RECORDING") {
this.target.find(".upload_zone").mousedown();
}
},
clear: function() {
this.audio_data = null;
this.state = "NO_AUDIO";
this.target.find(".not_recording").show();
this.target.find(".recording").addClass("hidden");
this.target.find(".player").addClass("hidden");
this.target.find(".upload_zone").show();
if (this.wavesurfer) {
this.wavesurfer.stop();
}
}
}
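
Assuming a matching "microphone" entry also exists in the Python-side input registry (this commit only shows the frontend map below), the new input type could then be used along these lines; `model` and the exact Interface arguments are assumptions:

import gradio

# `model` is assumed to be a trained audio classifier.
io = gradio.Interface(inputs="microphone", outputs="label",
                      model=model, model_type="keras")
io.launch()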

View File

@@ -2,7 +2,8 @@ input_to_object_map = {
"csv" : {},
"imageupload" : image_input,
"sketchpad" : sketchpad_input,
"textbox" : textbox_input
"textbox" : textbox_input,
"microphone" : microphone
}
output_to_object_map = {
"csv" : {},

View File

@@ -13,7 +13,7 @@ $("#send_link").click(function(evt) {
"type": "POST",
"crossDomain": true,
"data": {
"url": config["ngrok_socket_url"],
"url": config["share_url"],
"name": name,
"email": email
},

3
gradio/static/js/vendor/p5.dom.min.js vendored Normal file

File diff suppressed because one or more lines are too long

3
gradio/static/js/vendor/p5.min.js vendored Normal file

File diff suppressed because one or more lines are too long

30
gradio/static/js/vendor/p5.sound.min.js vendored Normal file

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -10,6 +10,7 @@
<link rel="stylesheet" href="../static/css/interfaces/input/sketchpad.css">
<link rel="stylesheet" href="../static/css/interfaces/input/textbox.css">
<link rel="stylesheet" href="../static/css/interfaces/input/csv.css">
<link rel="stylesheet" href="../static/css/interfaces/input/microphone.css">
<link rel="stylesheet" href="../static/css/interfaces/output/image.css">
<link rel="stylesheet" href="../static/css/interfaces/output/label.css">
<link rel="stylesheet" href="../static/css/interfaces/output/textbox.css">
@@ -77,6 +78,11 @@
<script src="../static/js/interfaces/input/sketchpad.js"></script>
<script src="../static/js/interfaces/input/textbox.js"></script>
<script src="../static/js/interfaces/input/csv.js"></script>
<script src="../static/js/interfaces/input/microphone.js"></script>
<script src="../static/js/vendor/wavesurfer.min.js"></script>
<script src="../static/js/vendor/p5.min.js"></script>
<script src="../static/js/vendor/p5.sound.min.js"></script>
<script src="../static/js/vendor/p5.dom.min.js"></script>
<script src="../static/js/interfaces/output/image.js"></script>
<script src="../static/js/interfaces/output/label.js"></script>
<script src="../static/js/interfaces/output/textbox.js"></script>

View File

@@ -1,11 +0,0 @@
<div class="gradio input csv">
<div class="role">Input</div>
<div class="input_csv drop_mode">
<div class="input_caption">Drop CSV File Here<br>- or -<br>Click to Upload</div>
</div>
<div class="table_holder"><table class="csv_preview"></table></div>
<input class="hidden_upload" type="file" accept=".csv" />
</div>
<script src="../static/js/vendor/papaparse.min.js"></script>
<script src="../static/js/interfaces/input/csv.js"></script>

View File

@@ -1,12 +0,0 @@
<div class="gradio input image_file">
<div class="role">Input</div>
<div class="input_image drop_mode">
<div class="input_caption">Drop Image Here<br>- or -<br>Click to Upload</div>
<img />
</div>
<input class="hidden_upload" type="file" accept="image/x-png,image/gif,image/jpeg" />
</div>
<link rel="stylesheet" href="../static/css/vendor/cropper.css">
<script src="../static/js/vendor/cropper.js"></script>
<script src="../static/js/interfaces/input/image_upload.js"></script>

View File

@@ -1 +0,0 @@
<script src="../static/js/vendor/sketchpad.js"></script>

View File

@@ -1,12 +0,0 @@
<div class="gradio output image">
<div class="panel_head">
<div class="role">Output</div>
<div class="loading">
<img src="../static/img/logo_mini.png" class="ld ld-skew"/>
<span class="loading_time"></span>
</div>
</div>
<div class="output_image"><img /></div>
</div>
<script src="../static/js/interfaces/output/image.js"></script>

View File

@@ -1,13 +0,0 @@
<div class="gradio output classifier">
<div class="panel_head">
<div class="role">Output</div>
<div class="loading">
<img src="../static/img/logo_mini.png" class="ld ld-skew"/>
<span class="loading_time"></span>
</div>
</div>
<div class="output_class"></div>
<div class="confidence_intervals">
</div>
</div>
<script src="../static/js/interfaces/output/label.js"></script>

View File

@@ -1,12 +0,0 @@
<div class="gradio output text">
<div class="panel_head">
<div class="role">Output</div>
<div class="loading">
<img src="../static/img/logo_mini.png" class="ld ld-skew"/>
<span class="loading_time"></span>
</div>
</div>
<textarea readonly class="output_text"></textarea>
</div>
<script src="../static/js/interfaces/output/textbox.js"></script>