Mirror of https://github.com/gradio-app/gradio.git (synced 2025-01-12 10:34:32 +08:00)

Merge branch 'master' of https://github.com/abidlabs/gradio
Commit 6aa75601b0

Changed file: .gitignore (vendored), 1 line changed
@@ -4,6 +4,7 @@ staticfiles
 .env
 *.sqlite3
 .idea/*
 *.ipynb
 .ipynb_checkpoints/*
 models/*
+.models/*
@@ -1,476 +0,0 @@
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
<<<<<<< HEAD
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"x_train shape: (60000, 28, 28, 1)\n",
|
||||
"60000 train samples\n",
|
||||
"10000 test samples\n",
|
||||
"Train on 60000 samples, validate on 10000 samples\n",
|
||||
"Epoch 1/12\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"'''Trains a simple convnet on the MNIST dataset.\n",
|
||||
"Gets to 99.25% test accuracy after 12 epochs\n",
|
||||
"(there is still a lot of margin for parameter tuning).\n",
|
||||
"16 seconds per epoch on a GRID K520 GPU.\n",
|
||||
"'''\n",
|
||||
"\n",
|
||||
"from __future__ import print_function\n",
|
||||
"import keras\n",
|
||||
"from keras.datasets import mnist\n",
|
||||
"from keras.models import Sequential\n",
|
||||
"from keras.layers import Dense, Dropout, Flatten\n",
|
||||
"from keras.layers import Conv2D, MaxPooling2D\n",
|
||||
"from keras import backend as K\n",
|
||||
"\n",
|
||||
"batch_size = 128\n",
|
||||
"num_classes = 10\n",
|
||||
"epochs = 12\n",
|
||||
"\n",
|
||||
"# input image dimensions\n",
|
||||
"img_rows, img_cols = 28, 28\n",
|
||||
"\n",
|
||||
"# the data, split between train and test sets\n",
|
||||
"(x_train, y_train), (x_test, y_test) = mnist.load_data()\n",
|
||||
"\n",
|
||||
"if K.image_data_format() == 'channels_first':\n",
|
||||
" x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n",
|
||||
" x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n",
|
||||
" input_shape = (1, img_rows, img_cols)\n",
|
||||
"else:\n",
|
||||
" x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n",
|
||||
" x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n",
|
||||
" input_shape = (img_rows, img_cols, 1)\n",
|
||||
"\n",
|
||||
"x_train = x_train.astype('float32')\n",
|
||||
"x_test = x_test.astype('float32')\n",
|
||||
"x_train /= 255\n",
|
||||
"x_test /= 255\n",
|
||||
"print('x_train shape:', x_train.shape)\n",
|
||||
"print(x_train.shape[0], 'train samples')\n",
|
||||
"print(x_test.shape[0], 'test samples')\n",
|
||||
"\n",
|
||||
"# convert class vectors to binary class matrices\n",
|
||||
"y_train = keras.utils.to_categorical(y_train, num_classes)\n",
|
||||
"y_test = keras.utils.to_categorical(y_test, num_classes)\n",
|
||||
"\n",
|
||||
"model = Sequential()\n",
|
||||
"model.add(Conv2D(32, kernel_size=(3, 3),\n",
|
||||
" activation='relu',\n",
|
||||
" input_shape=input_shape))\n",
|
||||
"model.add(Conv2D(64, (3, 3), activation='relu'))\n",
|
||||
"model.add(MaxPooling2D(pool_size=(2, 2)))\n",
|
||||
"model.add(Dropout(0.25))\n",
|
||||
"model.add(Flatten())\n",
|
||||
"model.add(Dense(128, activation='relu'))\n",
|
||||
"model.add(Dropout(0.5))\n",
|
||||
"model.add(Dense(num_classes, activation='softmax'))\n",
|
||||
"\n",
|
||||
"model.compile(loss=keras.losses.categorical_crossentropy,\n",
|
||||
" optimizer=keras.optimizers.Adadelta(),\n",
|
||||
" metrics=['accuracy'])\n",
|
||||
"\n",
|
||||
"model.fit(x_train, y_train,\n",
|
||||
" batch_size=batch_size,\n",
|
||||
" epochs=epochs,\n",
|
||||
" verbose=1,\n",
|
||||
" validation_data=(x_test, y_test))\n",
|
||||
"score = model.evaluate(x_test, y_test, verbose=0)\n",
|
||||
"print('Test loss:', score[0])\n",
|
||||
"print('Test accuracy:', score[1])\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
=======
|
||||
"execution_count": 14,
|
||||
>>>>>>> 6aaa97e586a26410e46fa4cc80907293e1f079f9
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"The autoreload extension is already loaded. To reload it, use:\n",
|
||||
" %reload_ext autoreload\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
<<<<<<< HEAD
|
||||
"import sklearn\n",
|
||||
"import gradio\n",
|
||||
=======
|
||||
"import base64\n",
|
||||
"from PIL import Image\n",
|
||||
"import tensorflow as tf\n",
|
||||
"import sklearn\n",
|
||||
"import gradio\n",
|
||||
"from io import BytesIO\n",
|
||||
"\n",
|
||||
"from keras.models import Sequential\n",
|
||||
"from keras.layers import Dense, Dropout, Activation, Flatten\n",
|
||||
"from keras.layers import Conv2D, MaxPooling2D, BatchNormalization\n",
|
||||
"from keras.losses import categorical_crossentropy\n",
|
||||
"from keras.optimizers import Adam\n",
|
||||
"from keras.regularizers import l2\n",
|
||||
"from keras.callbacks import ReduceLROnPlateau, TensorBoard, EarlyStopping, ModelCheckpoint\n",
|
||||
>>>>>>> 6aaa97e586a26410e46fa4cc80907293e1f079f9
|
||||
"from keras.models import load_model\n",
|
||||
"\n",
|
||||
"%load_ext autoreload\n",
|
||||
"%autoreload 2"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model = load_model('model.h5') # found random emotion detector model on github ''(its not very accurate)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def postprocessing(prediction):\n",
|
||||
" \"\"\"\n",
|
||||
" \"\"\"\n",
|
||||
" emotion_dict = {0: \"DANGEROUS\", 1: \"DANGEROUS\", 2: \"DANGEROUS\", 3: \"DANGEROUS\", 4: \"DANGEROUS\", 5: \"DANGEROUS\", 6: \"DANGEROUS\"}\n",
|
||||
" return emotion_dict[prediction] "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"def resize_and_crop(img, size, crop_type='top'):\n",
|
||||
" \"\"\"\n",
|
||||
" Resize and crop an image to fit the specified size.\n",
|
||||
" args:\n",
|
||||
" img_path: path for the image to resize.\n",
|
||||
" modified_path: path to store the modified image.\n",
|
||||
" size: `(width, height)` tuple.\n",
|
||||
" crop_type: can be 'top', 'middle' or 'bottom', depending on this\n",
|
||||
" value, the image will cropped getting the 'top/left', 'midle' or\n",
|
||||
" 'bottom/rigth' of the image to fit the size.\n",
|
||||
" raises:\n",
|
||||
" Exception: if can not open the file in img_path of there is problems\n",
|
||||
" to save the image.\n",
|
||||
" ValueError: if an invalid `crop_type` is provided.\n",
|
||||
" \"\"\"\n",
|
||||
" # Get current and desired ratio for the images\n",
|
||||
" img_ratio = img.size[0] / float(img.size[1])\n",
|
||||
" ratio = size[0] / float(size[1])\n",
|
||||
" # The image is scaled/cropped vertically or horizontally depending on the ratio\n",
|
||||
" if ratio > img_ratio:\n",
|
||||
" img = img.resize((size[0], size[0] * img.size[1] / img.size[0]),\n",
|
||||
" Image.ANTIALIAS)\n",
|
||||
" # Crop in the top, middle or bottom\n",
|
||||
" if crop_type == 'top':\n",
|
||||
" box = (0, 0, img.size[0], size[1])\n",
|
||||
" elif crop_type == 'middle':\n",
|
||||
" box = (0, (img.size[1] - size[1]) / 2, img.size[0], (img.size[1] + size[1]) / 2)\n",
|
||||
" elif crop_type == 'bottom':\n",
|
||||
" box = (0, img.size[1] - size[1], img.size[0], img.size[1])\n",
|
||||
" else:\n",
|
||||
" raise ValueError('ERROR: invalid value for crop_type')\n",
|
||||
" img = img.crop(box)\n",
|
||||
" elif ratio < img_ratio:\n",
|
||||
" img = img.resize((size[1] * img.size[0] / img.size[1], size[1]),\n",
|
||||
" Image.ANTIALIAS)\n",
|
||||
" # Crop in the top, middle or bottom\n",
|
||||
" if crop_type == 'top':\n",
|
||||
" box = (0, 0, size[0], img.size[1])\n",
|
||||
" elif crop_type == 'middle':\n",
|
||||
" box = ((img.size[0] - size[0]) / 2, 0, (img.size[0] + size[0]) / 2, img.size[1])\n",
|
||||
" elif crop_type == 'bottom':\n",
|
||||
" box = (img.size[0] - size[0], 0, img.size[0], img.size[1])\n",
|
||||
" else:\n",
|
||||
" raise ValueError('ERROR: invalid value for crop_type')\n",
|
||||
" img = img.crop(box)\n",
|
||||
" else:\n",
|
||||
" img = img.resize((size[0], size[1]),\n",
|
||||
" Image.ANTIALIAS)\n",
|
||||
" # If the scale is the same, we do not need to crop\n",
|
||||
" return img\n"
|
||||
]
|
||||
},
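The resize_and_crop helper in the cell above relies on Python 2 division: expressions such as size[0] * img.size[1] / img.size[0] produce floats under Python 3, which PIL's resize() rejects, and Image.ANTIALIAS is deprecated in newer Pillow releases. A minimal sketch of a Python 3 friendly variant, assuming Pillow is installed and simplifying to a centre crop (the name resize_and_crop_center is hypothetical):

from PIL import Image

def resize_and_crop_center(img, size):
    # Scale so the image covers the target size, then centre-crop.
    # Integer division (//) keeps the dimensions valid ints for PIL.
    img_ratio = img.size[0] / float(img.size[1])
    ratio = size[0] / float(size[1])
    if ratio > img_ratio:
        img = img.resize((size[0], size[0] * img.size[1] // img.size[0]), Image.LANCZOS)
    elif ratio < img_ratio:
        img = img.resize((size[1] * img.size[0] // img.size[1], size[1]), Image.LANCZOS)
    else:
        img = img.resize(size, Image.LANCZOS)
    left = (img.size[0] - size[0]) // 2
    top = (img.size[1] - size[1]) // 2
    return img.crop((left, top, left + size[0], top + size[1]))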
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 17,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def pre_p(imgstring): \n",
|
||||
" content = imgstring.split(';')[1]\n",
|
||||
" image_encoded = content.split(',')[1]\n",
|
||||
" body = base64.decodebytes(image_encoded.encode('utf-8'))\n",
|
||||
" im = Image.open(BytesIO(base64.b64decode(image_encoded))).convert('L')\n",
|
||||
" im = resize_and_crop(im, (28, 28))\n",
|
||||
" array = np.array(im).flatten().reshape(1, -1)\n",
|
||||
" return array \n"
|
||||
]
|
||||
},
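pre_p above flattens the decoded image to shape (1, 784), which is exactly what produces the "expected conv2d_1_input to have 4 dimensions, but got array with shape (1, 784)" error further down in this notebook. A minimal sketch of a preprocessing variant that keeps the 4-D (batch, height, width, channels) layout a Conv2D input expects; the data-URL format is assumed to match the one pre_p handles, and preprocess_for_convnet is a hypothetical name:

import base64
from io import BytesIO
import numpy as np
from PIL import Image

def preprocess_for_convnet(imgstring, target_size=(28, 28)):
    # Decode the base64 payload of a "data:image/...;base64,..." string,
    # convert to grayscale, resize, and keep the 4-D shape for Conv2D input.
    image_encoded = imgstring.split(',')[1]
    im = Image.open(BytesIO(base64.b64decode(image_encoded))).convert('L')
    im = im.resize(target_size)
    array = np.array(im).astype('float32') / 255.0
    return array.reshape(1, target_size[0], target_size[1], 1)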
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
<<<<<<< HEAD
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"http://localhost:6002/templates/tmp_html.html\n"
|
||||
=======
|
||||
"ename": "OSError",
|
||||
"evalue": "[Errno 10048] error while attempting to bind on address ('127.0.0.1', 5680): only one usage of each socket address (protocol/network address/port) is normally permitted",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[1;31mOSError\u001b[0m Traceback (most recent call last)",
|
||||
"\u001b[1;32m<ipython-input-19-121213b76e06>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[0miface\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mgradio\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mInterface\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m'webcam'\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0moutput\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m'class'\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mmodel_obj\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mmodel\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmodel_type\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m'keras'\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mpreprocessing_fn\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mpre_p\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mpostprocessing_fn\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mpostprocessing\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 2\u001b[1;33m \u001b[0miface\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlaunch\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
|
||||
"\u001b[1;32m~\\Desktop\\gradiome\\gradio.py\u001b[0m in \u001b[0;36mlaunch\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 241\u001b[0m \u001b[0mwebbrowser\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mopen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'file://'\u001b[0m \u001b[1;33m+\u001b[0m \u001b[0mos\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpath\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrealpath\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_build_template\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 242\u001b[0m \u001b[0mstart_server\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mwebsockets\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mserve\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcommunicate\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mLOCALHOST_IP\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mSOCKET_PORT\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 243\u001b[1;33m \u001b[0masyncio\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget_event_loop\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrun_until_complete\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mstart_server\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 244\u001b[0m \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 245\u001b[0m \u001b[0masyncio\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget_event_loop\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrun_forever\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[1;32m~\\Anaconda3\\lib\\site-packages\\nest_asyncio.py\u001b[0m in \u001b[0;36mrun_until_complete\u001b[1;34m(self, future)\u001b[0m\n\u001b[0;32m 59\u001b[0m \u001b[1;32mwhile\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[0mf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdone\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 60\u001b[0m \u001b[0mrun_once\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 61\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mresult\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 62\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 63\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_run_until_complete_orig\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfuture\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[1;32m~\\Anaconda3\\lib\\asyncio\\futures.py\u001b[0m in \u001b[0;36mresult\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 176\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__log_traceback\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;32mFalse\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 177\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_exception\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 178\u001b[1;33m \u001b[1;32mraise\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_exception\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 179\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_result\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 180\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[1;32m~\\Anaconda3\\lib\\asyncio\\tasks.py\u001b[0m in \u001b[0;36m__step\u001b[1;34m(***failed resolving arguments***)\u001b[0m\n\u001b[0;32m 221\u001b[0m \u001b[1;31m# We use the `send` method directly, because coroutines\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 222\u001b[0m \u001b[1;31m# don't have `__iter__` and `__next__` methods.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 223\u001b[1;33m \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mcoro\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msend\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;32mNone\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 224\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 225\u001b[0m \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mcoro\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mthrow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mexc\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[1;32m~\\Anaconda3\\lib\\asyncio\\tasks.py\u001b[0m in \u001b[0;36m_wrap_awaitable\u001b[1;34m(awaitable)\u001b[0m\n\u001b[0;32m 601\u001b[0m \u001b[0mthat\u001b[0m \u001b[0mwill\u001b[0m \u001b[0mlater\u001b[0m \u001b[0mbe\u001b[0m \u001b[0mwrapped\u001b[0m \u001b[1;32min\u001b[0m \u001b[0ma\u001b[0m \u001b[0mTask\u001b[0m \u001b[0mby\u001b[0m \u001b[0mensure_future\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 602\u001b[0m \"\"\"\n\u001b[1;32m--> 603\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[1;33m(\u001b[0m\u001b[1;32myield\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mawaitable\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__await__\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 604\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 605\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[1;32m~\\Anaconda3\\lib\\site-packages\\websockets\\py35\\server.py\u001b[0m in \u001b[0;36m__await_impl__\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 11\u001b[0m \u001b[1;31m# Duplicated with __iter__ because Python 3.7 requires an async function\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 12\u001b[0m \u001b[1;31m# (as explained in __await__ below) which Python 3.4 doesn't support.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 13\u001b[1;33m \u001b[0mserver\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;32mawait\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_creating_server\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 14\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mws_server\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwrap\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mserver\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 15\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mws_server\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[1;32m~\\Anaconda3\\lib\\asyncio\\base_events.py\u001b[0m in \u001b[0;36mcreate_server\u001b[1;34m(self, protocol_factory, host, port, family, flags, sock, backlog, ssl, reuse_address, reuse_port, ssl_handshake_timeout, start_serving)\u001b[0m\n\u001b[0;32m 1365\u001b[0m raise OSError(err.errno, 'error while attempting '\n\u001b[0;32m 1366\u001b[0m \u001b[1;34m'to bind on address %r: %s'\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1367\u001b[1;33m % (sa, err.strerror.lower())) from None\n\u001b[0m\u001b[0;32m 1368\u001b[0m \u001b[0mcompleted\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;32mTrue\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1369\u001b[0m \u001b[1;32mfinally\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[1;31mOSError\u001b[0m: [Errno 10048] error while attempting to bind on address ('127.0.0.1', 5680): only one usage of each socket address (protocol/network address/port) is normally permitted"
|
||||
>>>>>>> 6aaa97e586a26410e46fa4cc80907293e1f079f9
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
<<<<<<< HEAD
|
||||
"C:\\Users\\islam\\Repos\\gradio\\gradio.py:194: UserWarning: No parser was explicitly specified, so I'm using the best available HTML parser for this system (\"html.parser\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n",
|
||||
"\n",
|
||||
"The code that caused this warning is on line 194 of the file C:\\Users\\islam\\Repos\\gradio\\gradio.py. To get rid of this warning, pass the additional argument 'features=\"html.parser\"' to the BeautifulSoup constructor.\n",
|
||||
"\n",
|
||||
" input_soup = BeautifulSoup(input_page.read())\n",
|
||||
"C:\\Users\\islam\\Repos\\gradio\\gradio.py:195: UserWarning: No parser was explicitly specified, so I'm using the best available HTML parser for this system (\"html.parser\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n",
|
||||
"\n",
|
||||
"The code that caused this warning is on line 195 of the file C:\\Users\\islam\\Repos\\gradio\\gradio.py. To get rid of this warning, pass the additional argument 'features=\"html.parser\"' to the BeautifulSoup constructor.\n",
|
||||
"\n",
|
||||
" output_soup = BeautifulSoup(output_page.read())\n",
|
||||
"C:\\Users\\islam\\Repos\\gradio\\gradio.py:199: UserWarning: No parser was explicitly specified, so I'm using the best available HTML parser for this system (\"html.parser\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n",
|
||||
"\n",
|
||||
"The code that caused this warning is on line 199 of the file C:\\Users\\islam\\Repos\\gradio\\gradio.py. To get rid of this warning, pass the additional argument 'features=\"html.parser\"' to the BeautifulSoup constructor.\n",
|
||||
"\n",
|
||||
" all_io_soup = BeautifulSoup(all_io_page.read())\n",
|
||||
"Error in connection handler\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
|
||||
" yield from self.ws_handler(self, path)\n",
|
||||
" File \"C:\\Users\\islam\\Repos\\gradio\\gradio.py\", line 226, in communicate\n",
|
||||
" processed_input = self.input_interface._pre_process(await websocket.recv())\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\websockets\\protocol.py\", line 434, in recv\n",
|
||||
" yield from self.ensure_open()\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\websockets\\protocol.py\", line 646, in ensure_open\n",
|
||||
" ) from self.transfer_data_exc\n",
|
||||
"websockets.exceptions.ConnectionClosed: WebSocket connection is closed: code = 1001 (going away), no reason\n"
|
||||
=======
|
||||
"Error in connection handler\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
|
||||
" yield from self.ws_handler(self, path)\n",
|
||||
" File \"C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py\", line 228, in communicate\n",
|
||||
" prediction = self.predict(processed_input)\n",
|
||||
" File \"C:\\Users\\ALI\\Desktop\\gradiome\\gradio.py\", line 211, in predict\n",
|
||||
" return self.model_obj.predict(array)[0].argmax()\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\keras\\engine\\training.py\", line 1149, in predict\n",
|
||||
" x, _, _ = self._standardize_user_data(x)\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\keras\\engine\\training.py\", line 751, in _standardize_user_data\n",
|
||||
" exception_prefix='input')\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\keras\\engine\\training_utils.py\", line 128, in standardize_input_data\n",
|
||||
" 'with shape ' + str(data_shape))\n",
|
||||
"ValueError: Error when checking input: expected conv2d_1_input to have 4 dimensions, but got array with shape (1, 784)\n"
|
||||
>>>>>>> 6aaa97e586a26410e46fa4cc80907293e1f079f9
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
<<<<<<< HEAD
|
||||
"iface = gradio.Interface(input='webcam',output='class',model_obj=model, model_type='keras')\n",
|
||||
=======
|
||||
"iface = gradio.Interface(input='webcam',output='class',model_obj=model, model_type='keras',postprocessing_fn=postprocessing)\n",
|
||||
>>>>>>> 6aaa97e586a26410e46fa4cc80907293e1f079f9
|
||||
"iface.launch()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"serving at port 7000\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Exception in thread Thread-6:\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\threading.py\", line 916, in _bootstrap_inner\n",
|
||||
" self.run()\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\threading.py\", line 864, in run\n",
|
||||
" self._target(*self._args, **self._kwargs)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\socketserver.py\", line 236, in serve_forever\n",
|
||||
" ready = selector.select(poll_interval)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\selectors.py\", line 323, in select\n",
|
||||
" r, w, _ = self._select(self._readers, self._writers, [], timeout)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\selectors.py\", line 314, in _select\n",
|
||||
" r, w, x = select.select(r, w, w, timeout)\n",
|
||||
"OSError: [WinError 10038] An operation was attempted on something that is not a socket\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import http.server\n",
|
||||
"import socketserver\n",
|
||||
"import os\n",
|
||||
"import threading\n",
|
||||
"\n",
|
||||
"TEMPLATE_DIRECTORY = 'templates'\n",
|
||||
"PORT = 7000\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"web_dir = os.path.realpath(TEMPLATE_DIRECTORY)\n",
|
||||
"os.chdir(web_dir)\n",
|
||||
"Handler = http.server.SimpleHTTPRequestHandler\n",
|
||||
"\n",
|
||||
"with socketserver.TCPServer((\"\", PORT), Handler) as httpd:\n",
|
||||
" print(\"serving at port\", PORT)\n",
|
||||
" t = threading.Thread(target=httpd.serve_forever)\n",
|
||||
" t.start()\n",
|
||||
"\n",
|
||||
"path_to_server = 'localhost:{}/'.format(PORT)\n",
|
||||
"os.chdir('..') # TODO(abidlabs): make this better\n",
|
||||
" "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"serving at port 7000\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import http.server\n",
|
||||
"import socketserver\n",
|
||||
"import os\n",
|
||||
"import threading\n",
|
||||
"\n",
|
||||
"TEMPLATE_DIRECTORY = 'templates'\n",
|
||||
"PORT = 7000\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"web_dir = os.path.realpath(TEMPLATE_DIRECTORY)\n",
|
||||
"os.chdir(web_dir)\n",
|
||||
"Handler = http.server.SimpleHTTPRequestHandler\n",
|
||||
"\n",
|
||||
"with socketserver.TCPServer((\"\", PORT), Handler) as httpd:\n",
|
||||
" print(\"serving at port\", PORT)\n",
|
||||
" httpd.serve_forever()\n",
|
||||
"# t = threading.Thread(target=httpd.serve_forever)\n",
|
||||
"# t.start()\n",
|
||||
"\n",
|
||||
"path_to_server = 'localhost:{}/'.format(PORT)\n",
|
||||
"os.chdir('..') # TODO(abidlabs): make this better "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import subprocess"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"<subprocess.Popen at 0x16a3f8aa390>"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"subprocess.Popen(['python', '-m', 'http.server', '6001'])"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6 (tensorflow)",
|
||||
"language": "python",
|
||||
"name": "tensorflow"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.7"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
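The server cells in this notebook call os.chdir() to serve the templates folder and leave a "TODO(abidlabs): make this better" note. A minimal sketch of one way to do that without changing the working directory, assuming Python 3.7+ (serve_directory is a hypothetical helper):

import functools
import http.server
import socketserver
import threading

def serve_directory(directory, port=7000):
    # Serve `directory` without os.chdir(): SimpleHTTPRequestHandler has
    # accepted a `directory` argument since Python 3.7.
    handler = functools.partial(http.server.SimpleHTTPRequestHandler, directory=directory)
    httpd = socketserver.TCPServer(("", port), handler)
    thread = threading.Thread(target=httpd.serve_forever, daemon=True)
    thread.start()
    return httpd  # call httpd.shutdown() to stop serving

# usage (hypothetical): server = serve_directory('templates', port=7000)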
@@ -1,169 +0,0 @@
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Load the Model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 47,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"The autoreload extension is already loaded. To reload it, use:\n",
|
||||
" %reload_ext autoreload\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%load_ext autoreload\n",
|
||||
"%autoreload 2\n",
|
||||
"\n",
|
||||
"import numpy as np\n",
|
||||
"import tensorflow as tf\n",
|
||||
"import gradio"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 48,
|
||||
"metadata": {
|
||||
"scrolled": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Exception ignored in: <function BaseSession._Callable.__del__ at 0x000002BC6788E378>\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\client\\session.py\", line 1455, in __del__\n",
|
||||
" self._session._session, self._handle, status)\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\framework\\errors_impl.py\", line 528, in __exit__\n",
|
||||
" c_api.TF_GetCode(self.status.status))\n",
|
||||
"tensorflow.python.framework.errors_impl.InvalidArgumentError: No such callable handle: 0\n",
|
||||
"Exception ignored in: <function BaseSession._Callable.__del__ at 0x000002BC6788E378>\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\client\\session.py\", line 1455, in __del__\n",
|
||||
" self._session._session, self._handle, status)\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\framework\\errors_impl.py\", line 528, in __exit__\n",
|
||||
" c_api.TF_GetCode(self.status.status))\n",
|
||||
"tensorflow.python.framework.errors_impl.InvalidArgumentError: No such callable handle: 0\n",
|
||||
"Exception ignored in: <function BaseSession._Callable.__del__ at 0x000002BC6788E378>\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\client\\session.py\", line 1455, in __del__\n",
|
||||
" self._session._session, self._handle, status)\n",
|
||||
" File \"C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\framework\\errors_impl.py\", line 528, in __exit__\n",
|
||||
" c_api.TF_GetCode(self.status.status))\n",
|
||||
"tensorflow.python.framework.errors_impl.InvalidArgumentError: No such callable handle: 0\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"model = tf.keras.applications.MobileNet()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 82,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"def saliency(model,img):\n",
|
||||
" return img[0,:, :, 0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 83,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"inp = gradio.inputs.ImageUpload()\n",
|
||||
"out = gradio.outputs.Label(label_names='imagenet1000', max_label_length=20)\n",
|
||||
"\n",
|
||||
"io = gradio.Interface(inputs=inp, \n",
|
||||
" outputs=out,\n",
|
||||
" model=model, \n",
|
||||
" model_type='keras',\n",
|
||||
" saliency=saliency)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 84,
|
||||
"metadata": {
|
||||
"scrolled": false
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"NOTE: Gradio is in beta stage, please report all bugs to: gradio.app@gmail.com\n",
|
||||
"Model is running locally at: http://localhost:7870/\n",
|
||||
"Model available publicly at: https://23329.gradio.app\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/html": [
|
||||
"\n",
|
||||
" <iframe\n",
|
||||
" width=\"1000\"\n",
|
||||
" height=\"500\"\n",
|
||||
" src=\"http://localhost:7870/\"\n",
|
||||
" frameborder=\"0\"\n",
|
||||
" allowfullscreen\n",
|
||||
" ></iframe>\n",
|
||||
" "
|
||||
],
|
||||
"text/plain": [
|
||||
"<IPython.lib.display.IFrame at 0x2bc0334e3c8>"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"io.launch(inline=True, share=True, validate=False);"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.7.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
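The saliency function in this notebook simply returns one channel of the input image rather than an attribution map. A minimal sketch of a gradient-based alternative, assuming TensorFlow 2.x eager execution (the notebook itself ran on an older TF 1.x stack); gradient_saliency is a hypothetical name:

import numpy as np
import tensorflow as tf

def gradient_saliency(model, img):
    # Gradient of the top predicted score with respect to the input pixels,
    # reduced over the channel axis to a single 2-D heat map.
    x = tf.convert_to_tensor(img, dtype=tf.float32)
    with tf.GradientTape() as tape:
        tape.watch(x)
        preds = model(x)
        score = tf.reduce_max(preds[0])
    grads = tape.gradient(score, x)
    return np.max(np.abs(grads.numpy()[0]), axis=-1)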
@@ -1,146 +0,0 @@
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Using TensorFlow backend.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"import sklearn\n",
|
||||
"import gradio\n",
|
||||
"from keras.models import load_model\n",
|
||||
"\n",
|
||||
"%load_ext autoreload\n",
|
||||
"%autoreload 2"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Load an Facial Emotion Detector Model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"ename": "OSError",
|
||||
"evalue": "Unable to open file (unable to open file: name = '.models/emotion.h5', errno = 2, error message = 'No such file or directory', flags = 0, o_flags = 0)",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[1;31mOSError\u001b[0m Traceback (most recent call last)",
|
||||
"\u001b[1;32m<ipython-input-2-b0a69f92c228>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mmodel\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mload_model\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'.models/emotion.h5'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
|
||||
"\u001b[1;32m~\\Anaconda3\\lib\\site-packages\\keras\\engine\\saving.py\u001b[0m in \u001b[0;36mload_model\u001b[1;34m(filepath, custom_objects, compile)\u001b[0m\n\u001b[0;32m 415\u001b[0m \u001b[0mmodel\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 416\u001b[0m \u001b[0mopened_new_file\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[0misinstance\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfilepath\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mh5py\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mGroup\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 417\u001b[1;33m \u001b[0mf\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mh5dict\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfilepath\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'r'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 418\u001b[0m \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 419\u001b[0m \u001b[0mmodel\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0m_deserialize_model\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mf\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcustom_objects\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcompile\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[1;32m~\\Anaconda3\\lib\\site-packages\\keras\\utils\\io_utils.py\u001b[0m in \u001b[0;36m__init__\u001b[1;34m(self, path, mode)\u001b[0m\n\u001b[0;32m 184\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_is_file\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;32mFalse\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 185\u001b[0m \u001b[1;32melif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mpath\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mstr\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 186\u001b[1;33m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdata\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mh5py\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mFile\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mpath\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mmode\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 187\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_is_file\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;32mTrue\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 188\u001b[0m \u001b[1;32melif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mpath\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdict\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[1;32m~\\Anaconda3\\lib\\site-packages\\h5py\\_hl\\files.py\u001b[0m in \u001b[0;36m__init__\u001b[1;34m(self, name, mode, driver, libver, userblock_size, swmr, **kwds)\u001b[0m\n\u001b[0;32m 310\u001b[0m \u001b[1;32mwith\u001b[0m \u001b[0mphil\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 311\u001b[0m \u001b[0mfapl\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mmake_fapl\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdriver\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mlibver\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwds\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 312\u001b[1;33m \u001b[0mfid\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mmake_fid\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mname\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0muserblock_size\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfapl\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mswmr\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mswmr\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 313\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 314\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mswmr_support\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[1;32m~\\Anaconda3\\lib\\site-packages\\h5py\\_hl\\files.py\u001b[0m in \u001b[0;36mmake_fid\u001b[1;34m(name, mode, userblock_size, fapl, fcpl, swmr)\u001b[0m\n\u001b[0;32m 140\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mswmr\u001b[0m \u001b[1;32mand\u001b[0m \u001b[0mswmr_support\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 141\u001b[0m \u001b[0mflags\u001b[0m \u001b[1;33m|=\u001b[0m \u001b[0mh5f\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mACC_SWMR_READ\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 142\u001b[1;33m \u001b[0mfid\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mh5f\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mopen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mname\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mflags\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfapl\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mfapl\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 143\u001b[0m \u001b[1;32melif\u001b[0m \u001b[0mmode\u001b[0m \u001b[1;33m==\u001b[0m \u001b[1;34m'r+'\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 144\u001b[0m \u001b[0mfid\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mh5f\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mopen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mname\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mh5f\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mACC_RDWR\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfapl\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mfapl\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[1;32mh5py\\_objects.pyx\u001b[0m in \u001b[0;36mh5py._objects.with_phil.wrapper\u001b[1;34m()\u001b[0m\n",
|
||||
"\u001b[1;32mh5py\\_objects.pyx\u001b[0m in \u001b[0;36mh5py._objects.with_phil.wrapper\u001b[1;34m()\u001b[0m\n",
|
||||
"\u001b[1;32mh5py\\h5f.pyx\u001b[0m in \u001b[0;36mh5py.h5f.open\u001b[1;34m()\u001b[0m\n",
|
||||
"\u001b[1;31mOSError\u001b[0m: Unable to open file (unable to open file: name = '.models/emotion.h5', errno = 2, error message = 'No such file or directory', flags = 0, o_flags = 0)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"model = load_model('.models/emotion.h5')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def post_p(prediction): \n",
|
||||
" emotional_dict = {0: \"Angry\", 1: \"Disgusted\", 2: \"Fearful\", 3: \"Happy\", 4: \"Neutral\", 5: \"Sad\", 6: \"Surprised\"}\n",
|
||||
" return emotional_dict[prediction.squeeze().argmax()]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"iface = gradio.Interface(inputs='imageupload', outputs='label', model=model, model_type='keras')\n",
|
||||
"iface.launch(share=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"open('faewpfjw/fefe')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from IPython.display import IFrame\n",
|
||||
"display(IFrame('http://www.mcabayarea.org', width=1000, height=500))\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.7.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
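The OSError above comes from '.models/emotion.h5' not existing locally, which is expected since the .gitignore change in this commit excludes .models/*. A small sketch of a friendlier failure mode for that load step (MODEL_PATH mirrors the path used above):

import os
from keras.models import load_model

MODEL_PATH = '.models/emotion.h5'

if not os.path.exists(MODEL_PATH):
    # .models/* is gitignored, so the weights never ship with the repository.
    raise FileNotFoundError(
        MODEL_PATH + " is not present locally; download or train the emotion "
        "model separately and place it under .models/ before running this cell.")
model = load_model(MODEL_PATH)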
@@ -1,171 +0,0 @@
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import tensorflow as tf\n",
|
||||
"import gradio"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"(x_train, y_train),(x_test, y_test) = tf.keras.datasets.mnist.load_data()\n",
|
||||
"x_train, x_test = x_train / 255.0, x_test / 255.0"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model = tf.keras.models.Sequential([\n",
|
||||
" tf.keras.layers.Flatten(),\n",
|
||||
" tf.keras.layers.Dense(512, activation=tf.nn.relu),\n",
|
||||
" tf.keras.layers.Dropout(0.2),\n",
|
||||
" tf.keras.layers.Dense(10, activation=tf.nn.softmax)\n",
|
||||
"])\n",
|
||||
"\n",
|
||||
"model.compile(optimizer='adam',\n",
|
||||
" loss='sparse_categorical_crossentropy',\n",
|
||||
" metrics=['accuracy'])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Epoch 1/1\n",
|
||||
"60000/60000 [==============================] - 24s 407us/step - loss: 0.2171 - acc: 0.9355\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"<tensorflow.python.keras.callbacks.History at 0x23bf7b1ae10>"
|
||||
]
|
||||
},
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"model.fit(x_train, y_train, epochs=1)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"iface = gradio.Interface(inputs=\"sketchpad\", outputs=\"label\", model=model, model_type='keras')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"No validation samples for this interface... skipping validation.\n",
|
||||
"NOTE: Gradio is in beta stage, please report all bugs to: contact.gradio@gmail.com\n",
|
||||
"Model is running locally at: http://localhost:7860/\n",
|
||||
"To create a public link, set `share=True` in the argument to `launch()`\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/html": [
|
||||
"\n",
|
||||
" <iframe\n",
|
||||
" width=\"1000\"\n",
|
||||
" height=\"500\"\n",
|
||||
" src=\"http://localhost:7860/\"\n",
|
||||
" frameborder=\"0\"\n",
|
||||
" allowfullscreen\n",
|
||||
" ></iframe>\n",
|
||||
" "
|
||||
],
|
||||
"text/plain": [
|
||||
"<IPython.lib.display.IFrame at 0x23bf7900160>"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"(<gradio.networking.serve_files_in_background.<locals>.HTTPServer at 0x23bf9174b70>,\n",
|
||||
" 'http://localhost:7860/',\n",
|
||||
" None)"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Error in connection handler\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
|
||||
" yield from self.ws_handler(self, path)\n",
|
||||
" File \"C:\\Users\\islam\\Repos\\gradio\\gradio\\interface.py\", line 114, in communicate\n",
|
||||
" msg = json.loads(await websocket.recv())\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\json\\__init__.py\", line 354, in loads\n",
|
||||
" return _default_decoder.decode(s)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\json\\decoder.py\", line 339, in decode\n",
|
||||
" obj, end = self.raw_decode(s, idx=_w(s, 0).end())\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\json\\decoder.py\", line 357, in raw_decode\n",
|
||||
" raise JSONDecodeError(\"Expecting value\", s, err.value) from None\n",
|
||||
"json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"iface.launch(inline=True, share=False)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.7.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
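The sketchpad notebook above trains the dense MNIST model for one epoch but never reports held-out accuracy. A short sketch of the missing evaluation step, reusing the x_test and y_test arrays loaded in that notebook:

# Evaluate the Dense MNIST model from the cells above on the held-out split.
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', test_loss)
print('Test accuracy:', test_acc)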
Test Keras.ipynb (101 lines changed)
@@ -1,101 +0,0 @@
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Load the Model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%load_ext autoreload\n",
|
||||
"%autoreload 2\n",
|
||||
"\n",
|
||||
"import numpy as np\n",
|
||||
"import tensorflow as tf\n",
|
||||
"import gradio"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\ops\\resource_variable_ops.py:435: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
|
||||
"Instructions for updating:\n",
|
||||
"Colocations handled automatically by placer.\n",
|
||||
"WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\keras\\layers\\core.py:143: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n",
|
||||
"Instructions for updating:\n",
|
||||
"Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"model = tf.keras.applications.MobileNet()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"inp = gradio.inputs.ImageUpload()\n",
|
||||
"out = gradio.outputs.Label(label_names='imagenet1000', max_label_length=8)\n",
|
||||
"\n",
|
||||
"io = gradio.Interface(inputs=inp, \n",
|
||||
" outputs=out,\n",
|
||||
" model=model, \n",
|
||||
" model_type='keras')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"scrolled": false
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"io.launch(inline=True, validate=False);"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.7.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
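Test Keras.ipynb loads tf.keras.applications.MobileNet() but only exercises it through the interface. A minimal sketch of classifying a single local image with the stock tf.keras MobileNet helpers (classify_with_mobilenet and image_path are hypothetical):

import numpy as np
import tensorflow as tf

def classify_with_mobilenet(model, image_path):
    # Load and resize the image, apply MobileNet's own preprocessing,
    # and decode the top-5 ImageNet predictions.
    img = tf.keras.preprocessing.image.load_img(image_path, target_size=(224, 224))
    x = tf.keras.preprocessing.image.img_to_array(img)[np.newaxis, ...]
    x = tf.keras.applications.mobilenet.preprocess_input(x)
    preds = model.predict(x)
    return tf.keras.applications.mobilenet.decode_predictions(preds, top=5)[0]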
@@ -1,345 +0,0 @@
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"ename": "ModuleNotFoundError",
|
||||
"evalue": "No module named 'torch'",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[1;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)",
|
||||
"\u001b[1;32m<ipython-input-1-800f83bb710d>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 2\u001b[0m \u001b[0mget_ipython\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrun_line_magic\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'autoreload'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'2'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 3\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 4\u001b[1;33m \u001b[1;32mimport\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 5\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mnn\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mnn\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 6\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mtorchvision\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[1;31mModuleNotFoundError\u001b[0m: No module named 'torch'"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"%load_ext autoreload\n",
|
||||
"%autoreload 2\n",
|
||||
"\n",
|
||||
"import torch\n",
|
||||
"import torch.nn as nn\n",
|
||||
"import torchvision\n",
|
||||
"import torchvision.transforms as transforms\n",
|
||||
"import gradio"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"ename": "NameError",
|
||||
"evalue": "name 'torch' is not defined",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)",
|
||||
"\u001b[1;32m<ipython-input-2-aa6a81b0840f>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[1;31m# Device configuration\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 2\u001b[1;33m \u001b[0mdevice\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdevice\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'cpu'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 3\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 4\u001b[0m \u001b[1;31m# Hyper-parameters\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 5\u001b[0m \u001b[0minput_size\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;36m784\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[1;31mNameError\u001b[0m: name 'torch' is not defined"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Device configuration\n",
|
||||
"device = torch.device('cpu')\n",
|
||||
"\n",
|
||||
"# Hyper-parameters \n",
|
||||
"input_size = 784\n",
|
||||
"hidden_size = 500\n",
|
||||
"num_classes = 10\n",
|
||||
"num_epochs = 2\n",
|
||||
"batch_size = 100\n",
|
||||
"learning_rate = 0.001\n",
|
||||
"\n",
|
||||
"# MNIST dataset \n",
|
||||
"train_dataset = torchvision.datasets.MNIST(root='../../data', train=True, transform=transforms.ToTensor(), download=True)\n",
|
||||
"test_dataset = torchvision.datasets.MNIST(root='../../data',train=False, transform=transforms.ToTensor())\n",
|
||||
"train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size,shuffle=True)\n",
|
||||
"test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)"
|
||||
]
|
||||
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"ename": "NameError",
"evalue": "name 'nn' is not defined",
"output_type": "error",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m<ipython-input-3-b0dc3fab7f79>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[1;31m# Fully connected neural network with one hidden layer\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 2\u001b[1;33m \u001b[1;32mclass\u001b[0m \u001b[0mNeuralNet\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mnn\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mModule\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 3\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0m__init__\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minput_size\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhidden_size\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mnum_classes\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 4\u001b[0m \u001b[0msuper\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mNeuralNet\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__init__\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 5\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfc1\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnn\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mLinear\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minput_size\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mhidden_size\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
"\u001b[1;31mNameError\u001b[0m: name 'nn' is not defined"
]
}
],
"source": [
|
||||
"# Fully connected neural network with one hidden layer\n",
|
||||
"class NeuralNet(nn.Module):\n",
|
||||
" def __init__(self, input_size, hidden_size, num_classes):\n",
|
||||
" super(NeuralNet, self).__init__()\n",
|
||||
" self.fc1 = nn.Linear(input_size, hidden_size) \n",
|
||||
" self.relu = nn.ReLU()\n",
|
||||
" self.fc2 = nn.Linear(hidden_size, num_classes) \n",
|
||||
" \n",
|
||||
" def forward(self, x):\n",
|
||||
" out = self.fc1(x)\n",
|
||||
" out = self.relu(out)\n",
|
||||
" out = self.fc2(out)\n",
|
||||
" return out\n",
|
||||
"\n",
|
||||
"model = NeuralNet(input_size, hidden_size, num_classes).to(device)\n",
|
||||
"\n",
|
||||
"# Loss and optimizer\n",
|
||||
"criterion = nn.CrossEntropyLoss()\n",
|
||||
"optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) \n",
|
||||
"\n",
|
||||
"# Train the model\n",
|
||||
"total_step = len(train_loader)\n",
|
||||
"for epoch in range(num_epochs):\n",
|
||||
" for i, (images, labels) in enumerate(train_loader): \n",
|
||||
" # Move tensors to the configured device\n",
|
||||
" images = images.reshape(-1, 28*28).to(device)\n",
|
||||
" labels = labels.to(device)\n",
|
||||
" \n",
|
||||
" # Forward pass\n",
|
||||
" outputs = model(images)\n",
|
||||
" loss = criterion(outputs, labels)\n",
|
||||
" \n",
|
||||
" # Backward and optimize\n",
|
||||
" optimizer.zero_grad()\n",
|
||||
" loss.backward()\n",
|
||||
" optimizer.step()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"ename": "NameError",
|
||||
"evalue": "name 'torch' is not defined",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)",
|
||||
"\u001b[1;32m<ipython-input-4-91667d2d5612>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[1;31m# Test the model\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2\u001b[0m \u001b[1;31m# In test phase, we don't need to compute gradients (for memory efficiency)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 3\u001b[1;33m \u001b[1;32mwith\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mno_grad\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 4\u001b[0m \u001b[0mcorrect\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;36m0\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 5\u001b[0m \u001b[0mtotal\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;36m0\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[1;31mNameError\u001b[0m: name 'torch' is not defined"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Test the model\n",
|
||||
"# In test phase, we don't need to compute gradients (for memory efficiency)\n",
|
||||
"with torch.no_grad():\n",
|
||||
" correct = 0\n",
|
||||
" total = 0\n",
|
||||
" for images, labels in test_loader:\n",
|
||||
" images = images.reshape(-1, 28*28).to(device)\n",
|
||||
" labels = labels.to(device)\n",
|
||||
" outputs = model(images)\n",
|
||||
" _, predicted = torch.max(outputs.data, 1)\n",
|
||||
" total += labels.size(0)\n",
|
||||
" correct += (predicted == labels).sum().item()\n",
|
||||
"\n",
|
||||
" print('Accuracy of the network on the 10000 test images: {} %'.format(100 * correct / total))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"ename": "NameError",
|
||||
"evalue": "name 'torch' is not defined",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)",
|
||||
"\u001b[1;32m<ipython-input-5-f12a632f31c3>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mvalue\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfrom_numpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mimages\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mnumpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 2\u001b[0m \u001b[0mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mvalue\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdtype\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 3\u001b[0m \u001b[0mvalue\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mautograd\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mVariable\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mvalue\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 4\u001b[0m \u001b[0mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mvalue\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdtype\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 5\u001b[0m \u001b[0mprediction\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mmodel\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mvalue\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[1;31mNameError\u001b[0m: name 'torch' is not defined"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"value = torch.from_numpy(images.numpy())\n",
|
||||
"print(value.dtype)\n",
|
||||
"value = torch.autograd.Variable(value)\n",
|
||||
"print(value.dtype)\n",
|
||||
"prediction = model(value)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"ename": "NameError",
|
||||
"evalue": "name 'images' is not defined",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)",
|
||||
"\u001b[1;32m<ipython-input-6-3125682ab905>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mimages\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mnumpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mastype\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'float64'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdtype\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
|
||||
"\u001b[1;31mNameError\u001b[0m: name 'images' is not defined"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"images.numpy().astype('float64').dtype"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"ename": "NameError",
|
||||
"evalue": "name 'prediction' is not defined",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)",
|
||||
"\u001b[1;32m<ipython-input-7-ab875561c356>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mprediction\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mnumpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
|
||||
"\u001b[1;31mNameError\u001b[0m: name 'prediction' is not defined"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"prediction.data.numpy().shape"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"ename": "NameError",
|
||||
"evalue": "name 'prediction' is not defined",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)",
|
||||
"\u001b[1;32m<ipython-input-8-177468ca29d9>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mprediction\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mnumpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
|
||||
"\u001b[1;31mNameError\u001b[0m: name 'prediction' is not defined"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"prediction.data.numpy()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"ename": "NameError",
|
||||
"evalue": "name 'gradio' is not defined",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)",
|
||||
"\u001b[1;32m<ipython-input-9-bf0f365c764e>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0minp\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mgradio\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0minputs\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mSketchpad\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mflatten\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mTrue\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mscale\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m/\u001b[0m\u001b[1;36m255\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m'float32'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 2\u001b[0m \u001b[0mio\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mgradio\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mInterface\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minputs\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0minp\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0moutputs\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m\"label\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmodel_type\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m\"pytorch\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmodel\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mmodel\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
|
||||
"\u001b[1;31mNameError\u001b[0m: name 'gradio' is not defined"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
"inp = gradio.inputs.Sketchpad(flatten=True, scale=1/255, dtype='float32')\n",
"io = gradio.Interface(inputs=inp, outputs=\"label\", model_type=\"pytorch\", model=model)"
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"ename": "NameError",
|
||||
"evalue": "name 'io' is not defined",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)",
|
||||
"\u001b[1;32m<ipython-input-10-137b131e2f9c>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mio\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlaunch\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
|
||||
"\u001b[1;31mNameError\u001b[0m: name 'io' is not defined"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"io.launch()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"ename": "NameError",
|
||||
"evalue": "name 'model' is not defined",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)",
|
||||
"\u001b[1;32m<ipython-input-11-1f8a688cae5d>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mmodel\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
|
||||
"\u001b[1;31mNameError\u001b[0m: name 'model' is not defined"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.7.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
@ -1,221 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%load_ext autoreload\n",
|
||||
"%autoreload 2\n",
|
||||
"\n",
|
||||
"from sklearn import datasets, svm\n",
|
||||
"import gradio\n",
|
||||
"import matplotlib.pyplot as plt\n",
|
||||
"\n",
|
||||
"# The digits dataset\n",
|
||||
"digits = datasets.load_digits()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,\n",
|
||||
" decision_function_shape='ovr', degree=3, gamma=0.001, kernel='rbf',\n",
|
||||
" max_iter=-1, probability=False, random_state=None, shrinking=True,\n",
|
||||
" tol=0.001, verbose=False)"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# To apply a classifier on this data, we need to flatten the image, to\n",
|
||||
"# turn the data in a (samples, feature) matrix:\n",
|
||||
"n_samples = len(digits.images)\n",
|
||||
"data = digits.images.reshape((n_samples, -1))\n",
|
||||
"\n",
|
||||
"# Create a classifier: a support vector classifier\n",
|
||||
"classifier = svm.SVC(gamma=0.001)\n",
|
||||
"\n",
|
||||
"# We learn the digits on the first half of the digits\n",
|
||||
"classifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"16.0"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"data.max()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAWoAAAB4CAYAAADbsbjHAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAACUZJREFUeJzt3V2MXVUZxvHnkYrEFDptlAsQMq1cYIy2aQkJ0UgbaYJB7RClJkJiMdIm3thoSHuBBJTENkEtmmgGvxqDGlovaCAx2BpahQjS6jQRjZq2E6x8JFCmfDVo7evFPpUJlNnrTPc55z27/1/SZE7nPXuteTvznD377NXliBAAIK+3DXoCAICZEdQAkBxBDQDJEdQAkBxBDQDJEdQAkNxQBrXts2y/bPviJmtBb3uJ3vZO23vbl6DuNOXknxO2j017fH23x4uI/0bE3Ih4ssnaJti+2fYzto/a/qHts3s83hnRW9uLbf/a9vO2j/d6vM6YZ0pvP2/7j7ZftH3Y9jdsn9XjMc+U3l5v+2+dPHjW9k9sz+36OP1e8GJ7UtIXImLXDDVzIqIvP4xNsn2NpB9JWiHpWUk7JO2JiFv6NP6k2tvb90m6QtKUpG0RMafP40+qvb39oqT9kh6XdL6kByTdExF39mn8SbW3txdLejUinrN9rqQfSHoqIr7czXFSXPqwfYfte23/wvZLkm6wfYXtR21P2X7a9ndsv71TP8d22B7tPL6n8/lf2X7J9u9tL+y2tvP5j9n+e+cV8Lu2H7G9pvBL+ZykuyPirxFxRNIdkkqf2xNt6W2npz+W9JcG23NaWtTb70XEIxHx74g4LOnnkj7UXKe616LePhkRz037qxOSLum2HymCuuNaVd8g8yTdK+m4pC9Jepeqb5qrJa2b4fmflfRVSQskPSnp693W2j5f0jZJN3fGPSTp8pNPsr2w801ywVsc9/2qzkxO2i/pQtvzZphLP7Sht1m1sbcfkfREYW0vtaK3tq+0fVTSi5I+KWnLDPM4pUxB/XBE3B8RJyLiWEQ8HhGPRcTxiDgo6W5JV87w/F9GxN6I+I+kn0laMovaj0uaiIgdnc99W9L/Xw0j4lBEjETEU29x3LmSjk57fPLjc2eYSz+0obdZtaq3tm+S9EFJ36qr7YNW9DYi9kTEPEkXSbpT1QtBV/p6na/GP6c/sH2ppG9KWibpnarm+tgMz39m2sevqgrNbmsvmD6PiAjbh2tn/rqXJZ037fF50/5+kNrQ26xa01vbn1J1JvnRzqW7QWtNbzvPPWx7l6rfEi6vq58u0xn1G9/VHJf0Z0mXRMR5km6V5B7P4WlJ7zn5wLYlXdjF85+QtHja48WS/hURU81Mb9ba0NusWtFbV2+Ef1/SNRGR4bKH1JLevsEcSe/t9kmZgvqNzlV16eAVV+/4z3QtqikPSFpq+xO256i6HvbuLp7/U0k32b7U9gJJt0ja2vw0T9vQ9daVcySd3Xl8jnt86+MsDWNvV6r63r02Ivb1aI5NGMbe3mD7os7Ho6p+Y/lNt5PIHNRfUXUXxUuqXknv7fWAEfGspM+ouj73vKpXvj9Jek2SbC9ydZ/nKd84iIgHVF3D+q2kSUn/kPS1Xs97Foaut536Y6reoD2r83GaO0CmGcbe3qrqDbsH/fq9zPf3et6zMIy9/YCkR22/IulhVb91d/0C0/f7qIeJq5v+n5L06Yj43aDn0yb0tnfobe8MqreZz6gHwvbVtufZfoeq23WOS/rDgKfVCvS2d+ht72ToLUH9Zh+WdFDVLThXSxqLiNcGO6XWoLe9Q297Z+C95dIHACTHGTUAJEdQA0ByvVqZ2Mj1lO3bt9fWbNiwobZm5cqVReNt2rSptmb+/PlFxyow2xv1+3atavny5bU1U1Nla3luv/322ppVq1YVHatA+t7u3r27tmZsbKzoWEuWzLQyuny8QqezwKSR/m7evLm2ZuPGjbU1CxcurK2RpH376m8t73UucEYNAMkR1ACQHEENAMkR1ACQHEENAMkR1ACQHEENAMkR1ACQXKatuN6kZDHLoUOHamteeOGFovEWLFhQW7Nt27bamuuuu65ovOxGRkZqa/bs2VN0rIceeqi2psEFLwM1MTFRW7NixYramnnzyvZEnpycLKobBiULVUp+BsfHx2tr1q0r+2+hSxa8XHXVVUXHmi3OqAEgOYIaAJIjqAEgOYIaAJIjqAEgOYIaAJIjqAEgOYIaAJIb2IKXkpvISxazHDhwoLZm0aJFRXMq2QmmZN7DsOClZFFGg7uCFO1C0hb33Xdfbc3ixYtra0p3eCnZPWdYrF27tramZCHcsmXLamtKd3jp9WKWEpxRA0ByBDUAJEdQA0ByBDUAJEdQA0ByBDUAJEdQA0ByBDUAJDewBS8lu64sXbq0tqZ0MUuJkpvkh8GWLVtqa2677bbamqNHjzYwm8ry5csbO1Z269evr60ZHR1t5DhSe3bGkcp+ng8ePFhbU7JYrnQhS0lWzZ8/v+hYs8UZNQAkR1ADQHIENQAkR1ADQHIENQAkR1ADQHIENQAkR1ADQHKpF7yU7LjSpAw3tjehZKHEmjVramua/FqnpqYaO9YglXwdJQuOSnaBKbV169bGjjUMShbFHDlypLamdMFLSd2uXbtqa07n54kzagBIjqAGgOQIagBIjqAGgOQIagBIjqAGgOQIagBIjqAGgOQIagBIbmArE0tW6ezbt6+RsUpWHErS3r17a2tWr159utM5I01MTNTWLFmypA8zOT0lW5jdddddjYxVunpxZGSkkfHapCRfSlYTStK6detqazZv3lxbs2nTpqLxToUzagBIjqAGgOQIagBIjqAGgOQIagBIjqAGgOQIagBIjqAGgOQGtuClZDudkgUo27dvb6Sm1IYNGxo7FoZPyRZmu3fvrq3Zv39/bc3Y2FjBjKRVq1bV1tx4442NHCeDjRs31taUbJ9VuhBu586dtTW9XgjHGTUAJEdQA0ByBDUAJEdQA0ByBDUAJEdQA0ByBDUAJEdQA0ByqRe8lOyaULIA5bLLLiuaU1M7ygyDkl1BShZA7Nixo2i8kkUgJYtJBq1kF5qS3WxKakp2k5HK/g1GR0dra4ZlwUvJ7i1r165tbLySxSzj4+ONjXcqnFEDQHIENQAkR1ADQHIENQAkR1ADQHIENQAkR1ADQHIENQAk54gY9BwAADPgjBoAkiOoASA5ghoAkiOoASA5ghoAkiOoASA5ghoAkiOoASA5ghoAkiOoASA5ghoAkiOoASA5ghoAkiOoASA5ghoAkiOoASA5ghoAkiOoASA5ghoAkiOoASA5ghoAkiOoASA5ghoAkvsf2PN/nyaodHgAAAAASUVORK5CYII=\n",
|
||||
"text/plain": [
|
||||
"<Figure size 432x288 with 4 Axes>"
|
||||
]
|
||||
},
|
||||
"metadata": {
|
||||
"needs_background": "light"
|
||||
},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"images_and_labels = list(zip(digits.images, digits.target))\n",
|
||||
"for index, (image, label) in enumerate(images_and_labels[:4]):\n",
|
||||
" plt.subplot(2, 4, index + 1)\n",
|
||||
" plt.axis('off')\n",
|
||||
" plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')\n",
|
||||
" plt.title('Training: %i' % label)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"ename": "TypeError",
|
||||
"evalue": "predict() missing 1 required positional argument: 'X'",
|
||||
"output_type": "error",
|
||||
"traceback": [
|
||||
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
|
||||
"\u001b[1;31mTypeError\u001b[0m Traceback (most recent call last)",
|
||||
"\u001b[1;32m<ipython-input-5-043f82e0ca32>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mclassifier\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpredict\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
|
||||
"\u001b[1;31mTypeError\u001b[0m: predict() missing 1 required positional argument: 'X'"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"classifier.predict()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"expected = digits.target[n_samples // 2:]\n",
|
||||
"predicted = classifier.predict(data[n_samples // 2:])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
"inp = gradio.inputs.Sketchpad(shape=(8, 8), flatten=True, scale=16/255, invert_colors=False)\n",
"io = gradio.Interface(inputs=inp, outputs=\"label\", model_type=\"sklearn\", model=classifier)"
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"No validation samples for this interface... skipping validation.\n",
|
||||
"NOTE: Gradio is in beta stage, please report all bugs to: gradio.app@gmail.com\n",
|
||||
"Model is running locally at: http://localhost:7862/\n",
|
||||
"Model available publicly at: https://52991.gradio.app\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/html": [
|
||||
"\n",
|
||||
" <iframe\n",
|
||||
" width=\"1000\"\n",
|
||||
" height=\"500\"\n",
|
||||
" src=\"http://localhost:7862/\"\n",
|
||||
" frameborder=\"0\"\n",
|
||||
" allowfullscreen\n",
|
||||
" ></iframe>\n",
|
||||
" "
|
||||
],
|
||||
"text/plain": [
|
||||
"<IPython.lib.display.IFrame at 0x1b59c46c6d8>"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"(<gradio.networking.serve_files_in_background.<locals>.HTTPServer at 0x1b59a2a8d30>,\n",
|
||||
" 'http://localhost:7862/',\n",
|
||||
" 'https://52991.gradio.app')"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"io.launch()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.7.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
@ -1,201 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"%load_ext autoreload\n",
|
||||
"%autoreload 2\n",
|
||||
"\n",
|
||||
"import tensorflow as tf\n",
|
||||
"import gradio"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"n_classes = 10\n",
|
||||
"(x_train, y_train),(x_test, y_test) = tf.keras.datasets.mnist.load_data()\n",
|
||||
"x_train, x_test = x_train.reshape(-1, 784) / 255.0, x_test.reshape(-1, 784) / 255.0\n",
|
||||
"y_train = tf.keras.utils.to_categorical(y_train, n_classes).astype(float)\n",
|
||||
"y_test = tf.keras.utils.to_categorical(y_test, n_classes).astype(float)\n",
|
||||
"\n",
|
||||
"learning_rate = 0.5\n",
|
||||
"epochs = 5\n",
|
||||
"batch_size = 100"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\framework\\op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
|
||||
"Instructions for updating:\n",
|
||||
"Colocations handled automatically by placer.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"x = tf.placeholder(tf.float32, [None, 784], name=\"x\")\n",
|
||||
"y = tf.placeholder(tf.float32, [None, 10], name=\"y\")\n",
|
||||
"\n",
|
||||
"W1 = tf.Variable(tf.random_normal([784, 300], stddev=0.03), name='W1')\n",
|
||||
"b1 = tf.Variable(tf.random_normal([300]), name='b1')\n",
|
||||
"W2 = tf.Variable(tf.random_normal([300, 10], stddev=0.03), name='W2')\n",
|
||||
"hidden_out = tf.add(tf.matmul(x, W1), b1)\n",
|
||||
"hidden_out = tf.nn.relu(hidden_out)\n",
|
||||
"y_ = tf.matmul(hidden_out, W2)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"probs = tf.nn.softmax(y_)\n",
|
||||
"cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=y_, labels=y))\n",
|
||||
"optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cross_entropy)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"init_op = tf.global_variables_initializer()\n",
|
||||
"correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
|
||||
"accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"sess = tf.Session()\n",
|
||||
"sess.run(init_op)\n",
|
||||
"total_batch = int(len(y_train) / batch_size)\n",
|
||||
"for epoch in range(epochs):\n",
|
||||
" avg_cost = 0\n",
|
||||
" for start, end in zip(range(0, len(y_train), batch_size), range(batch_size, len(y_train)+1, batch_size)): \n",
|
||||
" batch_x = x_train[start: end]\n",
|
||||
" batch_y = y_train[start: end]\n",
|
||||
" _, c = sess.run([optimizer, cross_entropy], feed_dict={x: batch_x, y: batch_y})\n",
|
||||
" avg_cost += c / total_batch"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def predict(inp):\n",
|
||||
" return sess.run(probs, feed_dict={x:inp})"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
"inp = gradio.inputs.Sketchpad(flatten=True)\n",
"io = gradio.Interface(inputs=inp, outputs=\"label\", model_type=\"pyfunc\", model=predict)"
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"No validation samples for this interface... skipping validation.\n",
|
||||
"NOTE: Gradio is in beta stage, please report all bugs to: a12d@stanford.edu\n",
|
||||
"Model is running locally at: http://localhost:7860/interface.html\n",
|
||||
"Model available publicly for 8 hours at: https://share.gradio.app/25d5d472\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/html": [
|
||||
"\n",
|
||||
" <iframe\n",
|
||||
" width=\"1000\"\n",
|
||||
" height=\"500\"\n",
|
||||
" src=\"http://localhost:7860/interface.html\"\n",
|
||||
" frameborder=\"0\"\n",
|
||||
" allowfullscreen\n",
|
||||
" ></iframe>\n",
|
||||
" "
|
||||
],
|
||||
"text/plain": [
|
||||
"<IPython.lib.display.IFrame at 0x229f9dade80>"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"(<gradio.networking.serve_files_in_background.<locals>.HTTPServer at 0x229f97553c8>,\n",
|
||||
" 'http://localhost:7860/',\n",
|
||||
" 'http://25d5d472.ngrok.io')"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"io.launch(share=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.7.1"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
BIN
build/lib/gradio/css/.DS_Store
vendored
BIN
build/lib/gradio/css/.DS_Store
vendored
Binary file not shown.
BIN
build/lib/gradio/static/css/.DS_Store
vendored
BIN
build/lib/gradio/static/css/.DS_Store
vendored
Binary file not shown.
@ -1,101 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Using TensorFlow backend.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"import sklearn\n",
|
||||
"import gradio\n",
|
||||
"from keras.models import load_model\n",
|
||||
"\n",
|
||||
"%load_ext autoreload\n",
|
||||
"%autoreload 2"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Load an Facial Emotion Detector Model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\framework\\op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
|
||||
"Instructions for updating:\n",
|
||||
"Colocations handled automatically by placer.\n",
|
||||
"WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n",
|
||||
"Instructions for updating:\n",
|
||||
"Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n",
|
||||
"WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\ops\\math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
|
||||
"Instructions for updating:\n",
|
||||
"Use tf.cast instead.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"model = load_model('../.models/emotion.h5')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def post_p(prediction): \n",
|
||||
" emotional_dict = {0: \"Angry\", 1: \"Disgusted\", 2: \"Fearful\", 3: \"Happy\", 4: \"Neutral\", 5: \"Sad\", 6: \"Surprised\"}\n",
|
||||
" return emotional_dict[prediction.squeeze().argmax()]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
"iface = gradio.Interface(inputs='imageupload', outputs='label', model=model, model_type='keras',postprocessing_fn=post_p)\n",
"iface.launch(share=False)"
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.7.0"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
@ -1,95 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Using TensorFlow backend.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"import sklearn\n",
|
||||
"import gradio\n",
|
||||
"from keras.models import load_model\n",
|
||||
"\n",
|
||||
"%load_ext autoreload\n",
|
||||
"%autoreload 2"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Load an Facial Emotion Detector Model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model = load_model('../.models/emotion.h5')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def post_p(prediction): \n",
|
||||
" emotional_dict = {0: \"Angry\", 1: \"Disgusted\", 2: \"Fearful\", 3: \"Happy\", 4: \"Neutral\", 5: \"Sad\", 6: \"Surprised\"}\n",
|
||||
" return emotional_dict[prediction.squeeze().argmax()]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"NOTE: Gradio is in beta stage, please report all bugs to: a12d@stanford.edu\n",
|
||||
"Model available locally at: http://localhost:7865/interface.html\n",
|
||||
"Model available publicly for 8 hours at: http://f7ae4c9b.ngrok.io/interface.html\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"iface = gradio.Interface(input='webcam', output='class', model=model, model_type='keras',postprocessing_fn=post_p)\n",
|
||||
"iface.launch()"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6 (tensorflow)",
|
||||
"language": "python",
|
||||
"name": "tensorflow"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.7"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
@ -1,82 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"import sklearn\n",
|
||||
"import gradio\n",
|
||||
"from keras.models import load_model\n",
|
||||
"import sys\n",
|
||||
"import warnings\n",
|
||||
"\n",
|
||||
"if not sys.warnoptions:\n",
|
||||
" warnings.simplefilter(\"ignore\")\n",
|
||||
"\n",
|
||||
"%load_ext autoreload\n",
|
||||
"%autoreload 2"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Load MNIST Model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {
|
||||
"scrolled": true
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model = load_model('../.models/mnist-cnn.h5')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Run Interface"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {
|
||||
"scrolled": false
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"iface = gradio.Interface(input='sketchpad', output='class', model=model, model_type='keras')\n",
|
||||
"iface.launch(share_link=False)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6 (tensorflow)",
|
||||
"language": "python",
|
||||
"name": "tensorflow"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.7"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
@ -1,253 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Using TensorFlow backend.\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"import sklearn\n",
|
||||
"import gradio\n",
|
||||
"from keras.models import load_model\n",
|
||||
"from keras.datasets import imdb\n",
|
||||
"from keras.preprocessing import sequence\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"%load_ext autoreload\n",
|
||||
"%autoreload 2"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model = load_model('../.models/sentiment.h5')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from keras.preprocessing.text import one_hot\n",
|
||||
"from keras.preprocessing.text import text_to_word_sequence\n",
|
||||
"from keras.preprocessing import sequence\n",
|
||||
"\n",
|
||||
"def preprocessing(text): \n",
|
||||
" word2id = imdb.get_word_index()\n",
|
||||
" integers = [word2id.get(i,None) for i in text.split(\" \") if word2id.get(i,None)] \n",
|
||||
" padded = np.array(integers + [0]*(500-len(integers))).reshape(1,-1)\n",
|
||||
" return padded"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def postprocessing(prediction):\n",
|
||||
" if prediction[0] > 0.5:\n",
|
||||
" return \"Good\"\n",
|
||||
" else:\n",
|
||||
" return \"Bad\""
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"NOTE: Gradio is in beta stage, please report all bugs to: a12d@stanford.edu\n",
|
||||
"Model available locally at: http://localhost:7862/interface.html\n",
|
||||
"Model available publicly for 8 hours at: http://8756b8ce.ngrok.io/interface.html\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Error in connection handler\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
|
||||
" yield from self.ws_handler(self, path)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\gradio\\__init__.py\", line 116, in communicate\n",
|
||||
" prediction = self.predict(processed_input)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\gradio\\__init__.py\", line 100, in predict\n",
|
||||
" return self.model_obj.predict(array)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\engine\\training.py\", line 1169, in predict\n",
|
||||
" steps=steps)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\engine\\training_arrays.py\", line 294, in predict_loop\n",
|
||||
" batch_outs = f(ins_batch)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py\", line 2715, in __call__\n",
|
||||
" return self._call(inputs)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py\", line 2675, in _call\n",
|
||||
" fetched = self._callable_fn(*array_vals)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\python\\client\\session.py\", line 1382, in __call__\n",
|
||||
" run_metadata_ptr)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\python\\framework\\errors_impl.py\", line 519, in __exit__\n",
|
||||
" c_api.TF_GetCode(self.status.status))\n",
|
||||
"tensorflow.python.framework.errors_impl.InvalidArgumentError: indices[0,3] = 5872 is not in [0, 5000)\n",
|
||||
"\t [[Node: embedding_1/embedding_lookup = GatherV2[Taxis=DT_INT32, Tindices=DT_INT32, Tparams=DT_FLOAT, _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"](embedding_1/embeddings/read, embedding_1/Cast, embedding_1/embedding_lookup/axis)]]\n",
|
||||
"Error in connection handler\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
|
||||
" yield from self.ws_handler(self, path)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\gradio\\__init__.py\", line 116, in communicate\n",
|
||||
" prediction = self.predict(processed_input)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\gradio\\__init__.py\", line 100, in predict\n",
|
||||
" return self.model_obj.predict(array)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\engine\\training.py\", line 1169, in predict\n",
|
||||
" steps=steps)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\engine\\training_arrays.py\", line 294, in predict_loop\n",
|
||||
" batch_outs = f(ins_batch)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py\", line 2715, in __call__\n",
|
||||
" return self._call(inputs)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py\", line 2675, in _call\n",
|
||||
" fetched = self._callable_fn(*array_vals)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\python\\client\\session.py\", line 1382, in __call__\n",
|
||||
" run_metadata_ptr)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\python\\framework\\errors_impl.py\", line 519, in __exit__\n",
|
||||
" c_api.TF_GetCode(self.status.status))\n",
|
||||
"tensorflow.python.framework.errors_impl.InvalidArgumentError: indices[0,3] = 5872 is not in [0, 5000)\n",
|
||||
"\t [[Node: embedding_1/embedding_lookup = GatherV2[Taxis=DT_INT32, Tindices=DT_INT32, Tparams=DT_FLOAT, _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"](embedding_1/embeddings/read, embedding_1/Cast, embedding_1/embedding_lookup/axis)]]\n",
|
||||
"Error in connection handler\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
|
||||
" yield from self.ws_handler(self, path)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\gradio\\__init__.py\", line 116, in communicate\n",
|
||||
" prediction = self.predict(processed_input)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\gradio\\__init__.py\", line 100, in predict\n",
|
||||
" return self.model_obj.predict(array)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\engine\\training.py\", line 1169, in predict\n",
|
||||
" steps=steps)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\engine\\training_arrays.py\", line 294, in predict_loop\n",
|
||||
" batch_outs = f(ins_batch)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py\", line 2715, in __call__\n",
|
||||
" return self._call(inputs)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py\", line 2675, in _call\n",
|
||||
" fetched = self._callable_fn(*array_vals)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\python\\client\\session.py\", line 1382, in __call__\n",
|
||||
" run_metadata_ptr)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\python\\framework\\errors_impl.py\", line 519, in __exit__\n",
|
||||
" c_api.TF_GetCode(self.status.status))\n",
|
||||
"tensorflow.python.framework.errors_impl.InvalidArgumentError: indices[0,4] = 7768 is not in [0, 5000)\n",
|
||||
"\t [[Node: embedding_1/embedding_lookup = GatherV2[Taxis=DT_INT32, Tindices=DT_INT32, Tparams=DT_FLOAT, _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"](embedding_1/embeddings/read, embedding_1/Cast, embedding_1/embedding_lookup/axis)]]\n",
|
||||
"Error in connection handler\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
|
||||
" yield from self.ws_handler(self, path)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\gradio\\__init__.py\", line 116, in communicate\n",
|
||||
" prediction = self.predict(processed_input)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\gradio\\__init__.py\", line 100, in predict\n",
|
||||
" return self.model_obj.predict(array)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\engine\\training.py\", line 1169, in predict\n",
|
||||
" steps=steps)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\engine\\training_arrays.py\", line 294, in predict_loop\n",
|
||||
" batch_outs = f(ins_batch)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py\", line 2715, in __call__\n",
|
||||
" return self._call(inputs)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py\", line 2675, in _call\n",
|
||||
" fetched = self._callable_fn(*array_vals)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\python\\client\\session.py\", line 1382, in __call__\n",
|
||||
" run_metadata_ptr)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\python\\framework\\errors_impl.py\", line 519, in __exit__\n",
|
||||
" c_api.TF_GetCode(self.status.status))\n",
|
||||
"tensorflow.python.framework.errors_impl.InvalidArgumentError: indices[0,1] = 7768 is not in [0, 5000)\n",
|
||||
"\t [[Node: embedding_1/embedding_lookup = GatherV2[Taxis=DT_INT32, Tindices=DT_INT32, Tparams=DT_FLOAT, _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"](embedding_1/embeddings/read, embedding_1/Cast, embedding_1/embedding_lookup/axis)]]\n",
|
||||
"Error in connection handler\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
|
||||
" yield from self.ws_handler(self, path)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\gradio\\__init__.py\", line 116, in communicate\n",
|
||||
" prediction = self.predict(processed_input)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\gradio\\__init__.py\", line 100, in predict\n",
|
||||
" return self.model_obj.predict(array)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\engine\\training.py\", line 1169, in predict\n",
|
||||
" steps=steps)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\engine\\training_arrays.py\", line 294, in predict_loop\n",
|
||||
" batch_outs = f(ins_batch)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py\", line 2715, in __call__\n",
|
||||
" return self._call(inputs)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py\", line 2675, in _call\n",
|
||||
" fetched = self._callable_fn(*array_vals)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\python\\client\\session.py\", line 1382, in __call__\n",
|
||||
" run_metadata_ptr)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\python\\framework\\errors_impl.py\", line 519, in __exit__\n",
|
||||
" c_api.TF_GetCode(self.status.status))\n",
|
||||
"tensorflow.python.framework.errors_impl.InvalidArgumentError: indices[0,0] = 5872 is not in [0, 5000)\n",
|
||||
"\t [[Node: embedding_1/embedding_lookup = GatherV2[Taxis=DT_INT32, Tindices=DT_INT32, Tparams=DT_FLOAT, _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"](embedding_1/embeddings/read, embedding_1/Cast, embedding_1/embedding_lookup/axis)]]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Error in connection handler\n",
|
||||
"Traceback (most recent call last):\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
|
||||
" yield from self.ws_handler(self, path)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\gradio\\__init__.py\", line 116, in communicate\n",
|
||||
" prediction = self.predict(processed_input)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\gradio\\__init__.py\", line 100, in predict\n",
|
||||
" return self.model_obj.predict(array)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\engine\\training.py\", line 1169, in predict\n",
|
||||
" steps=steps)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\engine\\training_arrays.py\", line 294, in predict_loop\n",
|
||||
" batch_outs = f(ins_batch)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py\", line 2715, in __call__\n",
|
||||
" return self._call(inputs)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py\", line 2675, in _call\n",
|
||||
" fetched = self._callable_fn(*array_vals)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\python\\client\\session.py\", line 1382, in __call__\n",
|
||||
" run_metadata_ptr)\n",
|
||||
" File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\python\\framework\\errors_impl.py\", line 519, in __exit__\n",
|
||||
" c_api.TF_GetCode(self.status.status))\n",
|
||||
"tensorflow.python.framework.errors_impl.InvalidArgumentError: indices[0,2] = 7768 is not in [0, 5000)\n",
|
||||
"\t [[Node: embedding_1/embedding_lookup = GatherV2[Taxis=DT_INT32, Tindices=DT_INT32, Tparams=DT_FLOAT, _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"](embedding_1/embeddings/read, embedding_1/Cast, embedding_1/embedding_lookup/axis)]]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
"gradio.Interface(input='textbox', output='class', model=model, model_type='keras', preprocessing_fn=preprocessing, postprocessing_fn=postprocessing).launch()"
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6 (tensorflow)",
|
||||
"language": "python",
|
||||
"name": "tensorflow"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.7"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
@ -1,72 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import gradio"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def capitalize_each_word(s):\n",
|
||||
" return \" \".join(w.capitalize() for w in s.split())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
"iface = gradio.Interface(input=\"textbox\", output=\"textbox\", model=capitalize_each_word, model_type='func')"
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"NOTE: Gradio is in beta stage, please report all bugs to: a12d@stanford.edu\n",
|
||||
"Model available locally at: http://localhost:7860/interface.html\n",
|
||||
"Model available publicly for 8 hours at: http://a27185eb.ngrok.io/interface.html\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"iface.launch()"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3.6 (tensorflow)",
|
||||
"language": "python",
|
||||
"name": "tensorflow"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.6.7"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 2
|
||||
}
|
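For reference, the wrapped function simply title-cases each whitespace-separated token; a quick worked example (the input string is made up for illustration):

capitalize_each_word("hello world from gradio")  # -> "Hello World From Gradio"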
@ -1,151 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Using TensorFlow backend.\n"
]
}
],
"source": [
"import numpy as np\n",
"import sklearn\n",
"import gradio\n",
"from keras.models import load_model\n",
"\n",
"%load_ext autoreload\n",
"%autoreload 2"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Load a Facial Emotion Detector Model"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"model = load_model('../.models/emotion.h5')"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"def post_p(prediction): \n",
"    emotional_dict = {0: \"Angry\", 1: \"Disgusted\", 2: \"Fearful\", 3: \"Happy\", 4: \"Neutral\", 5: \"Sad\", 6: \"Surprised\"}\n",
"    return emotional_dict[prediction.squeeze().argmax()]"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"NOTE: Gradio is in beta stage, please report all bugs to: a12d@stanford.edu\n",
"Model is running locally at: http://localhost:7860/interface.html\n",
"To create a public link, set `share=True` in the argument to `launch()`\n"
]
},
{
"data": {
"text/html": [
"\n",
"        <iframe\n",
"            width=\"1000\"\n",
"            height=\"500\"\n",
"            src=\"http://localhost:7860/interface.html\"\n",
"            frameborder=\"0\"\n",
"            allowfullscreen\n",
"        ></iframe>\n",
"        "
],
"text/plain": [
"<IPython.lib.display.IFrame at 0x2725311ca20>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": [
"('http://localhost:7860/interface.html', None)"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"127.0.0.1 - - [06/Mar/2019 12:13:52] \"GET /interface.html HTTP/1.1\" 200 -\n",
"127.0.0.1 - - [06/Mar/2019 12:13:52] \"GET /static/css/gradio.css HTTP/1.1\" 200 -\n",
"127.0.0.1 - - [06/Mar/2019 12:13:52] \"GET /static/js/all-io.js HTTP/1.1\" 200 -\n",
"127.0.0.1 - - [06/Mar/2019 12:13:52] \"GET /static/js/image-upload-input.js HTTP/1.1\" 200 -\n",
"127.0.0.1 - - [06/Mar/2019 12:13:52] \"GET /static/js/class-output.js HTTP/1.1\" 200 -\n",
"Error in connection handler\n",
"Traceback (most recent call last):\n",
"  File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\websockets\\server.py\", line 169, in handler\n",
"    yield from self.ws_handler(self, path)\n",
"  File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\gradio\\interface.py\", line 106, in communicate\n",
"    prediction = self.predict(processed_input)\n",
"  File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\gradio\\interface.py\", line 122, in predict\n",
"    return self.model_obj.predict(preprocessed_input)\n",
"  File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\engine\\training.py\", line 1149, in predict\n",
"    x, _, _ = self._standardize_user_data(x)\n",
"  File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\engine\\training.py\", line 751, in _standardize_user_data\n",
"    exception_prefix='input')\n",
"  File \"C:\\Users\\islam\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\keras\\engine\\training_utils.py\", line 138, in standardize_input_data\n",
"    str(data_shape))\n",
"ValueError: Error when checking input: expected conv2d_1_input to have shape (48, 48, 1) but got array with shape (224, 224, 3)\n"
]
}
],
"source": [
"iface = gradio.Interface(inputs='imageupload', outputs='label', model=model, model_type='keras')\n",
"iface.launch(share=False)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.6 (tensorflow)",
"language": "python",
"name": "tensorflow"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
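Note on the ValueError above: the emotion model expects 48x48 single-channel input, while the 'imageupload' input delivered a 224x224 RGB array. A minimal, hypothetical preprocessing_fn that reconciles the two is sketched below; it assumes the input arrives as a NumPy array, as the shape in the traceback suggests, and reuses the preprocessing_fn parameter seen in the other notebooks in this diff.

import numpy as np
from PIL import Image

def emotion_preprocessing(img_array):
    # convert to grayscale and resize to the (48, 48, 1) shape the model expects
    img = Image.fromarray(np.uint8(img_array)).convert("L").resize((48, 48))
    x = np.asarray(img, dtype="float32") / 255.0
    return x.reshape(1, 48, 48, 1)

# e.g. gradio.Interface(inputs='imageupload', outputs='label', model=model,
#                       model_type='keras', preprocessing_fn=emotion_preprocessing)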
@ -1,111 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Using TensorFlow backend.\n"
]
}
],
"source": [
"import numpy as np\n",
"import sklearn\n",
"import gradio\n",
"from keras.models import load_model\n",
"\n",
"%load_ext autoreload\n",
"%autoreload 2"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Load a Facial Emotion Detector Model"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\framework\\op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Colocations handled automatically by placer.\n",
"WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n",
"WARNING:tensorflow:From C:\\Users\\ALI\\Anaconda3\\lib\\site-packages\\tensorflow\\python\\ops\\math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
"Instructions for updating:\n",
"Use tf.cast instead.\n"
]
}
],
"source": [
"model = load_model('../.models/emotion.h5')"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"def post_p(prediction): \n",
"    emotional_dict = {0: \"Angry\", 1: \"Disgusted\", 2: \"Fearful\", 3: \"Happy\", 4: \"Neutral\", 5: \"Sad\", 6: \"Surprised\"}\n",
"    return emotional_dict[prediction.squeeze().argmax()]"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"NOTE: Gradio is in beta stage, please report all bugs to: a12d@stanford.edu\n",
"Model available locally at: http://localhost:7870/interface.html\n",
"Model available publicly for 8 hours at: http://43693632.ngrok.io/interface.html\n"
]
}
],
"source": [
"iface = gradio.Interface(input='webcam', output='class', model=model, model_type='keras',postprocessing_fn=post_p)\n",
"iface.launch()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.1"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
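A quick worked example of the post_p mapping above (the probabilities are made up for illustration):

import numpy as np
post_p(np.array([[0.05, 0.05, 0.05, 0.60, 0.10, 0.10, 0.05]]))  # -> "Happy" (argmax is index 3)

If the webcam frames arrive at a size other than 48x48 grayscale, a resize step along the lines of the sketch after the previous notebook would presumably be needed here as well.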
@ -1,90 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Using TensorFlow backend.\n"
]
}
],
"source": [
"import numpy as np\n",
"import sklearn\n",
"import gradio\n",
"from keras.models import load_model\n",
"import sys\n",
"import warnings\n",
"\n",
"if not sys.warnoptions:\n",
"    warnings.simplefilter(\"ignore\")\n",
"\n",
"%load_ext autoreload\n",
"%autoreload 2"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Load MNIST Model"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"model = load_model('../.models/mnist-cnn.h5')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Run Interface"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"iface = gradio.Interface(input='sketchpad', output='class', model=model, model_type='keras')\n",
"iface.launch(share_link=False)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.6 (tensorflow)",
"language": "python",
"name": "tensorflow"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
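The sketchpad notebook passes the raw Keras model with no postprocessing step. Purely as a hypothetical sketch in the style of post_p from the emotion notebooks (not something the deleted notebook defines), a label mapper for the 10-way MNIST softmax could look like this:

import numpy as np

def mnist_post_p(prediction):
    # prediction is assumed to be a (1, 10) softmax vector from the MNIST CNN
    return str(int(np.argmax(prediction.squeeze())))

# e.g. gradio.Interface(input='sketchpad', output='class', model=model,
#                       model_type='keras', postprocessing_fn=mnist_post_p)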
@ -1,99 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Using TensorFlow backend.\n"
]
}
],
"source": [
"import numpy as np\n",
"import sklearn\n",
"import gradio\n",
"from keras.models import load_model\n",
"from keras.datasets import imdb\n",
"from keras.preprocessing import sequence\n",
"\n",
"\n",
"%load_ext autoreload\n",
"%autoreload 2"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"model = load_model('../.models/sentiment.h5')"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"from keras.preprocessing.text import one_hot\n",
"from keras.preprocessing.text import text_to_word_sequence\n",
"from keras.preprocessing import sequence\n",
"\n",
"def preprocessing(text): \n",
"    word2id = imdb.get_word_index()\n",
"    integers = [word2id.get(i,None) for i in text.split(\" \") if word2id.get(i,None)] \n",
"    padded = np.array(integers + [0]*(500-len(integers))).reshape(1,-1)\n",
"    return padded"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"def postprocessing(prediction):\n",
"    if prediction[0] > 0.5:\n",
"        return \"Positive\"\n",
"    else:\n",
"        return \"Negative\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"gradio.Interface(input='textbox', output='class', model=model, model_type='keras', preprocessing_fn=preprocessing, postprocessing_fn=postprocessing).launch()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.6 (tensorflow)",
"language": "python",
"name": "tensorflow"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
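Two fragile spots in preprocessing() above are worth flagging: it never restricts indices to the embedding's vocabulary (which lines up with the "[0, 5000)" failure recorded earlier in this diff), and [0]*(500-len(integers)) silently produces the wrong shape when a review runs past 500 tokens. Below is a hypothetical, more defensive variant using sequence.pad_sequences, which the notebook already imports; the 5000-word vocabulary is an assumption taken from that error message.

import numpy as np
from keras.datasets import imdb
from keras.preprocessing import sequence

NUM_WORDS = 5000  # assumption, inferred from the earlier "[0, 5000)" error
MAXLEN = 500

def preprocessing_v2(text):
    word2id = imdb.get_word_index()
    # drop unknown words and any index the embedding table cannot look up
    ids = [word2id[w] for w in text.split(" ") if w in word2id and word2id[w] < NUM_WORDS]
    # pad or truncate to a fixed length of 500
    return sequence.pad_sequences([ids], maxlen=MAXLEN, padding="post", truncating="post")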
@ -1,111 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Collecting gradio\n",
"  Downloading https://files.pythonhosted.org/packages/3a/ca/62d769859ce82ff93bc1f8c01002a912e0e55ddaf702a880a9f30aaf9303/gradio-0.1.7-py3-none-any.whl (725kB)\n",
"Requirement already satisfied: psutil in c:\\users\\islam\\anaconda3\\lib\\site-packages (from gradio) (5.4.7)\n",
"Requirement already satisfied: requests in c:\\users\\islam\\anaconda3\\lib\\site-packages (from gradio) (2.19.1)\n",
"Requirement already satisfied: Pillow in c:\\users\\islam\\anaconda3\\lib\\site-packages (from gradio) (5.2.0)\n",
"Requirement already satisfied: nest-asyncio in c:\\users\\islam\\anaconda3\\lib\\site-packages (from gradio) (0.9.10)\n",
"Requirement already satisfied: websockets in c:\\users\\islam\\anaconda3\\lib\\site-packages (from gradio) (7.0)\n",
"Requirement already satisfied: numpy in c:\\users\\islam\\anaconda3\\lib\\site-packages (from gradio) (1.15.1)\n",
"Requirement already satisfied: beautifulsoup4 in c:\\users\\islam\\anaconda3\\lib\\site-packages (from gradio) (4.6.3)\n",
"Requirement already satisfied: urllib3<1.24,>=1.21.1 in c:\\users\\islam\\anaconda3\\lib\\site-packages (from requests->gradio) (1.23)\n",
"Requirement already satisfied: certifi>=2017.4.17 in c:\\users\\islam\\anaconda3\\lib\\site-packages (from requests->gradio) (2018.8.24)\n",
"Requirement already satisfied: chardet<3.1.0,>=3.0.2 in c:\\users\\islam\\anaconda3\\lib\\site-packages (from requests->gradio) (3.0.4)\n",
"Requirement already satisfied: idna<2.8,>=2.5 in c:\\users\\islam\\anaconda3\\lib\\site-packages (from requests->gradio) (2.7)\n",
"Installing collected packages: gradio\n",
"Successfully installed gradio-0.1.7\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"You are using pip version 18.1, however version 19.0.2 is available.\n",
"You should consider upgrading via the 'python -m pip install --upgrade pip' command.\n"
]
}
],
"source": [
"!pip install gradio"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import gradio"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"def capitalize_each_word(s):\n",
"    return \" \".join(w.capitalize() for w in s.split())"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"iface = gradio.Interface(input=\"textbox\", output=\"textbox\", model=capitalize_each_word, model_type='func')"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"NOTE: Gradio is in beta stage, please report all bugs to: a12d@stanford.edu\n",
"Model available locally at: http://localhost:7860/interface.html\n",
"Model available publicly for 8 hours at: http://aed68955.ngrok.io/interface.html\n"
]
}
],
"source": [
"iface.launch()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.0"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
BIN
examples/css/.DS_Store
vendored
Binary file not shown.
BIN
gradio/static/css/.DS_Store
vendored
Binary file not shown.
@ -5,6 +5,6 @@ en = {
"restarting python interpreter.",
"COLAB_NO_LOCAL": "Cannot display local interface on google colab, public link created.",
"PUBLIC_SHARE_TRUE": "To create a public link, set `share=True` in the argument to `launch()`.",
"MODEL_PUBLICLY_AVAILABLE_URL": "Model available publicly at: {}",
"MODEL_PUBLICLY_AVAILABLE_URL": "Model available publicly at: {} (may take up to a minute for link to be usable)",
"GENERATING_PUBLIC_LINK": "Generating public link (may take a few seconds...):",
}