Add face recognition web image

pengluan 2022-07-17 18:39:11 +08:00
parent 2ec63b8ebb
commit 8dda1dda28
8 changed files with 871 additions and 0 deletions

images/web/faceai/Dockerfile Executable file (+42)

@@ -0,0 +1,42 @@
# docker build -t ccr.ccs.tencentyun.com/cube-studio/face:20220701 .
FROM ubuntu:18.04
ENV TZ=Asia/Shanghai
ENV DEBIAN_FRONTEND=noninteractive
# Install ops and debugging tools
RUN apt update; apt install -y --force-yes --no-install-recommends software-properties-common vim apt-transport-https gnupg2 ca-certificates-java rsync jq wget git dnsutils iputils-ping net-tools curl mysql-client locales zip unzip
# Install Chinese locale and fonts
RUN apt install -y --force-yes --no-install-recommends locales ttf-wqy-microhei ttf-wqy-zenhei xfonts-wqy && locale-gen zh_CN && locale-gen zh_CN.utf8
ENV LANG zh_CN.UTF-8
ENV LC_ALL zh_CN.UTF-8
ENV LANGUAGE zh_CN.UTF-8
# Convenience shell aliases
RUN echo "alias ll='ls -alF'" >> /root/.bashrc && \
echo "alias la='ls -A'" >> /root/.bashrc && \
echo "alias vi='vim'" >> /root/.bashrc && \
/bin/bash -c "source /root/.bashrc"
# Install Python 3.8
RUN add-apt-repository -y ppa:deadsnakes/ppa && apt update && apt install -y libsasl2-dev libpq-dev python3-pip python3-distutils python3.8-dev cmake libgl1-mesa-glx
RUN set -x; rm -rf /usr/bin/python; apt install -y --fix-missing python3.8 && ln -s /usr/bin/python3.8 /usr/bin/python
RUN bash -c "wget https://bootstrap.pypa.io/get-pip.py && python get-pip.py --ignore-installed" \
&& rm -rf /usr/bin/pip && ln -s /usr/bin/pip3 /usr/bin/pip
RUN pip install --upgrade pip && pip install flask requests kubernetes numpy pandas tornado pysnooper gunicorn face_recognition flask_cors opencv-python tensorflow keras
WORKDIR /app
RUN wget https://docker-76009.sz.gfp.tencent-cloud.com/github/cube-studio/service/face_model.zip && unzip face_model.zip && rm face_model.zip
COPY . /app/
ENTRYPOINT ["bash","-c","gunicorn --bind 0.0.0.0:8080 --workers 5 --timeout 300 --limit-request-line 0 --limit-request-field_size 0 --log-level=info server:app"]
# docker run --rm --name face -it -p 8080:8080 -v $PWD:/app csighub.tencentyun.com/tme-kubeflow/ai:face bash
# gunicorn --bind 0.0.0.0:8080 --workers 20 --timeout 300 --limit-request-line 0 --limit-request-field_size 0 --log-level=info server:app
# docker run --rm --name face -it -p 8080:8080 ccr.ccs.tencentyun.com/cube-studio/face:20220701 bash
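Once the image is built and started as in the docker run examples above, a quick smoke test could look like the sketch below (assumes the container is published on localhost:8080; the /hello route is defined in server.py further down):

# smoke_test.py -- sketch: check that the face service is reachable (assumes localhost:8080)
import requests

BASE = "http://localhost:8080"  # assumption: container started with -p 8080:8080

resp = requests.get(BASE + "/hello", timeout=5)
print(resp.status_code, resp.text)        # expected: 200 hello_world

resp = requests.get(BASE + "/", timeout=5)
print(resp.status_code, len(resp.text), "bytes of HTML (camera.html demo page)")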

images/web/faceai/server.py Normal file (+497)

@@ -0,0 +1,497 @@
#coding=utf-8
# Draw facial feature outlines
import base64
import datetime
import json
import logging
import os
import shutil
import sys
import time
from io import BytesIO

import cv2
import dlib
import face_recognition
import numpy
import numpy as np
import pysnooper
import requests
from flask import Flask, render_template, request, Response, jsonify
from flask_cors import CORS
from keras.models import load_model
from PIL import Image, ImageDraw, ImageFont
app = Flask(__name__)
CORS(app, supports_credentials=True)
# base_dir = os.path.abspath(__file__)
# # print(base_dir)
base_dir = os.path.split(os.path.realpath(__file__))[0]
print(base_dir)
# @pysnooper.snoop()
def draw(image_np):
image = image_np # face_recognition.load_image_file("static/img/ag.png")
# Find all facial landmarks for every face in the image
face_landmarks_list = face_recognition.face_landmarks(image)
for face_landmarks in face_landmarks_list:
facial_features = [
'chin', # chin
'left_eyebrow', # left eyebrow
'right_eyebrow', # right eyebrow
'nose_bridge', # nose bridge
'nose_tip', # nose tip
'left_eye', # left eye
'right_eye', # right eye
'top_lip', # upper lip
'bottom_lip' # lower lip
]
'''
PIL washes out the colors (RGB/BGR channel order); OpenCV does not
pil_image = Image.fromarray(image,mode='RGB')
d = ImageDraw.Draw(pil_image,mode='RGB')
for facial_feature in facial_features:
d.line(face_landmarks[facial_feature], fill=(255, 255, 255), width=2)
# pil_image.save('face/%s.jpg'%(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
output_buffer = BytesIO()
pil_image.save(output_buffer, format='JPEG')
byte_data = output_buffer.getvalue()
base64_byte = base64.b64encode(byte_data)
base64_str = 'data:image/jpg;base64,'+str(base64_byte,encoding='utf-8')
'''
# pil_image = Image.fromarray(image, mode='RGB')
# d = ImageDraw.Draw(image,mode='RGB')
print(face_landmarks[facial_features[0]])
for facial_feature in facial_features:
cv2.polylines(image,np.array([face_landmarks[facial_feature]]),False, color=(255, 255, 255))
# img = cv2.cvtColor(np.asarray(pil_image), cv2.COLOR_RGB2BGR)
base64_byte = cv2.imencode('.jpg', image)[1].tostring()
base64_byte = base64.b64encode(base64_byte)
base64_str = 'data:image/jpg;base64,'+str(base64_byte,encoding='utf-8')
return base64_str
@app.route('/')
def index():
period = int(os.getenv('PERIOD','100'))
return render_template('camera.html',period=period)
@app.route('/hello')
def hello():
return Response('hello_world')
@app.route('/receiveImage/', methods=["POST"])
def receive_image():
str_image = request.data.decode('utf-8')
img = base64.b64decode(str_image)
img_np = numpy.fromstring(img, dtype='uint8')
new_img_np = cv2.imdecode(img_np, cv2.IMREAD_COLOR)
base64_str = draw(new_img_np)
# cv2.imwrite('./images/rev_image.jpg', new_img_np)
# print('data:{}'.format('success'))
# return Response('data:image/jpg;base64,'+str_image)
return Response(base64_str)
# Face detector
detector = dlib.get_frontal_face_detector()
# 68-point facial landmark predictor
predictor = dlib.shape_predictor(
"shape_predictor_68_face_landmarks.dat"
)
@app.route('/autodetectFace/',methods=["POST"])
def autodetectFace():
str_image = request.data.decode('utf-8')
img = base64.b64decode(str_image)
img_np = numpy.fromstring(img, dtype='uint8')
new_img_np = cv2.imdecode(img_np, cv2.IMREAD_COLOR)
gray = cv2.cvtColor(new_img_np, cv2.COLOR_BGR2GRAY)
dets = detector(gray, 1)
for face in dets:
# annotate each detected face in the image and display it
# left = face.left()
# top = face.top()
# right = face.right()
# bottom = face.bottom()
# cv2.rectangle(img, (left, top), (right, bottom), (0, 255, 0), 2)
# cv2.imshow("image", img)
shape = predictor(new_img_np, face) # locate the 68 facial landmark points
# iterate over all the points and draw a small circle at each one
for pt in shape.parts():
pt_pos = (pt.x, pt.y)
cv2.circle(new_img_np, pt_pos, 1, (0, 255, 0), 2)
#cv2.imshow("image", img)
'''
PIL washes out the colors
pil_image = Image.fromarray(new_img_np, mode='RGB')
#pil_image.save("ag11111111111111111.png")
output_buffer = BytesIO()
pil_image.save(output_buffer, format='JPEG')
byte_data = output_buffer.getvalue()
base64_byte = base64.b64encode(byte_data)
base64_str = 'data:image/jpg;base64,'+str(base64_byte,encoding='utf-8')
'''
base64_byte = cv2.imencode('.jpg', new_img_np)[1].tostring()
base64_byte = base64.b64encode(base64_byte)
base64_str = 'data:image/jpg;base64,'+str(base64_byte,encoding='utf-8')
return Response(base64_str)
def cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20):
if (isinstance(img, numpy.ndarray)): # if this is an OpenCV image (numpy array), convert it to PIL
img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
draw = ImageDraw.Draw(img)
fontText = ImageFont.truetype(
"static/font/simsun.ttc", textSize, encoding="utf-8")
draw.text((left, top), text, textColor, font=fontText)
return cv2.cvtColor(numpy.asarray(img), cv2.COLOR_RGB2BGR)
emotion_classifier = load_model(os.path.join(base_dir,'simple_CNN.530-0.65.hdf5'))
face_classifier = cv2.CascadeClassifier(os.path.join(base_dir,"haarcascade_frontalface_default.xml"))
# import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
graph = tf.get_default_graph()
emotion_labels = {
0: '生气',  # angry
1: '厌恶',  # disgust
2: '恐惧',  # fear
3: '开心',  # happy
4: '难过',  # sad
5: '惊喜',  # surprise
6: '平静'   # calm
}
# @pysnooper.snoop()
def get_em(img):
# img = cv2.imread("/app/static/img/emotion.png")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_classifier.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=3, minSize=(40, 40))
color = (255, 0, 0)
with graph.as_default():
for (x, y, w, h) in faces:
gray_face = gray[(y):(y + h), (x):(x + w)]
gray_face = cv2.resize(gray_face, (48, 48))
gray_face = gray_face / 255.0
gray_face = np.expand_dims(gray_face, 0)
gray_face = np.expand_dims(gray_face, -1)
emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))
emotion = emotion_labels[emotion_label_arg]
cv2.rectangle(img, (x + 10, y + 10), (x + h - 10, y + w - 10),
(255, 255, 255), 2)
img = cv2ImgAddText(img, emotion, x + h * 0.3, y, color, 20)
'''
PIL washes out the colors; OpenCV does not
pil_image = Image.fromarray(img, mode='RGB')
# pil_image.save("ag11111111111111111.png")
output_buffer = BytesIO()
pil_image.save(output_buffer, format='JPEG')
byte_data = output_buffer.getvalue()
base64_byte = base64.b64encode(byte_data)
base64_str = 'data:image/jpg;base64,'+str(base64_byte,encoding='utf-8')
'''
base64_byte = cv2.imencode('.jpg', img)[1].tostring()
base64_byte = base64.b64encode(base64_byte)
base64_str = 'data:image/jpg;base64,' + str(base64_byte, encoding='utf-8')
return base64_str
@app.route('/emotionRecognition/',methods=["POST"])
def emotionRecognition():
str_image = request.data.decode('utf-8')
img = base64.b64decode(str_image)
img_np = numpy.fromstring(img, dtype='uint8')
new_img_np = cv2.imdecode(img_np, cv2.IMREAD_COLOR)
base64_str=get_em(new_img_np)
return Response(base64_str)
@app.route('/faceswap/',methods=["POST"])
def faceswap():
# modelPath = "../app/shape_predictor_68_face_landmarks.dat"
SCALE_FACTOR = 1
FEATHER_AMOUNT = 11
FACE_POINTS = list(range(17, 68))
MOUTH_POINTS = list(range(48, 61))
RIGHT_BROW_POINTS = list(range(17, 22))
LEFT_BROW_POINTS = list(range(22, 27))
RIGHT_EYE_POINTS = list(range(36, 42))
LEFT_EYE_POINTS = list(range(42, 48))
NOSE_POINTS = list(range(27, 35))
JAW_POINTS = list(range(0, 17))
ALIGN_POINTS = (LEFT_BROW_POINTS + RIGHT_EYE_POINTS + LEFT_EYE_POINTS +
RIGHT_BROW_POINTS + NOSE_POINTS + MOUTH_POINTS)
OVERLAY_POINTS = [
LEFT_EYE_POINTS + RIGHT_EYE_POINTS + LEFT_BROW_POINTS + RIGHT_BROW_POINTS,
NOSE_POINTS + MOUTH_POINTS,
]
COLOUR_CORRECT_BLUR_FRAC = 0.6
detector = dlib.get_frontal_face_detector()
# predictor = dlib.shape_predictor(modelPath)
class TooManyFaces(Exception):
pass
class NoFaces(Exception):
pass
def get_landmarks(im):
rects = detector(im, 1)
if len(rects) > 1:
raise TooManyFaces
if len(rects) == 0:
raise NoFaces
return numpy.matrix([[p.x, p.y] for p in predictor(im, rects[0]).parts()])
def annotate_landmarks(im, landmarks):
im = im.copy()
for idx, point in enumerate(landmarks):
pos = (point[0, 0], point[0, 1])
cv2.putText(
im,
str(idx),
pos,
fontFace=cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,
fontScale=0.4,
color=(0, 0, 255))
cv2.circle(im, pos, 3, color=(0, 255, 255))
return im
def draw_convex_hull(im, points, color):
points = cv2.convexHull(points)
cv2.fillConvexPoly(im, points, color=color)
def get_face_mask(im, landmarks):
im = numpy.zeros(im.shape[:2], dtype=numpy.float64)
for group in OVERLAY_POINTS:
draw_convex_hull(im, landmarks[group], color=1)
im = numpy.array([im, im, im]).transpose((1, 2, 0))
im = (cv2.GaussianBlur(im, (FEATHER_AMOUNT, FEATHER_AMOUNT), 0) > 0) * 1.0
im = cv2.GaussianBlur(im, (FEATHER_AMOUNT, FEATHER_AMOUNT), 0)
return im
def transformation_from_points(points1, points2):
points1 = points1.astype(numpy.float64)
points2 = points2.astype(numpy.float64)
c1 = numpy.mean(points1, axis=0)
c2 = numpy.mean(points2, axis=0)
points1 -= c1
points2 -= c2
s1 = numpy.std(points1)
s2 = numpy.std(points2)
points1 /= s1
points2 /= s2
U, S, Vt = numpy.linalg.svd(points1.T * points2)
R = (U * Vt).T
return numpy.vstack([
numpy.hstack(((s2 / s1) * R, c2.T - (s2 / s1) * R * c1.T)),
numpy.matrix([0., 0., 1.])
])
def read_im_and_landmarks(fname=None):
if fname:
im = cv2.imread(fname, cv2.IMREAD_COLOR)
else:
str_image = request.data.decode('utf-8')
img = base64.b64decode(str_image)
img_np = numpy.fromstring(img, dtype='uint8')
im = cv2.imdecode(img_np, cv2.IMREAD_COLOR)
im = cv2.resize(im,
(im.shape[1] * SCALE_FACTOR, im.shape[0] * SCALE_FACTOR))
s = get_landmarks(im)
return im, s
def warp_im(im, M, dshape):
output_im = numpy.zeros(dshape, dtype=im.dtype)
cv2.warpAffine(
im,
M[:2], (dshape[1], dshape[0]),
dst=output_im,
borderMode=cv2.BORDER_TRANSPARENT,
flags=cv2.WARP_INVERSE_MAP)
return output_im
def correct_colours(im1, im2, landmarks1):
blur_amount = COLOUR_CORRECT_BLUR_FRAC * numpy.linalg.norm(
numpy.mean(landmarks1[LEFT_EYE_POINTS], axis=0) -
numpy.mean(landmarks1[RIGHT_EYE_POINTS], axis=0))
blur_amount = int(blur_amount)
if blur_amount % 2 == 0:
blur_amount += 1
im1_blur = cv2.GaussianBlur(im1, (blur_amount, blur_amount), 0)
im2_blur = cv2.GaussianBlur(im2, (blur_amount, blur_amount), 0)
im2_blur += (128 * (im2_blur <= 1.0)).astype(im2_blur.dtype)
return (im2.astype(numpy.float64) * im1_blur.astype(numpy.float64) /
im2_blur.astype(numpy.float64))
# '''
im1, landmarks1 = read_im_and_landmarks(os.path.join(base_dir,"static/img/ag-2.png"))
# im2, landmarks2 = read_im_and_landmarks("static/img/ag.png")
im2, landmarks2 = read_im_and_landmarks()
'''
# im1, landmarks1 = read_im_and_landmarks("static/img/ag-2.png")
im2, landmarks2 = read_im_and_landmarks("static/img/ag-2.png")
im1, landmarks1 = read_im_and_landmarks()
'''
M = transformation_from_points(landmarks1[ALIGN_POINTS],
landmarks2[ALIGN_POINTS])
mask = get_face_mask(im2, landmarks2)
warped_mask = warp_im(mask, M, im1.shape)
combined_mask = numpy.max(
[get_face_mask(im1, landmarks1), warped_mask], axis=0)
warped_im2 = warp_im(im2, M, im1.shape)
warped_corrected_im2 = correct_colours(im1, warped_im2, landmarks1)
output_im = im1 * (1.0 - combined_mask) + warped_corrected_im2 * combined_mask
# cv2.imwrite("static/img/faceswap1.png", output_im)
'''
Producing the base64 output with PIL here loses the image's original colors
pil_image = Image.fromarray(np.uint8(output_im),mode='RGB')
pil_image.save("ag111111111111111111112.png")
output_buffer = BytesIO()
pil_image.save(output_buffer, format='JPEG')
byte_data = output_buffer.getvalue()
base64_byte = base64.b64encode(byte_data)
base64_str = 'data:image/jpg;base64,' + str(base64_byte, encoding='utf-8')
'''
base64_byte = cv2.imencode('.jpg', output_im)[1].tostring()
base64_byte = base64.b64encode(base64_byte)
base64_str = 'data:image/jpg;base64,' + str(base64_byte, encoding='utf-8')
# output_buffer = BytesIO()
# cv_img.save(output_buffer, format='JPEG')
# byte_data = output_buffer.getvalue()
# base64_byte = base64.b64encode(cv_img)
# base64_str = 'data:image/jpg;base64,' + str(base64_byte, encoding='utf-8')
return base64_str
#
# total_image_name = []
# total_face_encoding = []
#
# def load_local_face():
# path = "static/img/face_recognition" # 模型数据图片目录
#
# for fn in os.listdir(path): # fn is the image file name
# print(path + "/" + fn)
# img_path = path + "/" + fn
# if '.png' not in img_path:
# os.remove(img_path)
# continue
# face_img = face_recognition.load_image_file(img_path)
#
# faces = face_recognition.face_encodings(face_img)
# if faces and len(faces)>0:
# total_face_encoding.append(faces[0])
# fn = fn[:(len(fn) - 4)] # strip the extension; files in this directory should be named after the person they show
# total_image_name.append(fn) # list of known names
# else:
# os.remove(img_path)
#
# load_local_face()
#
# @app.route('/facesearch/',methods=["POST"])
# def facesearch():
# str_image = request.data.decode('utf-8')
# img = base64.b64decode(str_image)
# img_np = numpy.fromstring(img, dtype='uint8')
# frame = cv2.imdecode(img_np, cv2.IMREAD_COLOR)
#
# # find all the faces and face encodings in the video frame
# face_locations = face_recognition.face_locations(frame)
# face_encodings = face_recognition.face_encodings(frame, face_locations)
#
# # loop over every face found in this frame
# for (top, right, bottom, left), face_encoding in zip(
# face_locations, face_encodings):
# # check whether the face matches any known face
# name = "Unknown"
# result = face_recognition.face_distance(total_face_encoding,face_encoding).tolist()
# # print(result)
# xiangsidu = min(result)
# index = result.index(xiangsidu)
# if xiangsidu<0.4:
# name = total_image_name[index]
#
# # for i, v in enumerate(total_face_encoding):
# # match = face_recognition.compare_faces(
# # [v], face_encoding, tolerance=0.4)
# # # print(i,v)
# # # print(match)
# # if match[0]:
# # name = total_image_name[i]
# # break
# # draw a box around the face
# cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
# # draw a labelled box with the name below the face
# cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255),
# cv2.FILLED)
# font = cv2.FONT_HERSHEY_DUPLEX
# cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0,
# (255, 255, 255), 1)
# # encode and return the resulting image
# base64_byte = cv2.imencode('.jpg', frame)[1].tostring()
# base64_byte = base64.b64encode(base64_byte)
# base64_str = 'data:image/jpg;base64,' + str(base64_byte, encoding='utf-8')
# return Response(base64_str)
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port=8080)
# get_em()
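All of the POST endpoints above (/receiveImage/, /autodetectFace/, /emotionRecognition/, /faceswap/) share the same protocol: the request body is the bare base64 string of a JPEG/PNG frame (no data: prefix), and the response body is a data:image/jpg;base64,... string carrying the annotated image. A minimal client sketch (assumes the service on localhost:8080; the file names are illustrative):

# face_client.py -- sketch: post one image and save the annotated result
import base64
import requests

with open("test.jpg", "rb") as f:                 # illustrative input image
    payload = base64.b64encode(f.read())          # bare base64 body, no "data:" prefix

resp = requests.post("http://localhost:8080/autodetectFace/", data=payload, timeout=30)
data_url = resp.text                              # "data:image/jpg;base64,...."

# strip the data-URL prefix and decode the annotated JPEG returned by the server
img_bytes = base64.b64decode(data_url.split(",", 1)[1])
with open("result.jpg", "wb") as f:
    f.write(img_bytes)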

Binary files not shown (four new binary files; three are images of 367 KiB, 118 KiB, and 180 KiB).

File diff suppressed because one or more lines are too long

@@ -0,0 +1,328 @@
<!doctype html>
<html lang="en">
<head>
<title>GET VIDEO</title>
<meta charset="utf-8">
<script src="{{ url_for('static', filename='js/jquery-1.7.1.min.js') }}"></script>
<script type="text/javascript" src="//cdn.bootcss.com/jquery/3.1.1/jquery.min.js"></script>
<style>
.flex {
display: flex;
justify-content: space-around;
}
.flex>* {
background: black;
width: 25vw;
height: 25vw;
}
</style>
</head>
<body>
<div class="flex">
<video id="video" autoplay="autoplay"></video>
<canvas id="canvas"></canvas>
<img id="result" src="" />
</div>
<div style="text-align: center; padding: 5vh 0;">
<input type="button" title="开启摄像头" value="开启摄像头" onclick="getMedia()" />
{# <button id="upload" onclick="uploadImage()">拍照</button>#}
{# <button id="close" onclick="closeMedia()">关闭</button>#}
<button id="auto" onclick="autouploadImage()">五官</button>
<button id="autodetectFace" onclick="autodetectFace()">人脸关键点</button>
<button id="emotionRecognition" onclick="emotionRecognition()">人脸表情</button>
<button id="faceswap" onclick="faceswap()">换脸</button>
<button id="facesearch" onclick="facesearch()">身份识别</button>
</div>
<script>
var intervalId = null;
function autouploadImage() {
if(intervalId) clearInterval(intervalId);
intervalId = window.setInterval(uploadImage, {{ period }});
}
function closeMedia() {
var video = document.getElementById('video');
if (!video.srcObject) return
let stream = video.srcObject;
let tracks = stream.getTracks();
tracks.forEach(track => {
track.stop()
})
if(intervalId) clearInterval(intervalId);
}
//get the video (camera) element
let video = document.getElementById("video");
function getMedia() {
let constraints = {
video: { width: 500, height: 500 },
audio: false
};
/*
Here we use the newer HTML5 media API: navigator.mediaDevices.getUserMedia()
This method prompts the user for permission to use a media input (camera, video capture device, screen sharing, microphone, A/D converter, etc.)
It returns a Promise.
If the user grants permission, the MediaStream object is passed to then() via resolve()
If the user denies permission, or the requested media is unavailable, a PermissionDeniedError is passed to catch() via reject()
*/
let promise = navigator.mediaDevices.getUserMedia(constraints);
promise.then(function (MediaStream) {
video.srcObject = MediaStream;
video.play();
}).catch(function (PermissionDeniedError) {
console.log(PermissionDeniedError);
})
}
function takePhoto() {
//get the Canvas object
let canvas = document.getElementById("canvas");
let ctx = canvas.getContext('2d');
ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
}
function setImage(base64) {
const dom = document.querySelector('#result');
dom.src = base64;
}
//upload the captured image to the server
//read the base64 encoding of the canvas
// < !--var video = document.getElementById('video'); -->
var canvas = document.getElementById('canvas');
var context = canvas.getContext('2d');
function uploadImage() {
canvas.width = 300;
canvas.height = 300;
context.drawImage(video, 0, 0, 300, 300);
// Example
var imgData = canvas.toDataURL("image/jpg");
imgData = imgData.replace(/^data:image\/(png|jpg);base64,/, "")
//upload to the backend
var uploadAjax = $.ajax({
type: "post",
//backend endpoint to call
url: "/receiveImage/",
data: imgData, //JSON.stringify({ "imgData": imgData }),
contentType: "text/application",
//request timeout
timeout: 10000,
async: true,
success: function (resultBase64) {
// show the returned result in the DOM
// success callback
// resultBase64 = JSON.parse(resultBase64)
setImage(resultBase64);
},
error: function (data) {
},
// runs after the request completes
complete: function (XMLHttpRequest, textStatus) {
if (textStatus == 'timeout') {
uploadAjax.abort(); // cancel the request
// timeout message: request timed out, please retry
alert("请求超时,请重试")
// go back to the start page on timeout
closeCard();
}
}
});
}
//automatically detect faces and mark landmark points
function autodetectFace(){
function autodetectFace_f() {
canvas.width = 300;
canvas.height = 300;
context.drawImage(video, 0, 0, 300, 300);
// Example
var imgData = canvas.toDataURL("image/jpg");
imgData = imgData.replace(/^data:image\/(png|jpg);base64,/, "")
//upload to the backend
var uploadAjax = $.ajax({
type: "post",
//backend endpoint to call
url: "/autodetectFace/",
data: imgData, //JSON.stringify({ "imgData": imgData }),
contentType: "text/application",
//request timeout
timeout: 10000,
async: true,
success: function (resultBase64) {
// show the returned result in the DOM
// success callback
// resultBase64 = JSON.parse(resultBase64)
setImage(resultBase64);
},
error: function (data) {
},
// runs after the request completes
complete: function (XMLHttpRequest, textStatus) {
if (textStatus == 'timeout') {
uploadAjax.abort(); // cancel the request
// timeout message: request timed out, please retry
alert("请求超时,请重试")
// go back to the start page on timeout
closeCard();
}
}
});
}
if(intervalId) clearInterval(intervalId);
intervalId = window.setInterval(autodetectFace_f, {{ period }});
}
//automatically recognize facial expressions
function emotionRecognition(){
function emotionRecognition_f() {
canvas.width = 300;
canvas.height = 300;
context.drawImage(video, 0, 0, 300, 300);
// Example
var imgData = canvas.toDataURL("image/jpg");
imgData = imgData.replace(/^data:image\/(png|jpg);base64,/, "")
//upload to the backend
var uploadAjax = $.ajax({
type: "post",
//backend endpoint to call
url: "/emotionRecognition/",
data: imgData, //JSON.stringify({ "imgData": imgData }),
contentType: "text/application",
//request timeout
timeout: 10000,
async: true,
success: function (resultBase64) {
// show the returned result in the DOM
// success callback
// resultBase64 = JSON.parse(resultBase64)
setImage(resultBase64);
},
error: function (data) {
},
// runs after the request completes
complete: function (XMLHttpRequest, textStatus) {
if (textStatus == 'timeout') {
uploadAjax.abort(); // cancel the request
// timeout message: request timed out, please retry
alert("请求超时,请重试")
// go back to the start page on timeout
closeCard();
}
}
});
}
if(intervalId) clearInterval(intervalId);
intervalId = window.setInterval(emotionRecognition_f, {{ period }});
}
//face swap
function faceswap(){
function faceswap_f() {
canvas.width = 300;
canvas.height = 300;
context.drawImage(video, 0, 0, 300, 300);
// Example
var imgData = canvas.toDataURL("image/jpg");
imgData = imgData.replace(/^data:image\/(png|jpg);base64,/, "")
//upload to the backend
var uploadAjax = $.ajax({
type: "post",
//backend endpoint to call
url: "/faceswap/",
data: imgData, //JSON.stringify({ "imgData": imgData }),
contentType: "text/application",
//request timeout
timeout: 10000,
async: true,
success: function (resultBase64) {
// show the returned result in the DOM
// success callback
// resultBase64 = JSON.parse(resultBase64)
setImage(resultBase64);
},
error: function (data) {
},
// runs after the request completes
complete: function (XMLHttpRequest, textStatus) {
if (textStatus == 'timeout') {
uploadAjax.abort(); // cancel the request
// timeout message: request timed out, please retry
alert("请求超时,请重试")
// go back to the start page on timeout
closeCard();
}
}
});
}
if(intervalId) clearInterval(intervalId);
intervalId = window.setInterval(faceswap_f, 500);
}
//face search (identity recognition)
function facesearch(){
function facesearch_f() {
canvas.width = 300;
canvas.height = 300;
context.drawImage(video, 0, 0, 300, 300);
// Example
var imgData = canvas.toDataURL("image/jpg");
imgData = imgData.replace(/^data:image\/(png|jpg);base64,/, "")
//upload to the backend
var uploadAjax = $.ajax({
type: "post",
//backend endpoint to call
url: "/facesearch/",
data: imgData, //JSON.stringify({ "imgData": imgData }),
contentType: "text/application",
//request timeout
timeout: 10000,
async: true,
success: function (resultBase64) {
// show the returned result in the DOM
// success callback
// resultBase64 = JSON.parse(resultBase64)
setImage(resultBase64);
},
error: function (data) {
},
// runs after the request completes
complete: function (XMLHttpRequest, textStatus) {
if (textStatus == 'timeout') {
uploadAjax.abort(); // cancel the request
// timeout message: request timed out, please retry
alert("请求超时,请重试")
// go back to the start page on timeout
closeCard();
}
}
});
}
if(intervalId) clearInterval(intervalId);
intervalId = window.setInterval(facesearch_f, 1000);
}
</script>
</body>
</html>
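camera.html drives the demo by drawing the video element onto a canvas every PERIOD milliseconds (PERIOD is read from the environment by server.py, default 100) and POSTing each frame as bare base64. The same loop can be reproduced outside the browser; a rough OpenCV sketch (assumes a local webcam as device 0 and the service on localhost:8080):

# webcam_loop.py -- sketch: headless stand-in for camera.html
import base64
import cv2
import numpy as np
import requests

PERIOD_MS = 100                                    # mirrors the PERIOD polling interval
URL = "http://localhost:8080/emotionRecognition/"  # any of the POST endpoints works

cap = cv2.VideoCapture(0)                          # assumption: default webcam
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    _, jpg = cv2.imencode(".jpg", frame)
    resp = requests.post(URL, data=base64.b64encode(jpg.tobytes()), timeout=10)
    raw = base64.b64decode(resp.text.split(",", 1)[1])
    annotated = cv2.imdecode(np.frombuffer(raw, dtype=np.uint8), cv2.IMREAD_COLOR)
    cv2.imshow("annotated", annotated)
    if cv2.waitKey(PERIOD_MS) & 0xFF == ord("q"):  # roughly PERIOD ms between frames
        break
cap.release()
cv2.destroyAllWindows()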