Facenet core face recognition (facenet核心人脸识别)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from scipy import misc
import tensorflow as tf
import numpy as np
import os
import copy
import argparse
import facenet
import align.detect_face

from flask import Flask
from flask import request
import json
import threading
import uuid
import base64
import cv2
import dlib
import sqllite
from concurrent.futures import ThreadPoolExecutor


#图片对比
def main(image):
    emb = getEmb(image)
    return imageExist(emb)


#图片检测
def test(image, flag):
    # cv2读取图像
    img = cv2.imread(image)
    # 取灰度
    img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # 人脸数rects
    rects = detector(img_gray, 0)
    landmarks = np.matrix([[p.x, p.y] for p in predictor(img, rects[0]).parts()])
    dic = dict()
    for idx, point in enumerate(landmarks):
        # 68点的坐标
        pos = (point[0, 0], point[0, 1])
        dic[idx] = pos
        # 利用cv2.circle给每个特征点画一个圈,共68个
    #     cv2.circle(img, pos, 1, color=(0, 255, 0))
    #     # 利用cv2.putText输出1-68
    #     font = cv2.FONT_HERSHEY_SIMPLEX
    #     cv2.putText(img, str(idx + 1), pos, font, 0.8, (0, 0, 255), 1, cv2.LINE_AA)
    # cv2.namedWindow("img", 2)
    # cv2.imshow("img", img)
    # cv2.waitKey(0)

    print("38", dic[37][0], dic[37][1])
    print("42", dic[41][0], dic[41][1])
    print("39", dic[38][0], dic[38][1])
    print("41", dic[40][0], dic[40][1])

    right1 = cal_len(dic[37][0], dic[37][1], dic[41][0], dic[41][1])
    right2 = cal_len(dic[38][0], dic[38][1], dic[40][0], dic[40][1])
    right = (right1+right2)*3

    print("44", dic[43][0], dic[43][1])
    print("48", dic[47][0], dic[47][1])
    print("45", dic[44][0], dic[44][1])
    print("47", dic[46][0], dic[46][1])

    left1 = cal_len(dic[43][0], dic[43][1], dic[47][0], dic[47][1])
    left2 = cal_len(dic[44][0], dic[44][1], dic[46][0], dic[46][1])
    left = (left1+left2)*3

    print("62", dic[61][0], dic[61][1])
    print("68", dic[67][0], dic[67][1])
    print("63", dic[62][0], dic[62][1])
    print("67", dic[66][0], dic[66][1])
    print("64", dic[63][0], dic[63][1])
    print("66", dic[65][0], dic[65][1])

    mouth1 = cal_len(dic[61][0], dic[61][1], dic[67][0], dic[67][1])
    mouth2 = cal_len(dic[62][0], dic[62][1], dic[66][0], dic[66][1])
    mouth3 = cal_len(dic[63][0], dic[63][1], dic[65][0], dic[65][1])
    mouth = mouth1+mouth2+mouth3

    print("37", dic[36][0], dic[36][1])
    print("46", dic[45][0], dic[45][1])
    print("28", dic[27][0], dic[27][1])

    right_eye = cal_len(dic[36][0], dic[36][1], dic[27][0], dic[27][1])
    left_eye = cal_len(dic[45][0], dic[45][1], dic[27][0], dic[27][1])
    eye_eye = cal_len(dic[45][0], dic[45][1], dic[36][0], dic[36][1])

    print("49", dic[48][0], dic[48][1])
    print("55", dic[54][0], dic[54][1])
    mouth_w = cal_len(dic[48][0], dic[48][1], dic[54][0], dic[54][1])

    print("28", dic[27][0], dic[27][1])
    print("34", dic[33][0], dic[33][1])
    head = cal_len(dic[27][0], dic[27][1], dic[33][0], dic[33][1])

    print("right_eye", right_eye) #右眼到中间间距
    print("left_eye", left_eye)   #左眼到中间间距
    print("eye_eye", eye_eye)     #两眼间距
    print("eye_len_right", right) #眨右眼
    print("eye_len_left", left)   #眨左眼
    print("mouth_len", mouth)     #张嘴
    print("mouth_w", mouth_w)     #微笑
    print("head", head)           #点头

    lens = {
        "right_eye": right_eye,
        "left_eye": left_eye,
        "eye_eye": eye_eye,
        "eye_len_right": right,
        "eye_len_left": left,
        "mouth_len": mouth,
        "mouth_w": mouth_w,
        "head": head
    }

    return lens


# #拍照检测
# def cutTest(image):
#     # cv2读取图像
#     img = cv2.imread(image)
#     # 取灰度
#     img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
#     # 人脸数rects
#     rects = detector(img_gray, 0)
#     landmarks = np.matrix([[p.x, p.y] for p in predictor(img, rects[0]).parts()])
#     dic = dict()
#     for idx, point in enumerate(landmarks):
#         # 68点的坐标
#         pos = (point[0, 0], point[0, 1])
#         dic[idx] = pos
#
#     eye_max = cal_len(dic[36][0], dic[36][1], dic[45][0], dic[45][1])
#     eye_min = cal_len(dic[39][0], dic[39][1], dic[42][0], dic[42][1])
#
#     param1 = eye_max/eye_min
#
#     eye_nose = cal_len((dic[39][0]+dic[42][0])/2, (dic[39][1]+dic[42][1])/2,dic[33][0], dic[33][1])
#     nose_low = cal_len(dic[33][0], dic[33][1], dic[8][0], dic[8][1])
#
#     param2 = eye_nose/nose_low
#
#     side1 = cal_len(dic[0][0], dic[0][1], dic[16][0], dic[16][1])
#     side2 = cal_len(dic[2][0], dic[2][1], dic[14][0], dic[14][1])
#     side3 = cal_len(dic[4][0], dic[4][1], dic[12][0], dic[12][1])
#
#     param3 = side1/side2
#     param4 = side2/side3
#
#     nose = cal_len(dic[31][0], dic[31][1], dic[35][0], dic[35][1])
#     mouth = cal_len(dic[48][0], dic[48][1], dic[54][0], dic[54][1])
#
#     param5 = mouth/nose
#
#     param6 = eye_max/mouth
#     param7 = side2/mouth
#
#     nose_mouth = cal_len(dic[33][0], dic[33][1], (dic[48][0]+dic[54][0])/2, (dic[48][1]+dic[54][1])/2)
#     param8 = nose_low/nose_mouth
#
#     a = cal_dif(500, param1, param2, param3, param4, param5, param6, param7, param8)
#     param = round(np.log10(a), 3)
#     return param


#拍照检测
def cutTest(image):
    param = '-1'
    emb = getEmb(image)
    if emb != '-1':
        param = imageExist(emb)
        if param != '-1':
            return param
        imagels = str(list(emb[0, :]))
        print(imagels)
        uu = str(uuid.uuid1())
        name = 'hbw'
        sqllite.saveImage(uu, imagels, name)
        param = uu
    return param


def imageExist(emb):
    """Return the id of a stored embedding within distance 0.8 of *emb*, or '-1'.

    The original submitted each comparison to a ThreadPoolExecutor and then
    immediately blocked on future.result(), which serialises the work anyway;
    a plain sequential loop is equivalent and simpler.
    """
    imbs = sqllite.queryImages()
    print(imbs)
    scoreLs = list()
    for imb in imbs:
        result = cal_compare(emb, imb, scoreLs)
        print("result", result)
        # cal_compare returns the (non-empty) match list, or None.
        if result:
            return result[0]
    return '-1'


def cal_compare(emb, imb, scoreLs, threshold=0.8):
    """Compare embedding *emb* against stored record *imb*.

    Parameters
    ----------
    emb : ndarray whose first row is the query embedding.
    imb : stored record exposing .image (serialised embedding) and .id.
    scoreLs : list that matching ids are appended to.
    threshold : float, optional
        Maximum L2 distance counted as a match (previously hard-coded 0.8).

    Returns the (mutated) scoreLs on a match, None otherwise — callers test
    the return value for truthiness.
    """
    em = str2array(imb.image)
    # Euclidean (L2) distance between the two embeddings.
    score = np.sqrt(np.sum(np.square(np.subtract(emb[0, :], em))))
    if score < threshold:
        scoreLs.append(imb.id)
        return scoreLs
    return None


def getEmb(image):
    """Return the facenet embedding for the first face found in *image*.

    Uses the module-level MTCNN networks (pnet/rnet/onet) for detection and
    the loaded facenet graph (images_placeholder, embeddings,
    phase_train_placeholder, sess) for embedding.

    Returns the string '-1' when no face is detected (callers compare
    against that sentinel).
    """
    minsize = 20  # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # three steps's threshold
    factor = 0.709  # scale factor
    margin = 44  # pixels of context kept around the detected box
    image_size = 160  # input resolution fed to the facenet model

    print('Creating networks and loading parameters')
    # NOTE(review): scipy.misc.imread/imresize were removed in SciPy 1.2;
    # this code requires an old SciPy (or a migration to imageio/PIL).
    img = misc.imread(os.path.expanduser(image), mode='RGB')
    img_size = np.asarray(img.shape)[0:2]
    bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
    img_list = list()
    if len(bounding_boxes) >= 1:
        # Only the first detected face is embedded.
        det = np.squeeze(bounding_boxes[0, 0:4])
        bb = np.zeros(4, dtype=np.int32)
        # Expand the box by margin/2 on each side, clamped to the image.
        bb[0] = np.maximum(det[0] - margin / 2, 0)
        bb[1] = np.maximum(det[1] - margin / 2, 0)
        bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
        bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
        cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
        aligned = misc.imresize(cropped, (image_size, image_size), interp='bilinear')
        prewhitened = facenet.prewhiten(aligned)

        img_list.append(prewhitened)
        images = np.stack(img_list)
        feed_dict = {images_placeholder: images, phase_train_placeholder: False}
        emb = sess.run(embeddings, feed_dict=feed_dict)
        return emb
    # Sentinel value for "no face found".
    return '-1'


def str2array(s):
    """Parse a string like "[0.1, 0.2, ...]" back into a 1-D float ndarray.

    Inverse of the str(list(...)) serialisation used when embeddings are
    stored in sqlite.  The original copied the split tokens into an
    intermediate list before converting; one comprehension suffices.
    """
    stripped = s.replace('[', '').replace(']', '')
    return np.array([float(tok) for tok in stripped.split(',')])


#拍照检测
# def cutTest(image):
#     minsize = 20  # minimum size of face
#     threshold = [0.6, 0.7, 0.7]  # three steps's threshold
#     factor = 0.709  # scale factor
#     print('Creating networks and loading parameters')
#     img = misc.imread(os.path.expanduser(image), mode='RGB')
#     bounding_boxes, points = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
#
#     eye1_w = points[0]
#     eye2_w = points[1]
#     nose_w = points[2]
#     mouth1_w = points[3]
#     mouth2_w = points[4]
#     eye1_h = points[5]
#     eye2_h = points[6]
#     nose_h = points[7]
#     mouth1_h = points[8]
#     mouth2_h = points[9]
#
#     eye_len = cal_len(eye1_w, eye1_h, eye2_w, eye2_h)
#     mouth_len = cal_len(mouth1_w, mouth1_h, mouth2_w, mouth2_h)
#     left_eye_nose_len = cal_len(eye1_w, eye1_h, nose_w, nose_h)
#     right_eye_nose_len = cal_len(eye2_w, eye2_h, nose_w, nose_h)
#     left_mouth_nose_len = cal_len(mouth1_w, mouth1_h, nose_w, nose_h)
#     right_mouth_nose_len = cal_len(mouth2_w, mouth2_h, nose_w, nose_h)
#     eye_nose_len = cal_len((eye1_w+eye2_w)/2, (eye1_h+eye2_h)/2, nose_w, nose_h)
#     mouth_nose_len = cal_len((mouth1_w+mouth2_w)/2, (mouth1_h+mouth2_h)/2, nose_w, nose_h)
#
#     print("眼睛距离", eye_len)
#     print("嘴间距离", mouth_len)
#     print("左眼鼻距离", left_eye_nose_len)
#     print("右眼鼻距离", right_eye_nose_len)
#     print("左嘴鼻距离", left_mouth_nose_len)
#     print("右嘴鼻距离", right_mouth_nose_len)
#
#     param1 = round(eye_len/mouth_len, 3)
#     param2 = round((right_eye_nose_len+left_eye_nose_len)/(right_mouth_nose_len+left_mouth_nose_len), 3)
#     param3 = round(eye_nose_len/mouth_nose_len, 3)
#
#     a = cal_dif(300, param1, param2, param3)
#     param = round(np.log10(a), 3)
#     return param


def cal_len(x1, y1, x2, y2):
    """Return the Euclidean distance between points (x1, y1) and (x2, y2).

    The original built a 2x2 matrix and subtracted its rows; np.hypot on the
    coordinate deltas computes the same value directly.
    """
    return np.hypot(x2 - x1, y2 - y1)


def opt(image, minsize, threshold, factor, margin, image_size, img_list, image_paths):
    """Detect, crop, resize and prewhiten one image, appending to img_list.

    When no face is found, the path is removed from image_paths instead and
    a message is printed.
    """
    loaded = misc.imread(os.path.expanduser(image), mode='RGB')
    height_width = np.asarray(loaded.shape)[0:2]
    boxes, _ = align.detect_face.detect_face(loaded, minsize, pnet, rnet, onet, threshold, factor)
    if len(boxes) < 1:
        image_paths.remove(image)
        print("can't detect face, remove ", image)
        return
    box = np.squeeze(boxes[0, 0:4])
    crop_box = np.zeros(4, dtype=np.int32)
    # Grow the box by margin/2 per side, clamped to the image bounds.
    crop_box[0] = np.maximum(box[0] - margin / 2, 0)
    crop_box[1] = np.maximum(box[1] - margin / 2, 0)
    crop_box[2] = np.minimum(box[2] + margin / 2, height_width[1])
    crop_box[3] = np.minimum(box[3] + margin / 2, height_width[0])
    face = loaded[crop_box[1]:crop_box[3], crop_box[0]:crop_box[2], :]
    resized = misc.imresize(face, (image_size, image_size), interp='bilinear')
    img_list.append(facenet.prewhiten(resized))


def load_and_align_data(image_paths, image_size, margin):
    """Load every path in *image_paths*, align the faces, and stack them.

    Paths with no detectable face are removed from *image_paths* in place
    (see opt).

    NOTE(review): worker threads append to img_list in completion order, so
    the row order of the returned array is nondeterministic and may not match
    image_paths — confirm callers do not rely on positional correspondence.
    np.stack also raises when no face was found in any image (empty img_list).
    """
    minsize = 20  # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # three steps's threshold
    factor = 0.709  # scale factor

    print('Creating networks and loading parameters')
    # Iterate a copy because opt() may remove entries from image_paths.
    tmp_image_paths = copy.copy(image_paths)
    img_list = []
    with ThreadPoolExecutor(8) as executor:
        for image in tmp_image_paths:
            executor.submit(opt, image, minsize, threshold, factor, margin, image_size, img_list, image_paths)
            # t = threading.Thread(target=opt, args=(image, minsize, threshold, factor, margin, image_size, img_list
            #                                        , image_paths))
            # t.start()
            # t.join()
    images = np.stack(img_list)
    return images


def parse_arguments(argv):
    """Parse the image-comparison command line from *argv*."""
    parser = argparse.ArgumentParser()
    parser.add_argument('image_files', type=str, nargs='+',
                        help='Images to compare')
    parser.add_argument('--image_size', type=int, default=160,
                        help='Image size (height, width) in pixels.')
    parser.add_argument('--margin', type=int, default=44,
                        help='Margin for the crop around the bounding box (height, width) in pixels.')
    parser.add_argument('--gpu_memory_fraction', type=float, default=1.0,
                        help='Upper bound on the amount of GPU memory that will be used by the process.')
    return parser.parse_args(argv)


def parse_arguments2(argv):
    """Parse the model-loading command line from *argv*."""
    parser = argparse.ArgumentParser()
    parser.add_argument('model', type=str,
                        help='Could be either a directory containing the meta_file and ckpt_file or a model protobuf (.pb) file')
    parser.add_argument('--image_size', type=int, default=160,
                        help='Image size (height, width) in pixels.')
    parser.add_argument('--margin', type=int, default=44,
                        help='Margin for the crop around the bounding box (height, width) in pixels.')
    parser.add_argument('--gpu_memory_fraction', type=float, default=1.0,
                        help='Upper bound on the amount of GPU memory that will be used by the process.')
    return parser.parse_args(argv)


app = Flask(__name__)


def resize2(filepath):
    """Overwrite the image at *filepath* with a 160x160 resized copy."""
    original = cv2.imread(filepath)
    shrunk = cv2.resize(original, (160, 160), interpolation=cv2.INTER_AREA)
    cv2.imwrite(filepath, shrunk)


def writeImage(imPath, im):
    """Write the raw bytes *im* to the file at *imPath*."""
    with open(imPath, "wb") as out:
        out.write(im)


# Image comparison endpoint.
@app.route("/compare", methods=["POST"])
def compare():
    """Compare an uploaded image against the stored embeddings.

    Request body: JSON {"image1": "<base64-encoded PNG>"}.
    Response: JSON {"data": "<matched id or '-1'>"}.
    """
    dic = json.loads(request.get_data())
    im1 = base64.b64decode(dic["image1"])
    im1path = "C:/Users/hbw/facenet/src/img/"+str(uuid.uuid1())+".png"
    writeImage(im1path, im1)
    t = dict()
    try:
        # Resize then embed/compare; the temp file is always removed, even if
        # detection raises (the original leaked the file on any exception).
        resize2(im1path)
        t['data'] = main(im1path)
    finally:
        os.remove(im1path)
    return json.dumps(t, ensure_ascii=False)


# Image detection endpoint (facial landmark distance measurements).
@app.route("/detect", methods=["POST"])
def detect():
    """Return the landmark distance measurements for an uploaded image.

    Request body: JSON {"image1": "<base64-encoded PNG>"}.
    Response: JSON {"data": "<stringified measurement dict>"}.
    """
    dic = json.loads(request.get_data())
    im1 = base64.b64decode(dic["image1"])
    im1path = "C:/Users/hbw/facenet/src/img/"+str(uuid.uuid1())+".png"
    writeImage(im1path, im1)
    t = dict()
    try:
        # Temp file is always removed, even when detection raises
        # (the original leaked the file on error).
        t["data"] = str(test(im1path, "1"))
    finally:
        os.remove(im1path)
    return json.dumps(t, ensure_ascii=False)


# Photo registration endpoint.
@app.route("/cut", methods=["POST"])
def cut():
    """Register an uploaded image and return its stored id.

    Request body: JSON {"image1": "<base64-encoded PNG>"}.
    Response: JSON {"data": "<existing or new uuid, or '-1'>"}.
    """
    dic = json.loads(request.get_data())
    im1 = base64.b64decode(dic["image1"])
    im1path = "C:/Users/hbw/facenet/src/img/" + str(uuid.uuid1()) + ".png"
    writeImage(im1path, im1)
    t = dict()
    try:
        # Temp file is always removed, even on failure
        # (the original leaked the file on any exception).
        resize2(im1path)
        param = cutTest(im1path)
        print("param", param)
        t["data"] = param
    finally:
        os.remove(im1path)
    return json.dumps(t, ensure_ascii=False)


@app.route("/deleteall", methods=["post"])
def deleteAll():
    sqllite.removeImageAll()
    t = dict()
    t['data'] = '1'
    return json.dumps(t, ensure_ascii=False)


if __name__ == '__main__':
    # Configure GPU usage.
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.7  # cap this process at 70% of GPU memory
    config.gpu_options.allow_growth = True
    session = tf.Session(config=config)  # NOTE(review): unused — `sess` below is the active session
    # Configure the model path and start everything.
    models = list()
    models.append('C:/Users/hbw/facenet/src/20180402-114759/20180402-114759.pb')
    args = parse_arguments2(models)
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        with sess.as_default():
            # MTCNN face-detection networks used by getEmb and opt.
            pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)
        # with tf.Session() as sess:
            # Load the model
            facenet.load_model(args.model)
            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            # Load the dlib face detector and 68-point landmark predictor
            # used by test().
            detector = dlib.get_frontal_face_detector()
            predictor = dlib.shape_predictor('E:/dev/facerecognize/blib/shape_predictor_68_face_landmarks.dat')
            app.run(host='192.168.2.171', port=8000, debug=False, threaded=True)

 

 

 

 

 

sqllite.py — SQLite helper module used by the server above:

import sqlite3


def saveImage(id, image, name):
    """Insert one (id, image, name) row into the image table.

    Uses a parameterised query — the original built the SQL by string
    concatenation, which breaks on quotes in the data and is injectable.
    """
    conn = sqlite3.connect('test.db', check_same_thread=False)
    try:
        conn.execute('insert into image (id, image, name) values (?, ?, ?)',
                     (id, image, name))
        conn.commit()
    finally:
        conn.close()


def queryImages():
    """Return every stored row as a list of ImageInfo objects."""
    return optTable2('select id, image, name from image')


def removeImageAll():
    """Delete all rows from the image table."""
    optTable('delete from image')


def optTable(sql, params=()):
    """Execute a write statement against test.db and commit.

    Parameters
    ----------
    sql : str
        The statement to execute.
    params : sequence, optional
        Bound parameters for '?' placeholders.  New and backward compatible:
        existing callers passing only sql behave exactly as before.
    """
    conn = sqlite3.connect('test.db', check_same_thread=False)
    try:
        # conn.execute is sqlite3's shortcut for cursor() + execute(); the
        # original leaked its explicit cursor when execute raised.
        conn.execute(sql, params)
        conn.commit()
    finally:
        conn.close()


def optTable2(sql, params=()):
    """Execute a query against test.db and return rows as ImageInfo objects.

    Parameters
    ----------
    sql : str
        A select statement yielding (id, image, name) columns.
    params : sequence, optional
        Bound parameters for '?' placeholders.  New and backward compatible:
        existing callers passing only sql behave exactly as before.
    """
    conn = sqlite3.connect('test.db', check_same_thread=False)
    ims = list()
    try:
        cursor = conn.cursor()
        re = cursor.execute(sql, params)
        print("re", re)
        for row in re:
            # Each row is (id, image, name); see queryImages.
            imInfo = ImageInfo(row[0], row[1], row[2])
            ims.append(imInfo)
            print(row[0], row[1], row[2])
        return ims
    finally:
        conn.close()


class ImageInfo(object):
    """Row object for the image table: a stored embedding and its metadata."""

    def __init__(self, id, image, name):
        # id: uuid string; image: str(list(embedding)); name: owner label.
        self.id = id
        self.image = image
        self.name = name

    def __repr__(self):
        # Debug-friendly representation (new; callers did not rely on the
        # default object repr).
        return "ImageInfo(id=%r, name=%r)" % (self.id, self.name)


if __name__ == "__main__":
    print("main")
    ls = queryImages()
    for im in ls:
        print(im.id)
        print(im.image)
        print(im.name)