Face recognition login breaks down into two steps: first detect the face, then compare it against known faces.
Design approach
- Prepare a face database: upload photos and tag each one with a label
- Extract the photos and labels from the face database and quantize them, turning each face into a set of numbers (a descriptor) that can be compared (see the sketch after this list)
- Use a single photo to test how well the matching works
- Open the browser camera for live face detection and comparison (sketched at the end of this section)
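The second step is the core of the scheme: the recognition model reduces every face to a 128-value descriptor, and two faces are compared by the Euclidean distance between their descriptors. A minimal sketch, assuming two descriptors already produced by the model (the variable names are hypothetical):

// storedDescriptor: Float32Array(128) computed from a database photo (hypothetical name)
// probeDescriptor: Float32Array(128) computed from the login photo (hypothetical name)
// faceapi.euclideanDistance is a helper shipped with face-api.js
const distance = faceapi.euclideanDistance(storedDescriptor, probeDescriptor)
// smaller distance = more similar; 0.6 is the library's conventional threshold
const isMatch = distance < 0.6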
Implementation
The main library used is face-api.js.
Clone the repo and try the examples:
git clone https://github.com/justadudewhohacks/face-api.js.git
cd face-api.js/examples/examples-browser
npm i
npm start
// open http://localhost:3000
// Node.js examples:
cd face-api.js/examples/examples-nodejs
npm i
// ts-node examples
ts-node faceDetection.ts
Code
function dodetecpic() {
$.messager.progress();
// Load the pre-trained models (weights, biases)
// ageGenderNet: estimates gender and age
// faceExpressionNet: recognizes expressions (happy, sad, neutral)
// faceLandmark68TinyNet: detects facial landmarks, for the tiny variant
// faceRecognitionNet: computes face descriptors for recognition
// ssdMobilenetV1: detector based on Google's open-source MobileNet
// tinyFaceDetector: lighter and a bit faster than the MobileNet detector
Promise.all([
faceapi.nets.faceRecognitionNet.loadFromUri('https://raw.githubusercontent.com/justadudewhohacks/face-api.js/master/weights'),
faceapi.nets.faceLandmark68Net.loadFromUri('https://raw.githubusercontent.com/justadudewhohacks/face-api.js/master/weights'),
faceapi.nets.faceLandmark68TinyNet.loadFromUri('https://raw.githubusercontent.com/justadudewhohacks/face-api.js/master/weights'),
faceapi.nets.ssdMobilenetv1.loadFromUri('https://raw.githubusercontent.com/justadudewhohacks/face-api.js/master/weights'),
faceapi.nets.tinyFaceDetector.loadFromUri('https://raw.githubusercontent.com/justadudewhohacks/face-api.js/master/weights'),
faceapi.nets.mtcnn.loadFromUri('https://raw.githubusercontent.com/justadudewhohacks/face-api.js/master/weights'),
]).then(async () => {
// Add a layer over the original image container for the blue detection boxes
const container = document.createElement('div')
container.style.position = 'relative'
$('#picmodal').prepend(container)
// First load the maintained face data (used for comparison below)
const labeledFaceDescriptors = await loadLabeledImages()
// Build a matcher over the labeled descriptors; 0.6 is the maximum descriptor distance for a match (anything farther is labeled 'unknown')
const faceMatcher = new faceapi.FaceMatcher(labeledFaceDescriptors, 0.6)
// Get the input image
let image = document.getElementById('testpic')
// Create a canvas layer matching the image size, used to draw the boxes
let canvas = faceapi.createCanvasFromMedia(image)
container.prepend(canvas)
const displaySize = { width: image.width, height: image.height }
faceapi.matchDimensions(canvas, displaySize)
// Pick the detection algorithm and its parameters for scanning faces in the image
const options = new faceapi.SsdMobilenetv1Options({ minConfidence: 0.38 })
//const options = new faceapi.TinyFaceDetectorOptions()
//const options = new faceapi.MtcnnOptions()
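// The commented-out options above select a different detector: TinyFaceDetector
// is lighter and faster but less accurate, MTCNN is another alternative; a
// detector can only be used if its model was loaded in the Promise.all above.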
// Extract the descriptor of every face found in the image
const detections = await faceapi.detectAllFaces(image, options).withFaceLandmarks().withFaceDescriptors()
// Resize the detection results to the display size so the boxes line up with the faces
const resizedDetections = faceapi.resizeResults(detections, displaySize)
// Compare against the prepared label library and find the best matching label for each face
const results = resizedDetections.map(d => faceMatcher.findBestMatch(d.descriptor))
console.log(results)
results.forEach((result, i) => {
// Draw the comparison result
const box = resizedDetections[i].detection.box
const drawBox = new faceapi.draw.DrawBox(box, { label: result.toString() })
drawBox.draw(canvas)
console.log(box, drawBox)
})
$.messager.progress('close');
})
}
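For context, a hypothetical way to trigger the check (the button id is an assumption, not from the original page):

// run the comparison when the user clicks the face-login button
$('#btnFaceLogin').on('click', dodetecpic)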
// Load the labeled face data
async function loadLabeledImages() {
// Fetch the face image data: image URLs plus their labels
const data = await $.get('/FaceLibs/GetImgData')
// Collect the distinct labels
const labels = [...new Set(data.map(item=>item.Label))]
return Promise.all(
labels.map(async label => {
const descriptions = []
const imgs = data.filter(item => item.Label === label)
// Iterate over every image that carries this label
for (let i = 0; i < imgs.length; i++) {
const item = imgs[i]
const img = await faceapi.fetchImage(`${item.ImgUrl}`)
// Detection options, same detector as in dodetecpic
const options = new faceapi.SsdMobilenetv1Options({minConfidence:0.38})
// Detect the face in the photo and compute its descriptor
const detections = await faceapi.detectSingleFace(img, options).withFaceLandmarks().withFaceDescriptor()
if (detections) {
descriptions.push(detections.descriptor)
} else {
console.warn('Unrecognizable face')
}
}
console.log(label, descriptions);
return new faceapi.LabeledFaceDescriptors(label, descriptions)
})
)
}
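For reference, loadLabeledImages assumes /FaceLibs/GetImgData returns an array of label/URL pairs. A hypothetical response (the field names Label and ImgUrl are taken from the code above; the values are made up):

[
{ "Label": "zhangsan", "ImgUrl": "/upload/faces/zhangsan_1.jpg" },
{ "Label": "zhangsan", "ImgUrl": "/upload/faces/zhangsan_2.jpg" },
{ "Label": "lisi", "ImgUrl": "/upload/faces/lisi_1.jpg" }
]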
// The most commonly used detection calls; pick the chain you need
// all faces
await faceapi.detectAllFaces(input)
await faceapi.detectAllFaces(input).withFaceExpressions()
await faceapi.detectAllFaces(input).withFaceLandmarks()
await faceapi.detectAllFaces(input).withFaceLandmarks().withFaceExpressions()
await faceapi.detectAllFaces(input).withFaceLandmarks().withFaceExpressions().withFaceDescriptors()
await faceapi.detectAllFaces(input).withFaceLandmarks().withAgeAndGender().withFaceDescriptors()
await faceapi.detectAllFaces(input).withFaceLandmarks().withFaceExpressions().withAgeAndGender().withFaceDescriptors()
// single face
await faceapi.detectSingleFace(input)
await faceapi.detectSingleFace(input).withFaceExpressions()
await faceapi.detectSingleFace(input).withFaceLandmarks()
await faceapi.detectSingleFace(input).withFaceLandmarks().withFaceExpressions()
await faceapi.detectSingleFace(input).withFaceLandmarks().withFaceExpressions().withFaceDescriptor()
await faceapi.detectSingleFace(input).withFaceLandmarks().withAgeAndGender().withFaceDescriptor()
await faceapi.detectSingleFace(input).withFaceLandmarks().withFaceExpressions().withAgeAndGender().withFaceDescriptor()
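The fourth design step, comparing against the live camera, works the same way: face-api.js accepts a video element wherever it accepts an image. A minimal sketch, assuming the models are already loaded, a faceMatcher built as in dodetecpic, and a hypothetical <video id="webcam" autoplay muted></video> element on the page:

async function detectFromWebcam() {
const video = document.getElementById('webcam')
// ask the browser for the camera stream
video.srcObject = await navigator.mediaDevices.getUserMedia({ video: true })
const options = new faceapi.SsdMobilenetv1Options({ minConfidence: 0.38 })
// poll the video frames; the 300 ms interval is an arbitrary choice
setInterval(async () => {
const detection = await faceapi.detectSingleFace(video, options).withFaceLandmarks().withFaceDescriptor()
if (detection) {
// faceMatcher: the matcher built from loadLabeledImages(), as in dodetecpic
const best = faceMatcher.findBestMatch(detection.descriptor)
console.log(best.toString()) // e.g. "zhangsan (0.35)" or "unknown"
}
}, 300)
}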