ps: demo基于vue3、unocss、vueuse
安装[face-api.js](https://github.com/justadudewhohacks/face-api.js)(JavaScript API for face detection and face recognition in the browser and nodejs with tensorflow.js)
pnpm add face-api.js,然后将[模型](https://github.com/justadudewhohacks/face-api.js/tree/master/weights)下载到public/models下
vue页面
<template>
<!-- Circular camera preview: square container, video rounded to 50% -->
<div w-45 h-45 bg-primary p-1.25 rounded-22.5 mx-auto>
<!-- muted/playsinline/webkit-playsinline: inline-playback hints for mobile WebKit;
     x5-video-* attributes target the X5 (WeChat/QQ) browser kernel -->
<video
autoplay
muted
webkit-playsinline="true"
playsinline="true"
x5-video-player-type="h5-page"
x5-video-orientation="landscape|portrait"
ref="videoEl"
w-full
h-full
bg-black
rounded="50%"
overflow-hidden
></video>
</div>
</template>
<script setup lang="ts">
import {
captureVideoFrame,
countdownTimer,
downloadFile,
faceapiLivingEvent,
faceapiLivingEventHint,
getFaceGestureResult,
getFaceLandmarks,
getRandomElements,
sleep,
} from "/@/utils"
// Request a square camera stream sized to the device's physical screen width
const streamWh = window.screen.width * window.devicePixelRatio
const { stream, isSupported } = useUserMedia({
enabled: true,
constraints: { video: { width: streamWh, height: streamWh }, audio: false },
})
// Video element that renders the camera stream
const videoEl = ref<HTMLVideoElement | null>(null)
// User-facing prompt text (countdown digits / gesture hints)
const reminder = ref("")
// True while a liveness-detection session is in progress
const authLoading = ref(false)
// gesture -> completed? map; the watch below drives the flow from it
const events = reactive<Map<FaceapiGesture, boolean>>(new Map())
// Attach the camera stream to the video element once both become available.
watchEffect(() => {
// Whether a camera actually exists is not checked here; add your own check if needed
if (!isSupported.value) {
showToast(t("浏览器不支持调用摄像头"))
return false
}
if (videoEl.value) videoEl.value.srcObject = stream.value!
})
// React to each gesture-state change: start the next pending gesture, or —
// once all gestures are done — capture a frame and download it.
watch(events, () => {
let x = 0
let nextEvent: FaceapiGesture | undefined = undefined
const eventArr = [...events.entries()]
// Find the first gesture not yet completed; x counts completed ones
eventArr.some(([e, b]) => {
b ? x++ : (nextEvent = e)
return !b
})
// A liveness gesture is still pending — show its hint and start detecting it
if (nextEvent) {
reminder.value = faceapiLivingEventHint[nextEvent]
livingEventStart(videoEl.value, nextEvent)
return false
}
// All liveness gestures completed — capture a still frame after 2s
reminder.value = "请保持静止"
setTimeout(async () => {
const image = await captureVideoFrame(videoEl.value as HTMLVideoElement)
image && downloadFile("auth.png", image)
authLoading.value = false
}, 2000)
})
// Start liveness detection: ensure the video is playing, confirm a face is
// visible, run a 3-second countdown, then queue two random liveness gestures
// (the watch on `events` picks them up from there).
async function handleStart() {
  authLoading.value = true
  const video = videoEl.value as HTMLVideoElement
  // Start playback if the video is not playing yet; play() may reject when
  // autoplay is blocked, so swallow that instead of leaving a floating rejection
  if (video.paused || video.currentTime === 0) {
    video.play().catch(() => {})
  }
  await sleep(100)
  // BUGFIX: 0.6 was previously passed as the 3rd argument (sleep time in ms);
  // it is the minimum detection score, i.e. the 2nd argument.
  const landmarks = await getFaceLandmarks(video, 0.6)
  if (!landmarks) {
    showToast("未检测到人脸,请确保脸部清晰完整,光线充足")
    // (the duplicated reminder reset has been removed)
    reminder.value = ""
    authLoading.value = false
    return false
  }
  // Show 3, 2, 1 before starting the gesture checks
  await countdownTimer(3, x => {
    reminder.value = x.toString()
  })
  // Randomly pick two liveness gestures to verify
  const eventArr = getRandomElements(faceapiLivingEvent, 2) as FaceapiGesture[]
  eventArr.forEach(e => events.set(e, false))
}
// Verify a single liveness gesture, retrying until it is detected.
// The original recursed without awaiting the recursive call (losing errors
// and the completion signal); this awaits each attempt in a loop instead.
async function livingEventStart(
  video: HTMLVideoElement | null,
  e: FaceapiGesture
) {
  if (!video || !e) return false
  // Keep polling until the gesture check passes (same semantics as the
  // original unbounded retry)
  while (!(await getFaceGestureResult(video, e))) {
    // retry
  }
  events.set(e, true)
}
</script>
/utils/function.ts
// Browser-environment detection: each value is a case-insensitive regex
// pattern matched against the userAgent string.
export enum BrowserEnv {
  WeChat = "MicroMessenger",
  Android = "Android",
  Ios = "iPhone|iPad|iPod",
  Chrome = "Chrome",
  Safari = "^((?!chrome|android).)*safari",
}

// Test whether the current userAgent matches the given environment pattern.
// BUGFIX: `window?.navigator` still throws a ReferenceError where `window`
// is undeclared (node/SSR) — optional chaining only guards null/undefined
// values, not undeclared identifiers — so guard with `typeof` instead.
export function isBrowserEnv(env: BrowserEnv) {
  const userAgent =
    typeof window === "undefined" ? "" : window.navigator?.userAgent ?? ""
  const reg = new RegExp(env, "i")
  return reg.test(userAgent)
}
// Resolve after x milliseconds have elapsed.
export function sleep(x: number) {
  return new Promise(resolve => {
    setTimeout(resolve, x)
  })
}
// Count down from x to 1, invoking cb once per tick, then resolve with true
// one tick after the last callback.
// x: starting count — a start of 0 or less resolves on the first tick
//    (BUGFIX: the original `x === 0` check looped forever on negative input).
// cb: optional per-tick callback, called with the current value
//     (the original typed it required but guarded it with `cb && cb(x)`).
// interval: tick length in ms; defaults to 1000 for backward compatibility.
export function countdownTimer(
  x: number,
  cb?: (x: number) => void,
  interval = 1000
) {
  return new Promise(res => {
    const timer = setInterval(() => {
      if (x <= 0) {
        clearInterval(timer)
        res(true)
        return
      }
      cb?.(x)
      x--
    }, interval)
  })
}
// Pick n distinct random elements from arr (Floyd-style partial selection,
// O(n) time and space). Returns null when n exceeds the array length.
// The input array is never mutated, so the defensive copy the original made
// was redundant and has been removed. Generalized from any[] to a generic
// readonly T[] — backward compatible for all existing callers.
export function getRandomElements<T>(arr: readonly T[], n: number): T[] | null {
  let len = arr.length
  if (n > len) return null
  const results: T[] = new Array(n)
  // taken[i] redirects a chosen index to the index that replaced it, so each
  // element can be selected at most once without shuffling the whole array
  const taken: number[] = []
  while (n--) {
    const x = Math.floor(Math.random() * len)
    results[n] = arr[x in taken ? taken[x] : x]
    // mark x as used by pointing it at the current last unused index
    taken[x] = --len in taken ? taken[len] : len
  }
  return results
}
/utils/faceapi.ts ,相同距离安卓和ios的前置摄像头获取到的画面大小不一致,需要差异处理
import * as faceapi from "face-api.js"
import { sleep } from "/@/utils"
// Landmark-accessor method names available on the detection result's
// landmarks object (see getFaceGestureResult, which calls them dynamically)
type FaceapiFeature =
| "getJawOutline"
| "getNose"
| "getMouth"
| "getLeftEye"
| "getRightEye"
| "getLeftEyeBrow"
| "getRightEyeBrow"
// Liveness gestures supported by this module
export type FaceapiGesture = "nod" | "shake" | "mouth" | "eye"
// Coordinate axis of a landmark point
type FaceapiAxis = "x" | "y"
// Dimension of the face bounding box used for threshold scaling
type FaceapiBox = "width" | "height"
// Per gesture: [landmark-accessor name, primary point index, primary axis,
// secondary point index (same axis) or secondary axis (same point)].
// The secondary value is the "rest" coordinate used to reject whole-head
// movement when a local feature (mouth/eye) is being tested.
const faceapiGestureGetDataMap: Map<
FaceapiGesture,
[FaceapiFeature, number, FaceapiAxis, number | FaceapiAxis]
> = new Map([
["nod", ["getNose", 4, "y", "x"]],
["shake", ["getNose", 4, "x", "y"]],
["mouth", ["getMouth", 10, "y", 3]],
["eye", ["getLeftEye", 1, "y", 4]],
])
// Per gesture: [face-box dimension used for scaling, movement-to-face-size
// ratio, sampling gap in ms between landmark reads].
// NOTE(review): "Thershold" is a typo for "Threshold"; the name is kept
// because it is referenced elsewhere in this file.
const faceapiGestureThersholdMap: Map<string, [FaceapiBox, number, number]> =
new Map([
["nod", ["height", 0.3, 2]],
["shake", ["width", 0.8, 5]],
["mouth", ["height", 0.12, 1]],
["eye", ["height", 0.01, 0]],
])
// Path to the face-api model weights (served from the public directory)
const modelsPath = import.meta.env.VITE_PUBLIC_PATH + "/models"
// Kick off model loading at module init. loadFromUri returns a promise; the
// original dropped it, so a failed load surfaced only as an unhandled
// rejection — log failures explicitly instead so they are diagnosable.
faceapi.nets.ssdMobilenetv1
  .loadFromUri(modelsPath)
  .catch(err => console.error("ssdMobilenetv1 load failed", err))
faceapi.nets.faceLandmark68Net
  .loadFromUri(modelsPath)
  .catch(err => console.error("faceLandmark68Net load failed", err))
// Detect a single face and its landmarks in the current video frame.
// score: minimum detection confidence; results below it resolve undefined.
// time: optional delay in ms before sampling the frame.
async function getFaceLandmarks(
  video: HTMLVideoElement,
  score = 0.5,
  time = 0
) {
  await sleep(time)
  const options = new faceapi.SsdMobilenetv1Options()
  const detections = await faceapi
    .detectSingleFace(video, options)
    .withFaceLandmarks()
  const detScore = detections?.detection.score
  // No face found, or confidence too low
  if (!detScore || detScore < score) return undefined
  return detections
}
// Lazily yield `time` landmark-detection promises, each preceded by a
// `gap`-ms delay inside getFaceLandmarks.
function* generatorFaceLandmarks(video: HTMLVideoElement, time = 8, gap = 0) {
  let remaining = time
  while (remaining-- > 0) {
    yield getFaceLandmarks(video, 0.5, gap)
  }
}
// Verify one liveness gesture: collect up to n landmark samples, check that
// enough samples deviate from their average by more than a face-size-relative
// threshold, and that the "rest" coordinate (if any) stayed still — this
// rejects whole-head movement when a local feature (mouth/eye) is tested.
// Returns true when the gesture is considered performed.
// Changes vs. original: debug console.log statements removed; local
// "thershold" typo fixed; explicit guard when no samples were collected
// (the average was previously NaN in that case).
async function getFaceGestureResult(
  video: HTMLVideoElement,
  gesture: FaceapiGesture,
  n: number = 6
) {
  const [dataFn, index, axis, rest] = faceapiGestureGetDataMap.get(gesture)!
  const [direction, ratio, gap] = faceapiGestureThersholdMap.get(gesture)!
  if (!dataFn || !direction) return false
  let threshold = 0
  const coords: number[] = []
  const restCoords: number[] = []
  const it = generatorFaceLandmarks(video, n, gap)
  for (const pending of it) {
    const detections = await pending
    if (!detections) continue
    // Compute the threshold once, from the first successful detection:
    // scale the movement requirement by the face size relative to the frame,
    // so larger (closer) faces require proportionally larger movements.
    if (threshold === 0) {
      const streamBox = {
        width: video.videoWidth,
        height: video.videoHeight,
      }
      const len = detections.detection.box[direction]
      threshold = (len * ratio) / (streamBox[direction] / len)
    }
    const coord = detections.landmarks[dataFn]()
    if (Array.isArray(coord)) {
      coords.push(coord[index][axis])
      // Secondary ("rest") sample: either another landmark index on the same
      // axis, or the same landmark on the other axis.
      if (typeof rest === "number") {
        restCoords.push(coord[rest][axis])
      } else if (typeof rest === "string") {
        restCoords.push(coord[index][rest])
      }
    }
  }
  // No usable detections — cannot have observed the gesture
  if (!coords.length) return false
  const ave = coords.reduce((acc, a) => acc + a, 0) / coords.length
  const moved = coords.filter(a => Math.abs(a - ave) > threshold)
  // Pass when at least half the requested samples moved beyond the threshold
  // and the rest coordinate (when present) stayed within it.
  return (
    moved.length >= ((n / 2) | 0) &&
    (!restCoords.length ||
      Math.max(...restCoords) - Math.min(...restCoords) < threshold)
  )
}
// Gestures eligible for random selection ("eye" is defined in the maps above
// but not included here)
const faceapiLivingEvent: FaceapiGesture[] = ["nod", "shake", "mouth"]
// User-facing hint text for each gesture
const faceapiLivingEventHint: Record<FaceapiGesture, string> = {
nod: "请点头",
shake: "请摇头",
mouth: "请张嘴",
eye: "请眨眼",
}
// Public surface of the faceapi helper module
export {
faceapi,
faceapiLivingEvent,
faceapiLivingEventHint,
getFaceGestureResult,
getFaceLandmarks,
}