face-api 的简单使用
useFaceApi
import { ref } from "vue";
import * as faceApi from "face-api.js";
// Lookup table mapping face-api.js expression keys to Chinese display labels.
// Frozen so this shared constant cannot be mutated accidentally elsewhere.
const expressionTranslation = Object.freeze({
  neutral: "中立",
  happy: "开心",
  sad: "伤心",
  angry: "生气",
  fearful: "害怕",
  disgusted: "厌恶",
  surprised: "惊讶",
});
/**
 * Loads all face-api.js model weights from the /models directory.
 * The five networks are independent, so they are fetched in parallel with
 * Promise.all instead of one-by-one, shortening startup time.
 * @returns {Promise<void>} resolves once every model has loaded
 */
const loadModel = async () => {
  await Promise.all([
    faceApi.nets.tinyFaceDetector.loadFromUri("/models"),
    faceApi.nets.faceLandmark68Net.loadFromUri("/models"),
    faceApi.nets.faceRecognitionNet.loadFromUri("/models"),
    faceApi.nets.faceExpressionNet.loadFromUri("/models"),
    faceApi.nets.ssdMobilenetv1.loadFromUri("/models"),
  ]);
};
/**
 * Picks the highest-scoring face detection and returns the Chinese label of
 * its dominant expression.
 * Declared `async` so it still returns a Promise (callers `await` it and
 * chain `.then`), but without the needless `new Promise` wrapper the
 * original used around purely synchronous logic.
 * @param {Array} detections - results of detectAllFaces(...).withFaceExpressions()
 * @returns {Promise<string>} expression label, or a "no face detected" message
 */
const getTheExpression = async (detections) => {
  if (detections.length === 0) {
    return "没有检测到人脸";
  }
  // Keep only the detection face-api scored highest.
  const bestDetection = detections.reduce((best, current) =>
    current.detection.score > best.detection.score ? current : best
  );
  // Find the expression key with the largest probability.
  const expressions = bestDetection.expressions;
  const maxExpression = Object.keys(expressions).reduce((a, b) =>
    expressions[a] > expressions[b] ? a : b
  );
  return expressionTranslation[maxExpression];
};
/**
 * Composable that starts model loading and exposes reactive status flags.
 * Bug fix: `loading` is now cleared on failure too. Previously it stayed
 * `true` after an error, and since the templates check `v-if="loading"`
 * before `v-else-if="error"`, the error message could never be shown.
 * @returns {{ loading: import("vue").Ref<boolean>, error: import("vue").Ref<boolean> }}
 */
export const useLoadModel = () => {
  const loading = ref(true);
  const error = ref(false);
  loadModel()
    .catch(() => {
      error.value = true;
    })
    .finally(() => {
      // Runs on both success and failure so the UI always leaves the
      // "loading" state.
      loading.value = false;
    });
  return { loading, error };
};
/**
 * Continuously detects the face expression in a playing <video> element and
 * reports the label through `callback` once per animation frame.
 * Bug fix: the detection chain is now wrapped in try/catch — previously any
 * detection error produced an unhandled rejection and silently stopped the
 * requestAnimationFrame loop.
 * @param {HTMLVideoElement} video - video element currently playing
 * @param {(emotion: string) => void} callback - receives the label per frame
 */
export const useDetectVideoFace = (video, callback) => {
  const detectFace = async () => {
    try {
      const detections = await faceApi
        .detectAllFaces(video)
        .withFaceLandmarks()
        .withFaceExpressions();
      const emotion = await getTheExpression(detections);
      callback(emotion);
    } catch (err) {
      // Log and keep looping; one failed frame should not end detection.
      console.error("face detection failed:", err);
    }
    requestAnimationFrame(detectFace);
  };
  detectFace();
};
/**
 * Detects the dominant face expression in an uploaded image file.
 * Bug fix: the original never settled the Promise when the FileReader or the
 * Image failed asynchronously; both error paths now reject, so callers'
 * try/catch and .catch handlers actually fire.
 * @param {File|Blob} file - image file selected by the user
 * @returns {Promise<string>} Chinese expression label (or "no face" message)
 */
export const useDetectImageFace = (file) => {
  return new Promise((resolve, reject) => {
    const reader = new FileReader();
    // Reject if the file itself cannot be read.
    reader.onerror = () => reject(reader.error);
    reader.onload = (event) => {
      const img = new Image();
      // Reject if the data URL does not decode as an image.
      img.onerror = () => reject(new Error("图片加载失败"));
      img.onload = () => {
        faceApi
          .detectAllFaces(img, new faceApi.TinyFaceDetectorOptions())
          .withFaceLandmarks()
          .withFaceExpressions()
          .then((detections) => getTheExpression(detections))
          .then((emotion) => resolve(emotion))
          .catch((error) => reject(error));
      };
      img.src = event.target.result;
    };
    try {
      reader.readAsDataURL(file);
    } catch (error) {
      console.log(error);
      reject(error);
    }
  });
};
分析视频中人脸的表情
<template>
  <div class="page">
    <!-- Shown while the face-api models are downloading -->
    <div class="loading" v-if="loading">模型加载中...</div>
    <!-- Shown if model loading failed -->
    <div class="error" v-else-if="error">模型加载失败!</div>
    <div class="video" v-else>
      <div class="content">
        <!-- muted + autoplay so the camera stream starts without a user gesture -->
        <video ref="videoRef" autoplay muted></video>
        <!-- Live expression label, updated on every detection frame -->
        <span>您当前的表情是:{{ emotion }}</span>
      </div>
      <el-button type="primary" @click="startVideo">开始视频检测</el-button>
    </div>
  </div>
</template>
<script setup>
import { ref, shallowRef } from "vue";
import { useLoadModel, useDetectVideoFace } from "@/hooks/useFaceApi";
// Model loading state and load-failure flag.
const { loading, error } = useLoadModel();
// Reference to the <video> element (shallowRef: a DOM node needs no deep reactivity).
const videoRef = shallowRef(null);
// Current expression label shown beside the video.
const emotion = ref("");
// Guard so clicking the button repeatedly does not spawn extra detection loops.
let detecting = false;
// Request camera access, attach the stream and start the detection loop.
const startVideo = async () => {
  const video = videoRef.value;
  if (!video || detecting) return;
  try {
    const stream = await navigator.mediaDevices.getUserMedia({ video: {} });
    video.srcObject = stream;
    // Wait until the video actually starts playing before detecting.
    await new Promise((resolve) => (video.onplaying = resolve));
    detecting = true;
    useDetectVideoFace(video, (emotionStr) => {
      emotion.value = emotionStr;
    });
  } catch (err) {
    // Permission denied / no camera: log instead of an unhandled rejection.
    console.error("无法访问摄像头:", err);
  }
};
</script>
<style scoped>
/* Full-size page container; hides any overflow. */
.page {
  width: 100%;
  height: 100%;
  overflow: hidden;
}
/* Center the loading / error status text in the page. */
.loading,
.error {
  width: 100%;
  height: 100%;
  display: flex;
  justify-content: center;
  align-items: center;
}
/* Column layout: video preview on top, start button below. */
.video {
  width: 100%;
  height: 100%;
  display: flex;
  flex-direction: column;
  justify-content: center;
  align-items: center;
}
/* Fixed-size wrapper; positioning context for the floating label. */
.content {
  position: relative;
  width: 500px;
  height: 300px;
}
/* Fill the wrapper; object-fit avoids letterboxing. */
.content video {
  width: 100%;
  height: 100%;
  border: 1px solid #ccc;
  object-fit: cover;
}
/* Expression label floated past the top-right edge of the video. */
.content span {
  position: absolute;
  top: 20px;
  right: -50px;
  background-color: #ccc;
}
/* Space between the video and the start button. */
.video .el-button {
  margin-top: 20px;
}
</style>
分析图片中人脸的表情
<template>
  <div class="detect">
    <!-- Shown while the face-api models are downloading -->
    <div class="loading" v-if="loading">模型加载中...</div>
    <!-- Shown if model loading failed -->
    <div class="error" v-else-if="error">模型加载失败!</div>
    <div class="content" v-else>
      <!-- Drag-and-drop uploader: no auto-upload, single image,
           replacement handled by onExceedFile -->
      <el-upload
        class="upload-demo"
        ref="upload"
        drag
        accept="image/png, image/jpeg"
        :auto-upload="false"
        :limit="1"
        v-model:file-list="fileList"
        :on-exceed="onExceedFile"
        :show-file-list="false"
      >
        <el-icon class="el-icon--upload"><upload-filled /></el-icon>
        <div class="el-upload__text">将文件放在此处或 <em>单击上传</em></div>
      </el-upload>
      <!-- Preview each uploaded image with its detected expression;
           shows a "computing" placeholder until the result arrives -->
      <div class="image-list">
        <div class="item" v-for="(image, index) in fileList" :key="index">
          <img :src="genImgUrl(image.raw)" :alt="image.name" />
          <span class="emotion">
            这张图片的表情是:{{ emotions[index] ?? "计算中..." }}
          </span>
        </div>
      </div>
    </div>
  </div>
</template>
<script setup>
import { ref, shallowRef, watch } from "vue";
import { genFileId } from "element-plus";
import { UploadFilled } from "@element-plus/icons-vue";
import { useLoadModel, useDetectImageFace } from "@/hooks/useFaceApi";
// Model loading state and load-failure flag.
const { loading, error } = useLoadModel();
// el-upload component instance.
const upload = shallowRef(null);
// Uploaded files and the expression computed for each, aligned by index.
const fileList = ref([]);
const emotions = ref([]);
// Object URL for previewing an uploaded file's raw Blob.
// NOTE(review): URLs created here are never revoked, so each render leaks one
// blob URL — consider caching per file and calling URL.revokeObjectURL.
const genImgUrl = (raw) => {
  return URL.createObjectURL(raw);
};
// When the 1-file limit is exceeded, replace the old file with the new one.
const onExceedFile = (files, uploadFiles) => {
  // Drop the previously selected file.
  upload.value.clearFiles();
  const file = files[0];
  // Give the replacement file a fresh uid.
  file.uid = genFileId();
  // Mark it as ready (manual upload mode).
  upload.value.handleStart(file);
};
// Recompute expressions whenever the file list changes.
// Bug fix: results now REPLACE the previous ones. The original pushed onto
// `emotions`, so after replacing the single allowed file, index 0 still
// displayed the stale label from the first upload.
watch(fileList, async (newFileList) => {
  // Clear first so the template shows "计算中..." while detection runs.
  emotions.value = [];
  try {
    emotions.value = await Promise.all(
      newFileList.map((file) => useDetectImageFace(file.raw))
    );
  } catch (error) {
    console.log("模型运行出错");
  }
});
</script>
<style scoped>
/* Full-size padded page container. */
.detect {
  box-sizing: border-box;
  width: 100%;
  height: 100%;
  padding: 50px;
}
/* Center the loading / error status text in the page. */
.loading,
.error {
  width: 100%;
  height: 100%;
  display: flex;
  justify-content: center;
  align-items: center;
}
/* Column layout: uploader on top, image list below. */
.content {
  width: 100%;
  height: 100%;
  display: flex;
  flex-direction: column;
}
/* Scrollable area taking the remaining vertical space. */
.image-list {
  flex: 1;
  display: flex;
  flex-direction: column;
  justify-content: center;
  overflow-x: hidden;
  overflow-y: auto;
}
/* Each preview: image stacked above its expression label. */
.item {
  display: flex;
  flex-direction: column;
  justify-content: center;
  align-items: center;
}
/* Gap between image and label. */
.emotion {
  margin-top: 10px;
}
/* Constrained, rounded preview image. */
.image-list img {
  width: 400px;
  object-fit: cover;
  vertical-align: middle;
  border-radius: 10px;
}
</style>
对应源代码链接