face-api 的简单使用

336 阅读 · 4 分钟

face-api 的简单使用

useFaceApi

import { ref } from "vue";
import * as faceApi from "face-api.js";

// Map of face-api expression keys to their Chinese display names
const expressionTranslation = {
  neutral: "中立",
  happy: "开心",
  sad: "伤心",
  angry: "生气",
  fearful: "害怕",
  disgusted: "厌恶",
  surprised: "惊讶",
};

/**
 * Load every face-api model from the /models directory.
 * The five models are independent, so they are downloaded in parallel
 * with Promise.all instead of five sequential awaits.
 * @returns {Promise<void>} resolves once all models are loaded
 */
const loadModel = async () => {
  await Promise.all([
    faceApi.nets.tinyFaceDetector.loadFromUri("/models"),
    faceApi.nets.faceLandmark68Net.loadFromUri("/models"),
    faceApi.nets.faceRecognitionNet.loadFromUri("/models"),
    faceApi.nets.faceExpressionNet.loadFromUri("/models"),
    faceApi.nets.ssdMobilenetv1.loadFromUri("/models"),
  ]);
};

/**
 * Pick the dominant expression from face-api detection results.
 * (async replaces the original `new Promise` wrapper around fully
 * synchronous logic, so the function still returns a Promise.)
 * @param {Array} detections - items carrying `.detection.score` and `.expressions`
 * @returns {Promise<string>} Chinese name of the strongest expression of the
 *   highest-scoring face, or "没有检测到人脸" when no face was detected
 */
const getTheExpression = async (detections) => {
  // No face detected — return the corresponding message
  if (detections.length === 0) {
    return "没有检测到人脸";
  }

  // Keep the detection with the highest confidence score
  const bestDetection = detections.reduce((best, current) =>
    current.detection.score > best.detection.score ? current : best
  );

  // Find the expression with the highest probability for that face
  const { expressions } = bestDetection;
  const maxExpression = Object.keys(expressions).reduce((a, b) =>
    expressions[a] > expressions[b] ? a : b
  );

  // Fall back to the raw key when no translation exists, instead of
  // resolving to `undefined`
  return expressionTranslation[maxExpression] ?? maxExpression;
};

/**
 * Kick off model loading and expose reactive status flags.
 * @returns {{ loading: Ref<boolean>, error: Ref<boolean> }}
 *   loading - true while the models are still downloading
 *   error   - true when loading failed
 */
export const useLoadModel = () => {
  const loading = ref(true);
  const error = ref(false);
  loadModel()
    .then(() => {
      loading.value = false;
    })
    .catch((err) => {
      // Don't swallow the failure silently
      console.error("Failed to load face-api models", err);
      // Also clear the loading flag: consumers check `v-if="loading"`
      // before `v-else-if="error"`, so leaving loading=true would make
      // the error state unreachable.
      loading.value = false;
      error.value = true;
    });
  return { loading, error };
};

/**
 * Continuously analyse the facial expression in a playing <video> element.
 * Runs one detection per animation frame and reports the dominant
 * expression through `callback`.
 * @param {HTMLVideoElement} video - video element to analyse
 * @param {(emotion: string) => void} callback - receives the expression text
 * @returns {() => void} stop function that ends the detection loop
 *   (backward compatible: the previous version returned undefined)
 */
export const useDetectVideoFace = (video, callback) => {
  let stopped = false;

  const detectFace = async () => {
    if (stopped) return;
    try {
      const detections = await faceApi
        .detectAllFaces(video)
        .withFaceLandmarks()
        .withFaceExpressions();

      // Translate the strongest detection into an emotion label
      const emotion = await getTheExpression(detections);
      callback(emotion);
    } catch (err) {
      // A single failed frame must not kill the loop silently
      console.error("face detection failed", err);
    }

    requestAnimationFrame(detectFace);
  };

  detectFace();

  return () => {
    stopped = true;
  };
};

/**
 * Detect the dominant facial expression in an image file.
 * @param {File} file - image file (e.g. from an <input type="file">)
 * @returns {Promise<string>} the expression text; rejects when the file
 *   cannot be read, the image cannot be decoded, or detection fails
 */
export const useDetectImageFace = (file) => {
  return new Promise((resolve, reject) => {
    const reader = new FileReader();

    // Reject instead of leaving the Promise pending forever when the
    // file cannot be read
    reader.onerror = () => reject(reader.error);

    // When the FileReader has read the file successfully
    reader.onload = (event) => {
      const img = new Image();

      // Reject when the data URL is not a decodable image
      img.onerror = () => reject(new Error("Failed to decode image"));

      // When the Image has loaded, run detection on it
      img.onload = () => {
        faceApi
          .detectAllFaces(img, new faceApi.TinyFaceDetectorOptions())
          .withFaceLandmarks()
          .withFaceExpressions()
          // Extract the dominant expression from the detections
          .then((detections) => getTheExpression(detections))
          .then(resolve)
          // Propagate detection failures to the caller
          .catch(reject);
      };

      // Start decoding from the data URL produced by the reader
      img.src = event.target.result;
    };

    // Kick off reading the file as a data URL
    try {
      reader.readAsDataURL(file);
    } catch (error) {
      console.log(error);
      reject(error);
    }
  });
};

分析视频中人脸的表情

<template>
  <div class="page">
    <div class="loading" v-if="loading">模型加载中...</div>
    <div class="error" v-else-if="error">模型加载失败!</div>
    <div class="video" v-else>
      <div class="content">
        <video ref="videoRef" autoplay muted></video>
        <span>您当前的表情是:{{ emotion }}</span>
      </div>
      <el-button type="primary" @click="startVideo">开始视频检测</el-button>
    </div>
  </div>
</template>

<script setup>
import { ref, shallowRef } from "vue";
import { useLoadModel, useDetectVideoFace } from "@/hooks/useFaceApi";

// Model loading state / whether loading failed
const { loading, error } = useLoadModel();

// <video> element instance (shallowRef: no deep reactivity needed for a DOM node)
const videoRef = shallowRef(null);
// Text of the currently detected expression
const emotion = ref("");

// Request camera access and start analysing the video stream
const startVideo = async () => {
  const video = videoRef.value;
  if (!video) return;
  let stream;
  try {
    stream = await navigator.mediaDevices.getUserMedia({ video: {} });
  } catch (err) {
    // Permission denied or no camera — previously an unhandled rejection
    console.error("Unable to access the camera", err);
    return;
  }
  video.srcObject = stream;
  // Wait until the video actually starts playing before detecting
  await new Promise((resolve) => (video.onplaying = resolve));
  useDetectVideoFace(video, (emotionStr) => {
    emotion.value = emotionStr;
  });
};
</script>

<style scoped>
.page {
  width: 100%;
  height: 100%;
  overflow: hidden;
}

.loading,
.error {
  width: 100%;
  height: 100%;
  display: flex;
  justify-content: center;
  align-items: center;
}

.video {
  width: 100%;
  height: 100%;
  display: flex;
  flex-direction: column;
  justify-content: center;
  align-items: center;
}

.content {
  position: relative;
  width: 500px;
  height: 300px;
}
.content video {
  width: 100%;
  height: 100%;
  border: 1px solid #ccc;
  object-fit: cover;
}

.content span {
  position: absolute;
  top: 20px;
  right: -50px;
  background-color: #ccc;
}

.video .el-button {
  margin-top: 20px;
}
</style>

分析图片中人脸的表情

<template>
  <div class="detect">
    <div class="loading" v-if="loading">模型加载中...</div>
    <div class="error" v-else-if="error">模型加载失败!</div>
    <div class="content" v-else>
      <el-upload
        class="upload-demo"
        ref="upload"
        drag
        accept="image/png, image/jpeg"
        :auto-upload="false"
        :limit="1"
        v-model:file-list="fileList"
        :on-exceed="onExceedFile"
        :show-file-list="false"
      >
        <el-icon class="el-icon--upload"><upload-filled /></el-icon>
        <div class="el-upload__text">将文件放在此处或 <em>单击上传</em></div>
      </el-upload>

      <div class="image-list">
        <div class="item" v-for="(image, index) in fileList" :key="index">
          <img :src="genImgUrl(image.raw)" :alt="image.name" />
          <span class="emotion">
            这张图片的表情是:{{ emotions[index] ?? "计算中..." }}
          </span>
        </div>
      </div>
    </div>
  </div>
</template>

<script setup>
import { ref, shallowRef, watch } from "vue";
import { genFileId } from "element-plus";
import { UploadFilled } from "@element-plus/icons-vue";
import { useLoadModel, useDetectImageFace } from "@/hooks/useFaceApi";

// Model loading state / whether loading failed
const { loading, error } = useLoadModel();

// el-upload component instance
const upload = shallowRef(null);
// Uploaded file list (bound to el-upload via v-model:file-list)
const fileList = ref([]);
// Detected expression per file, read in the template as emotions[index]
const emotions = ref([]);

// Cache one object URL per file: the original created a brand-new
// blob: URL on every render and never revoked it, leaking memory
// (URL.createObjectURL keeps the blob alive until revoked).
const imgUrlCache = new WeakMap();

// Build a displayable URL for an uploaded file's raw Blob
const genImgUrl = (raw) => {
  if (!imgUrlCache.has(raw)) {
    imgUrlCache.set(raw, URL.createObjectURL(raw));
  }
  return imgUrlCache.get(raw);
};

// When the 1-file limit is exceeded, swap the existing file for the new one
const onExceedFile = (files, uploadFiles) => {
  const uploader = upload.value;
  // Drop the previously selected file
  uploader.clearFiles();
  // Take the newly chosen file and stamp it with a fresh uid
  const [replacement] = files;
  replacement.uid = genFileId();
  // Put the replacement back into the "ready" state
  uploader.handleStart(replacement);
};

// Re-run expression detection whenever the file list changes
watch(fileList, async (newFileList) => {
  try {
    const emotionsResult = await Promise.all(
      newFileList.map((file) => useDetectImageFace(file.raw))
    );
    // Replace rather than push: pushing accumulated results across watch
    // runs, so after a file was swapped (limit=1 + exceed handler) the
    // template's emotions[index] showed the stale first result forever.
    emotions.value = emotionsResult;
  } catch (error) {
    // Include the actual error instead of discarding it
    console.log("模型运行出错", error);
  }
});
</script>

<style scoped>
.detect {
  box-sizing: border-box;
  width: 100%;
  height: 100%;
  padding: 50px;
}

.loading,
.error {
  width: 100%;
  height: 100%;
  display: flex;
  justify-content: center;
  align-items: center;
}

.content {
  width: 100%;
  height: 100%;
  display: flex;
  flex-direction: column;
}

.image-list {
  flex: 1;
  display: flex;
  flex-direction: column;
  justify-content: center;
  overflow-x: hidden;
  overflow-y: auto;
}
.item {
  display: flex;
  flex-direction: column;
  justify-content: center;
  align-items: center;
}
.emotion {
  margin-top: 10px;
}
.image-list img {
  width: 400px;
  object-fit: cover;
  vertical-align: middle;
  border-radius: 10px;
}
</style>

对应源代码链接