handtrack.js and face-api.js


handtrack.js

1. Declare variables

    import * as handTrack from 'handtrackjs'
    import {onMounted,ref} from 'vue'
    const camera = ref(null)
    const canvas = ref(null)
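
The `camera` and `canvas` refs above are assumed to point at a `<video>` and a `<canvas>` element in the component template. A minimal sketch of that template, assuming a Vue 3 `<script setup>` single-file component (the 640×480 size is illustrative):

    <template>
      <!-- video element that receives the webcam stream -->
      <video ref="camera" autoplay muted width="640" height="480"></video>
      <!-- canvas that handtrack.js renders its predictions onto -->
      <canvas ref="canvas" width="640" height="480"></canvas>
    </template>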

2. Declare methods

    onMounted(async () => {
      // get the canvas 2D drawing context
      const context = canvas.value.getContext('2d')
      // load the hand-tracking model
      const model = await handTrack.load({ flipHorizontal: true })
      const handDetection = async () => {
        // run hand detection on the video element
        const predictions = await model.detect(camera.value)
        // render the detected hands onto the canvas
        model.renderPredictions(predictions, canvas.value, context, camera.value)
        requestAnimationFrame(handDetection)
      }
      // ask handtrack.js to start the webcam and attach the video stream to the DOM element;
      // the returned status reflects whether camera access was granted
      const status = await handTrack.startVideo(camera.value)
      if (status.status) {
        // get the webcam media stream and attach it to the video element
        const stream = await navigator.mediaDevices.getUserMedia({
          video: true
        })
        camera.value.srcObject = stream
        handDetection()
      }
    })
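
When the component is torn down, the webcam should be released as well. A minimal cleanup sketch, assuming the same `camera` ref and handtrack.js's `stopVideo` helper (add `onUnmounted` to the existing Vue import):

    import { onUnmounted } from 'vue'

    onUnmounted(() => {
      // stop the webcam stream that handTrack.startVideo attached to the video element
      handTrack.stopVideo(camera.value)
    })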

face-api.js

1. Declare variables

    import * as faceapi from 'face-api.js'
    import { onMounted, ref } from 'vue'
    // path the model weight files are served from
    const MODEL_PATH = './models'
    // template ref for the <video> element
    const camera = ref(null)
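
As with the handtrack.js example, the `camera` ref is assumed to point at a `<video>` element in the template. The explicit `width`/`height` attributes matter here, because the detection code below reads `camera.value.width` and `camera.value.height`. A sketch (640×480 is an assumption):

    <template>
      <video ref="camera" autoplay muted width="640" height="480"></video>
    </template>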

2. Declare methods

    onMounted(async () => {
      // load the model weights from MODEL_PATH
      await faceapi.loadTinyFaceDetectorModel(MODEL_PATH)
      await faceapi.loadFaceLandmarkTinyModel(MODEL_PATH)
      await faceapi.loadFaceExpressionModel(MODEL_PATH)
      await faceapi.loadAgeGenderModel(MODEL_PATH)
      // get the webcam media stream and attach it to the video element
      const stream = await navigator.mediaDevices.getUserMedia({
        video: true
      })
      camera.value.srcObject = stream
      // face detection routine
      const detectFace = async () => {
        // create an overlay canvas sized to the video element
        const canvas = faceapi.createCanvasFromMedia(camera.value)
        // get its 2D drawing context
        const context = canvas.getContext('2d')
        const width = camera.value.width
        const height = camera.value.height
        document.body.append(canvas)
        setInterval(async () => {
          const detections = await faceapi.detectAllFaces(
            camera.value,
            new faceapi.TinyFaceDetectorOptions()
          )
            .withFaceLandmarks(true)  // also detect face landmarks (tiny model)
            .withFaceExpressions()    // also detect facial expressions
            .withAgeAndGender()       // also estimate age and gender
          // scale the results to match the video element's size
          const resizedDetections = faceapi.resizeResults(detections, { width, height })
          context.clearRect(0, 0, width, height)
          // draw the detection boxes onto the overlay canvas
          faceapi.draw.drawDetections(canvas, resizedDetections)
        }, 300)
      }

      // start detecting once the video begins playing
      camera.value.addEventListener('play', detectFace)
    })
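
The snippet above loads the landmark, expression, and age/gender models but only draws the detection boxes. face-api.js also ships drawing helpers for the other results; the following sketch could sit right after the `drawDetections` call inside the interval, reusing the same `canvas` and `resizedDetections`:

    // draw the 68 facial landmark points
    faceapi.draw.drawFaceLandmarks(canvas, resizedDetections)
    // draw each face's dominant expression and its probability
    faceapi.draw.drawFaceExpressions(canvas, resizedDetections)
    // label each face with the estimated age and gender
    resizedDetections.forEach((result) => {
      const { age, gender, genderProbability } = result
      new faceapi.draw.DrawTextField(
        [
          `${Math.round(age)} years`,
          `${gender} (${genderProbability.toFixed(2)})`
        ],
        result.detection.box.bottomRight
      ).draw(canvas)
    })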