handtrackjs
1.声明变量
import * as handTrack from 'handtrackjs'
import {onMounted,ref} from 'vue'
// Template refs — bound to the <video> and <canvas> elements in the template.
const camera = ref(null)
const canvas = ref(null)
2.声明方法
// On mount: load the hand-tracking model, start the camera, and run a
// per-frame detection loop that renders predictions onto the canvas.
onMounted(async () => {
  const context = canvas.value.getContext('2d')
  // flipHorizontal mirrors predictions to match the mirrored webcam view.
  const model = await handTrack.load({ flipHorizontal: true });
  // Detection loop: detect hands in the current frame, draw the results,
  // then schedule itself for the next animation frame.
  const handDetection = async () => {
    const predictions = await model.detect(camera.value)
    model.renderPredictions(predictions, canvas.value, context, camera.value)
    requestAnimationFrame(handDetection)
  }
  const status = await handTrack.startVideo(camera.value)
  if (status.status) {
    const stream = await navigator.mediaDevices.getUserMedia({
      video: true
    })
    // FIX: the media-element property is `srcObject`, not `srcObj` — the
    // original typo silently created a dead property and the stream was
    // never attached to the <video> element.
    camera.value.srcObject = stream
    handDetection()
  } else {
    // FIX: don't fail silently when the camera can't be started
    // (e.g. permission denied).
    console.warn('handTrack.startVideo failed:', status.msg)
  }
})
face-api.js
1.声明变量
import * as faceapi from 'face-api.js'
import {onMounted,ref} from 'vue'
// Directory the face-api model weight files are served from.
const MODEL_PATH = './models'
// Template ref — bound to the <video> element in the template.
const camera = ref(null)
2.声明方法
// On mount: load the face-api models, start the camera, and begin periodic
// face detection once the video starts playing.
onMounted(async () => {
  // Tiny detector + the matching tiny landmark model, plus expression and
  // age/gender models.
  await faceapi.loadTinyFaceDetectorModel(MODEL_PATH)
  await faceapi.loadFaceLandmarkTinyModel(MODEL_PATH)
  await faceapi.loadFaceExpressionModel(MODEL_PATH)
  await faceapi.loadAgeGenderModel(MODEL_PATH)
  const stream = await navigator.mediaDevices.getUserMedia({
    video : true
  })
  camera.value.srcObject = stream
  const detectFace = async () => {
    const canvas = faceapi.createCanvasFromMedia(camera.value)
    const context = canvas.getContext('2d')
    const width = camera.value.width
    const height = camera.value.height
    // FIX: size the overlay canvas to the video element, otherwise the
    // drawn boxes don't line up with the faces.
    faceapi.matchDimensions(canvas, { width, height })
    document.body.append(canvas)
    setInterval(async () => {
      // FIX: typo `detactions` -> `detections`.
      const detections = await faceapi.detectAllFaces(
        camera.value,
        new faceapi.TinyFaceDetectorOptions()
      ).withFaceLandmarks(true) // true = use the tiny landmark model loaded above
        .withFaceExpressions()
        .withAgeAndGender()
      // FIX: `width`/`height` were computed but never used — the original
      // hard-coded 640x480 here and in clearRect, which breaks for any
      // other camera resolution.
      const resizedDetections = faceapi.resizeResults(detections, { width, height })
      context.clearRect(0, 0, width, height)
      faceapi.draw.drawDetections(canvas, resizedDetections)
    }, 300)
  }
  // Start detecting only once frames are actually flowing.
  camera.value.addEventListener('play', detectFace)
})