I'm working on a project where I need to automatically capture a frame from a live video when the face in the video sits inside a fixed region of the frame. I'm using faceapi for face detection. I tried using the mouth, nose and eye coordinates, but that doesn't work: the coordinates keep changing, the same coordinates show up at different head positions, and not every face has the same shape. Here is my JS code -
const video = document.getElementById('video')

Promise.all([
  faceapi.nets.tinyFaceDetector.loadFromUri('/models'),
  faceapi.nets.faceLandmark68Net.loadFromUri('/models'),
  faceapi.nets.faceRecognitionNet.loadFromUri('/models'),
  faceapi.nets.faceExpressionNet.loadFromUri('/models')
]).then(startVideo)

function startVideo() {
  navigator.getUserMedia(
    { video: {} },
    stream => video.srcObject = stream,
    err => console.error(err)
  )
}

video.addEventListener('play', () => {
  const canvas = faceapi.createCanvasFromMedia(video)
  document.body.append(canvas)
  const displaySize = { width: video.width, height: video.height }
  faceapi.matchDimensions(canvas, displaySize)
  setInterval(async () => {
    const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions()).withFaceLandmarks().withFaceExpressions()
    const count = await faceapi
      .detectAllFaces(video, new faceapi.TinyFaceDetectorOptions())
      .withFaceLandmarks()
    if (count.length > 1) {
      alert("There are multiple faces in the frame. Please keep only one face");
    }
    else {
      const resizedDetections = faceapi.resizeResults(detections, displaySize)
      canvas.getContext('2d').clearRect(0, 0, canvas.width, canvas.height)
      faceapi.draw.drawDetections(canvas, resizedDetections)
      faceapi.draw.drawFaceLandmarks(canvas, resizedDetections)
      faceapi.draw.drawFaceExpressions(canvas, resizedDetections)
      const landmarks = await faceapi.detectFaceLandmarks(video)
      const mouth = landmarks.getMouth()
      const nose = landmarks.getNose()
      const leftEye = landmarks.getLeftEye();
      console.log(mouth[0]._x, mouth[0]._y);
      console.log(nose[0]._x, nose[0]._y)
      console.log(leftEye[0]._x, leftEye[0]._y)
      if ((((mouth[0]._x <= 330) && (mouth[0]._x >= 280)) && ((mouth[0]._y <= 310) && (mouth[0]._y >= 260))) &&
          (((nose[0]._x <= 320) && (nose[0]._x >= 270)) && ((nose[0]._y <= 220) && (nose[0]._y >= 160))) &&
          (((leftEye[0]._x <= 270) && (leftEye[0]._x >= 210)) && ((leftEye[0]._y <= 240) && (leftEye[0]._y >= 180)))) {
        alert("Click for perfect photo");
      }
      else {
        //alert("Image not ready");
      }
    }
  }, 100)
})
Here is my HTML code:
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <meta http-equiv="X-UA-Compatible" content="ie=edge">
  <title>Document</title>
  <script defer src="face-api.min.js"></script>
  <script defer src="script.js"></script>
  <style>
    body {
      margin: 0;
      padding: 0;
      width: 100vw;
      height: 100vh;
      display: flex;
      justify-content: center;
      align-items: center;
    }
    canvas {
      position: absolute;
    }
    #box {
      width: 450px;
      height: 325px;
      position: absolute;
      background-image: url("paper.png");
      background-repeat: no-repeat;
      background-size: 450px 325px;
    }
  </style>
</head>
<body>
  <div id="box"></div>
  <video id="video" width="500" height="375" autoplay muted></video>
  <br/>
</body>
</html>
I overlaid the image on the video frame because I want the picture to be taken automatically when the face is inside that region.
You can use the height, width and coordinates of the detection box to work out where the face sits inside the frame and how large it is.
// Prefer the public box accessor over the private _box fields
const frame_height = resizedDetections[0].detection.box.height
const frame_width = resizedDetections[0].detection.box.width
const frame_x = resizedDetections[0].detection.box.x
const frame_y = resizedDetections[0].detection.box.y
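
For example, here is a minimal sketch built on those box values. The TARGET region below is a placeholder you would derive from the position of your 450x325 #box overlay, and captureFrame is a hypothetical helper, not part of face-api.js:

// Hypothetical target region, roughly matching the 450x325 #box overlay
const TARGET = { x: 25, y: 25, width: 450, height: 325 };

// True when the whole detection box sits inside the target region
function isFaceInRegion(result) {
  const box = result.detection.box;
  return (
    box.x >= TARGET.x &&
    box.y >= TARGET.y &&
    box.x + box.width <= TARGET.x + TARGET.width &&
    box.y + box.height <= TARGET.y + TARGET.height
  );
}

// Grab the current video frame as a data URL once the face is in place
function captureFrame(video) {
  const canvas = document.createElement('canvas');
  canvas.width = video.videoWidth;
  canvas.height = video.videoHeight;
  canvas.getContext('2d').drawImage(video, 0, 0);
  return canvas.toDataURL('image/png');
}

// Inside the setInterval callback, instead of the fixed landmark checks:
if (resizedDetections.length === 1 && isFaceInRegion(resizedDetections[0])) {
  const snapshot = captureFrame(video);
  // e.g. download the snapshot or send it to a server
}

Because the check uses the whole detection box rather than individual landmark coordinates, it works for any face shape and any distance from the camera.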
You can also overlay an extra guide frame with absolute CSS positioning to show the user the ideal area to place their head in. I would suggest:

- iterating over the face landmark array and checking that every point falls inside a bounding box of the ideal (maximum) face size;
- using the face bounding box to enforce a minimum face size.

That way you capture the face at the optimal size; see the sketch below. =]
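
A minimal sketch of that suggestion (IDEAL_FRAME and MIN_FACE_WIDTH are hypothetical values you would tune to your overlay):

// Hypothetical ideal frame the user should place their head into
const IDEAL_FRAME = { x: 25, y: 25, width: 450, height: 325 };
const MIN_FACE_WIDTH = 150; // hypothetical lower bound on face size, in pixels

function faceFitsFrame(result) {
  // Every one of the 68 landmark points must lie inside the ideal frame
  const allInside = result.landmarks.positions.every((p) =>
    p.x >= IDEAL_FRAME.x &&
    p.x <= IDEAL_FRAME.x + IDEAL_FRAME.width &&
    p.y >= IDEAL_FRAME.y &&
    p.y <= IDEAL_FRAME.y + IDEAL_FRAME.height
  );
  // The bounding box enforces the minimum face size
  const bigEnough = result.detection.box.width >= MIN_FACE_WIDTH;
  return allInside && bigEnough;
}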
import React, { useState, useEffect, useRef } from "react";
import * as faceapi from "face-api.js";
import Loading from "./Loading";
import { mapAcne, mapPigmentation, mapPores, mapWrinkles } from "../common";

const WebcamComponent = () => {
  const [initializing, setInitializing] = useState(true);
  const [expressions, setExpressions] = useState([]);
  const [age, setAge] = useState(null);
  const [gender, setGender] = useState(null);
  const [skinFeatures, setSkinFeatures] = useState({
    skinColor: "",
    wrinkles: "",
    acne: "",
    pores: "",
    pigmentation: "",
  });
  const videoRef = useRef();
  const videoCanvasRef = useRef();
  const videoHeight = 400;
  const videoWidth = 400;

  useEffect(() => {
    const loadModels = async () => {
      const MODEL_URL = "/models"; // Update to the correct path
      setInitializing(true);
      await faceapi.nets.tinyFaceDetector.loadFromUri(MODEL_URL);
      await faceapi.nets.faceRecognitionNet.loadFromUri(MODEL_URL);
      await faceapi.nets.faceLandmark68Net.loadFromUri(MODEL_URL);
      await faceapi.nets.faceExpressionNet.loadFromUri(MODEL_URL);
      await faceapi.nets.ageGenderNet.loadFromUri(MODEL_URL);
      startWebcam();
    };
    loadModels();
    return () => {
      // Stop the webcam tracks on unmount
      if (videoRef.current && videoRef.current.srcObject) {
        videoRef.current.srcObject.getTracks().forEach((track) => track.stop());
      }
    };
  }, []);

  const startWebcam = async () => {
    try {
      const stream = await navigator.mediaDevices.getUserMedia({
        video: true,
      });
      videoRef.current.srcObject = stream;
      videoRef.current.onloadedmetadata = () => {
        setInitializing(false);
      };
    } catch (error) {
      console.error("getUserMedia Error: ", error);
    }
  };

  const handleVideoPlay = () => {
    setInterval(async () => {
      if (initializing) {
        setInitializing(false);
      }
      const displaySize = { width: videoWidth, height: videoHeight };
      faceapi.matchDimensions(videoCanvasRef.current, displaySize);
      const detections = await faceapi
        .detectAllFaces(videoRef.current, new faceapi.TinyFaceDetectorOptions())
        .withFaceLandmarks()
        .withFaceExpressions()
        .withAgeAndGender()
        .withFaceDescriptors();
      const resizedDetections = faceapi.resizeResults(detections, displaySize);

      // Clear previous dots
      const ctx = videoCanvasRef.current.getContext("2d");
      ctx.clearRect(0, 0, videoWidth, videoHeight);
      faceapi.draw.drawFaceLandmarks(videoCanvasRef.current, resizedDetections);

      // Extract and set detected expressions and additional features
      if (resizedDetections.length > 0) {
        const expressionsObj = resizedDetections[0].expressions;
        const detectedExpressions = Object.keys(expressionsObj).filter(
          (expression) => expressionsObj[expression] > 0.7
        );
        const faceDescriptor = resizedDetections[0].descriptor;
        const [
          // skinColorValue,
          wrinklesValue,
          poresValue,
          acneValue,
          pigmentationValue,
        ] = faceDescriptor;

        // Map numerical values to text descriptions for skin features
        const mappedFeatures = {
          wrinkles: mapWrinkles(Math.abs(wrinklesValue).toFixed(1)),
          acne: mapAcne(Math.abs(acneValue).toFixed(1)),
          pores: mapPores(Math.abs(poresValue).toFixed(1)),
          pigmentation: mapPigmentation(Math.abs(pigmentationValue).toFixed(1)),
        };

        // Update state with mapped features
        setSkinFeatures(mappedFeatures);

        // Extract and set age and gender
        setAge(resizedDetections[0].age);
        setGender(resizedDetections[0].gender);
        if (detectedExpressions.length > 0) {
          setExpressions(detectedExpressions);
        }
      }
    }, 1000);
  };

  return (
    <div className="h-screen w-screen flex flex-row justify-between items-center border-2 border-white overflow-hidden">
      {initializing && <Loading />}
      <div className="h-full w-5/12 flex justify-center items-center relative">
        <video
          ref={videoRef}
          className="h-full w-full object-fill"
          autoPlay
          playsInline
          onPlay={handleVideoPlay}
          muted
        />
        <canvas
          ref={videoCanvasRef}
          width={videoWidth}
          height={videoHeight}
          className="absolute w-full h-full"
        ></canvas>
      </div>
    </div>
  );
};

export default WebcamComponent;
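
To use it, render <WebcamComponent /> anywhere in your app; this assumes the Loading component and the map* helpers imported from ../common exist in your project. One caveat: handleVideoPlay starts a setInterval that is never cleared, so if the component can unmount, store the id in a ref and clear it in the useEffect cleanup, roughly like this:

const intervalRef = useRef(null);
// in handleVideoPlay:
intervalRef.current = setInterval(async () => { /* detection loop */ }, 1000);
// in the useEffect cleanup, next to the track.stop() calls:
clearInterval(intervalRef.current);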