如何使用 faceapi.js 从 base64 数据中检测人脸

问题描述 投票:0回答:1

我的目标是检测人脸并从视频帧中捕获图像,并使用 faceapi 检查捕获图像中的人脸位置。我成功地从视频帧中检测到人脸并捕获了图像,但无法从捕获的图像中检测到人脸。

这是我的js代码-

const video = document.getElementById('video')

// Load every face-api.js model the page uses before starting the camera.
// A .catch is required: without it a failed model download becomes an
// unhandled rejection and the video silently never starts.
Promise.all([
  faceapi.nets.tinyFaceDetector.loadFromUri('/models'),
  faceapi.nets.faceLandmark68Net.loadFromUri('/models'),
  faceapi.nets.faceRecognitionNet.loadFromUri('/models'),
  faceapi.nets.faceExpressionNet.loadFromUri('/models'),
  faceapi.nets.ssdMobilenetv1.loadFromUri('/models')
])
  .then(startVideo)
  .catch((err) => console.error('Failed to load face-api models:', err))

/**
 * Stream the user's camera into the <video> element.
 *
 * Uses the modern promise-based navigator.mediaDevices.getUserMedia;
 * the callback-based navigator.getUserMedia (and its webkit/moz prefixed
 * variants the original fell back to) is deprecated and removed from
 * current browsers.
 */
function startVideo() {
  if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
    console.log("getUserMedia not supported");
    return;
  }

  navigator.mediaDevices.getUserMedia({ audio: false, video: {} })
    .then((stream) => {
      const video = document.querySelector('video');
      video.srcObject = stream;
      // Wait for metadata (frame dimensions) before starting playback.
      video.onloadedmetadata = () => {
        video.play();
      };
    })
    .catch((err) => {
      console.log("The following error occurred: " + err.name);
    });
}

// Run face detection on the live video every 100 ms and enable the capture
// button only while exactly one face sits inside the target area.
video.addEventListener('play', () => {
  const canvas = faceapi.createCanvasFromMedia(video)
  const area = document.getElementById('container')
  area.append(canvas)
  const displaySize = { width: video.width, height: video.height }
  faceapi.matchDimensions(canvas, displaySize)

  setInterval(async () => {
    // One detection pass per tick; the original ran detectAllFaces twice
    // (once for `detections`, once for `count`), doubling the work.
    const detections = await faceapi
      .detectAllFaces(video, new faceapi.TinyFaceDetectorOptions())
      .withFaceLandmarks()
      .withFaceExpressions()

    const button = document.getElementById('button')

    if (detections.length > 1) {
      alert("There are multiple faces in the frame. Please keep only one face");
      return
    }

    const resizedDetections = faceapi.resizeResults(detections, displaySize)
    canvas.getContext('2d').clearRect(0, 0, canvas.width, canvas.height)

    // Guard: with no face in frame, resizedDetections[0] is undefined and
    // the original code threw a TypeError on every tick.
    if (detections.length === 0) {
      button.disabled = true
      return
    }

    // Use the public .box accessor rather than the private _box fields.
    const { x: frame_x, y: frame_y, width: frame_width, height: frame_height } =
      resizedDetections[0].detection.box

    // The face must be roughly the size and position of the on-screen
    // guide box (#box) before a capture is allowed.
    const inTargetArea =
      frame_height >= 160 && frame_height <= 270 &&
      frame_width >= 180 && frame_width <= 270 &&
      frame_x >= 100 && frame_x <= 195 &&
      frame_y >= 80 && frame_y <= 190

    if (inTargetArea) {
      button.disabled = false
      button.onclick = async function () {
        button.disabled = true
        await CaptureImage()
      }
    } else {
      button.disabled = true
    }
  }, 100)
})

/**
 * Capture the current video frame, display it in #screenshot, and run
 * face detection on the captured image.
 *
 * Root cause of the question's bug: the original called detectAllFaces(img)
 * immediately after assigning img.src, before the data-URL image had
 * decoded, so detection always found nothing. Awaiting img.decode()
 * guarantees the pixels are available first.
 *
 * @returns {Promise<Array>} the face-api detection results for the capture.
 */
async function CaptureImage(){
  // Remove any previously captured screenshot before adding the new one.
  document.querySelectorAll("#screenshot img")
    .forEach(img => img.remove());

  // Draw the current video frame scaled into a 239x306 canvas
  // (the size of the #box capture guide).
  const canvas = document.createElement('canvas');
  canvas.width = 239;
  canvas.height = 306;
  canvas.getContext('2d').drawImage(video, 0, 0, 239, 306);

  const img = new Image()
  img.src = canvas.toDataURL('image/png', 1.0)
  document.getElementById('screenshot').appendChild(img)

  // Wait until the data-URL image is fully decoded; detecting on an
  // image that has not loaded yet silently yields no faces.
  await img.decode()

  const detections = await faceapi
    .detectAllFaces(img, new faceapi.TinyFaceDetectorOptions())
    .withFaceLandmarks()
    .withFaceExpressions()
  return detections
}

在 CaptureImage 函数的最后一行,我试图检测人脸,以便我可以检测图像中的人脸位置,但失败了。

这是我的html代码

<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <meta http-equiv="X-UA-Compatible" content="ie=edge">
  <title>Document</title>
  <!-- face-api.js must be loaded (deferred) before script.js uses it -->
  <script defer src="face-api.min.js"></script>
  <script src="https://code.jquery.com/jquery-3.2.1.min.js"></script>
  <script src="jquery.facedetection.min.js"></script>
  <script defer src="script.js"></script>
  <style>
    /* Centers the video, overlay canvas and guide box on top of each other */
    #container {
      margin: 0;
      padding: 0;
      width: 500px;
      height: 375px;
      display: flex;
      justify-content: center;
      align-items: center;
    }

    /* Detection overlay drawn by face-api sits on top of the video */
    canvas {
      position: absolute;
    }

    /* Visual guide the user must fit their face into before capture;
       239x306 matches the canvas size used in CaptureImage() */
    #box {
      width: 239px;
      height: 306px;
      position: absolute;
      background-image: url("paper.png");
      background-repeat: no-repeat;
      /*background-size: 450px 325px;*/
    }
  </style>
</head>
<body>
  <div>
  <!-- video feed + overlay canvas (appended by script.js) + guide box -->
  <div id="container">
  <div id="box"></div>
  <video id="video" width="500" height="375" autoplay muted></video>
  </div>
  <!-- enabled by script.js only when one face is inside the guide box -->
  <button type="button" id="button" disabled>Click to take picture</button>
  </div>
  <!-- captured frame is appended here by CaptureImage() -->
  <div id="screenshot">
  </div>  
</body>
</html>
javascript face-api
1个回答
0
投票

通过以下方式,您可以在 face api js 中使用 base64 图像

  1. 将 base64 转换为 blob
  2. 将 blob 转换为 face api 可接受的图像格式

示例代码:

    // `base64ImageData` is a data URL ("data:image/png;base64,...");
    // fetch() can decode it directly into a Blob. (The original snippet
    // had a typo: `bas64ImageData`.)
    const base64Response = await fetch(base64ImageData);
    const blob = await base64Response.blob();
    // Convert the blob into an HTMLImageElement face-api.js accepts,
    // fully loaded before detection runs.
    const img = await faceapi.bufferToImage(blob);

    const detections = await faceapi.detectSingleFace(img, new faceapi.TinyFaceDetectorOptions());
© www.soinside.com 2019 - 2024. All rights reserved.