#javascript #reactjs #face-api
Question:
I'm struggling with this.
import React, { useEffect, useState, useRef } from 'react';
import * as faceapi from 'face-api.js';

const FaceDetect = ({ image }) => {
  const canvasRef = useRef();

  const FaceModelFunc = async () => {
    const canvass = faceapi.createCanvasFromMedia(image);
    canvasRef.current.innerHTML = canvass;
    const displaySize = { width: image.width, height: image.height };
    faceapi.matchDimensions(canvasRef.current, displaySize);
    const detections = await faceapi
      .detectAllFaces(image, new faceapi.TinyFaceDetectorOptions())
      .withFaceLandmarks()
      .withFaceExpressions();
    const resizedDetections = faceapi.resizeResults(detections, displaySize);
    // clearRect basically erases the canvas by turning the background transparent black
    canvasRef.current.getContext('2d').clearRect(0, 0, canvasRef.current.width, canvasRef.current.height);
    faceapi.draw.drawDetections(canvasRef.current, resizedDetections);
    faceapi.draw.drawFaceLandmarks(canvasRef.current, resizedDetections);
    faceapi.draw.drawFaceExpressions(canvasRef.current, resizedDetections, 0.05);
  };

  useEffect(() => {
    const loadModels = async () => {
      Promise.all([
        faceapi.nets.tinyFaceDetector.loadFromUri('./models'),
        faceapi.nets.faceLandmark68Net.loadFromUri('./models'),
        faceapi.nets.faceRecognitionNet.loadFromUri('./models'),
        faceapi.nets.faceExpressionNet.loadFromUri('./models')
      ]).then(() => FaceModelFunc());
    };
    loadModels();
  }, []);

  return (
    <>
      <img src={image} />
      <canvas ref={canvasRef} />
    </>
  );
};

export default FaceDetect;
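One detail worth flagging in the snippet above: faceapi.createCanvasFromMedia returns an HTMLCanvasElement, while innerHTML expects a string, so that assignment inserts the text "[object HTMLCanvasElement]" rather than the canvas itself. A minimal sketch of attaching a DOM node instead, assuming a hypothetical containerRef that points at a wrapper <div> rather than the <canvas>:

// Sketch only: append the canvas element as a node instead of stringifying it.
// containerRef (hypothetical) points at a wrapper <div>, not a <canvas>.
const canvass = faceapi.createCanvasFromMedia(image);
containerRef.current.replaceChildren(canvass); // or .append(canvass)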
Answer #1:
I've been working on this recently too. Here's what works for me.
TL;DR: I think using const canvas = faceapi.createCanvas(video);
instead of const canvas = faceapi.createCanvasFromMedia(video.srcObject);
solved the problem for me. Then I just had to manually overlay the canvas on top of the video with CSS (the markup these rules assume is sketched right after them):
#video {
  width: 100%;
  height: auto;
  position: absolute;
  top: 350px;
  left: 450px;
}

#canvas {
  position: absolute;
  top: 350px;
  left: 450px;
  z-index: 10;
}

#video-container {
  margin: 0 auto;
  margin-top: 50px;
  display: flex; /* the original had `position: center`, which is not valid CSS; flex centering is the likely intent */
  justify-content: center;
}
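For orientation, here is a sketch of the DOM structure those selectors assume; the video, canvas, and video-container ids come from the stylesheet above, while the component name and everything else are illustrative only:

// Sketch only: the structure the CSS above targets (ids taken from the stylesheet).
const VideoOverlay = () => (
  <div id="video-container">
    <video id="video" autoPlay playsInline muted />
    {/* #canvas sits on top of #video thanks to position: absolute and z-index: 10 */}
    <canvas id="canvas" />
  </div>
);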
Full updated code:
import React, { useEffect, useState, useRef } from "react";
import * as faceapi from "face-api.js";
import WebCam from "react-webcam";

const LiveML = () => {
  const [video, setVideo] = useState(null);
  const [canvas, setCanvas] = useState(null);
  const [isPlaying, setIsPlaying] = useState(false);
  const [isLoaded, setIsLoaded] = useState(false);
  const height = 560;
  const width = 720;
  const videoRef = useRef(null);
  const canvasRef = useRef(null);

  // Load models on page load
  useEffect(() => {
    Promise.all([
      faceapi.nets.tinyFaceDetector.loadFromUri("/models"),
      faceapi.nets.faceLandmark68Net.loadFromUri("/models"),
      faceapi.nets.faceRecognitionNet.loadFromUri("/models"),
      faceapi.nets.faceExpressionNet.loadFromUri("/models"),
    ]).then(() => {
      if (navigator.mediaDevices.getUserMedia) {
        // Keep a local reference: setVideo() does not update the `video`
        // state synchronously, so it can't be read back right away.
        const videoEl = document.getElementById("video");
        setVideo(videoEl);
        navigator.mediaDevices
          .getUserMedia({ audio: false, video: true })
          .then(function (stream) {
            // Display the webcam stream in the video element
            videoEl.srcObject = stream;
            // Play the video stream
            videoEl.play();
            setIsLoaded(true);
            console.log("Video: ", videoEl);
            addEvent(videoEl);
          })
          .catch(function (e) {
            console.log(e.name + ": " + e.message);
          });
      }

      function addEvent(videoEl) {
        videoEl.addEventListener("play", () => {
          console.log("addEvent");
          // const canvas = faceapi.createCanvasFromMedia(videoEl.srcObject);
          const canvas = faceapi.createCanvas(videoEl);
          canvas.id = "canvas";
          // video.append(canvas);
          // document.querySelector("#video").append(canvas); // redundant: the append below moves the node to <body>
          document.body.append(canvas);
          const displaySize = { width: width, height: height };
          faceapi.matchDimensions(canvas, displaySize);
          setInterval(async () => {
            const detections = await faceapi
              .detectAllFaces(videoEl, new faceapi.TinyFaceDetectorOptions())
              .withFaceLandmarks()
              .withFaceExpressions();
            const resizedDetections = faceapi.resizeResults(detections, displaySize);
            // Wipe the previous frame's drawings before redrawing
            canvas.getContext("2d").clearRect(0, 0, canvas.width, canvas.height);
            faceapi.draw.drawDetections(canvas, resizedDetections);
            faceapi.draw.drawFaceLandmarks(canvas, resizedDetections);
            faceapi.draw.drawFaceExpressions(canvas, resizedDetections);
          }, 100);
          console.log("Event added");
        });
      }

      console.log("models loaded");
    });
  }, []);

  console.log("Ready!");

  return (
    // id matches the #video-container selector in the CSS above
    <div id="video-container" className="video-container">
      {/* A native <video id="video"> element works here as well; WebCam from react-webcam is used instead. */}
      <WebCam
        id="video"
        ref={videoRef}
        autoPlay={true}
        width={width}
        height={height}
        playsInline
        muted
        style={{ width: "720px", height: "560px" }}
      />
      <canvas
        id="canvas"
        ref={canvasRef}
        style={{ width: "720px", height: "560px" }}
      />
    </div>
  );
};

export default LiveML;
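One caveat with the code above: the setInterval loop keeps running even after the component unmounts. A minimal sketch of the usual fix, assuming the rest of the effect stays as written (the intervalId name is purely illustrative):

useEffect(() => {
  let intervalId;
  // ...model loading and the "play" listener as above, but capture the timer id:
  // intervalId = setInterval(async () => { /* detect + draw */ }, 100);
  return () => {
    // Stop the detection loop when the component is removed
    clearInterval(intervalId);
  };
}, []);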
Let me know if this helps, or if you've already found a better solution.