// Copyright 2024 The Casdoor Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. import * as faceapi from "face-api.js"; import React, {useState} from "react"; import {Button, Modal, Progress, Space, Spin, message} from "antd"; import i18next from "i18next"; import Dragger from "antd/es/upload/Dragger"; import * as Setting from "../../Setting"; const FaceRecognitionModal = (props) => { const {visible, onOk, onCancel, withImage} = props; const [modelsLoaded, setModelsLoaded] = React.useState(false); const [isCameraCaptured, setIsCameraCaptured] = useState(false); const videoRef = React.useRef(); const canvasRef = React.useRef(); const detection = React.useRef(null); const mediaStreamRef = React.useRef(null); const [percent, setPercent] = useState(0); const [files, setFiles] = useState([]); const [currentFaceId, setCurrentFaceId] = React.useState(); const [currentFaceIndex, setCurrentFaceIndex] = React.useState(); React.useEffect(() => { const loadModels = async() => { // const MODEL_URL = "https://justadudewhohacks.github.io/face-api.js/models"; const MODEL_URL = `${Setting.StaticBaseUrl}/casdoor/models`; Promise.all([ faceapi.nets.tinyFaceDetector.loadFromUri(MODEL_URL), faceapi.nets.faceLandmark68Net.loadFromUri(MODEL_URL), faceapi.nets.faceRecognitionNet.loadFromUri(MODEL_URL), ]).then((val) => { setModelsLoaded(true); }).catch((err) => { message.error(i18next.t("login:Model loading failure")); onCancel(); }); }; loadModels(); }, []); 
React.useEffect(() => { if (withImage) { return; } if (visible) { setPercent(0); if (modelsLoaded) { navigator.mediaDevices .getUserMedia({video: {facingMode: "user"}}) .then((stream) => { mediaStreamRef.current = stream; setIsCameraCaptured(true); }).catch((error) => { handleCameraError(error); }); } } else { clearInterval(detection.current); detection.current = null; setIsCameraCaptured(false); } return () => { clearInterval(detection.current); detection.current = null; setIsCameraCaptured(false); }; }, [visible, modelsLoaded]); React.useEffect(() => { if (withImage) { return; } if (isCameraCaptured) { let count = 0; const interval = setInterval(() => { count++; if (videoRef.current) { videoRef.current.srcObject = mediaStreamRef.current; videoRef.current.play(); clearInterval(interval); } if (count >= 30) { clearInterval(interval); onCancel(); } }, 100); } else { mediaStreamRef.current?.getTracks().forEach(track => track.stop()); if (videoRef.current) { videoRef.current.srcObject = null; } } }, [isCameraCaptured]); const handleStreamVideo = () => { if (withImage) { return; } let count = 0; let goodCount = 0; if (!detection.current) { detection.current = setInterval(async() => { if (modelsLoaded && videoRef.current && visible) { const faces = await faceapi.detectAllFaces(videoRef.current, new faceapi.TinyFaceDetectorOptions()).withFaceLandmarks().withFaceDescriptors(); count++; if (count % 50 === 0) { message.warning(i18next.t("login:Please ensure sufficient lighting and align your face in the center of the recognition box")); } else if (count > 300) { message.error(i18next.t("login:Face recognition failed")); onCancel(); } if (faces.length === 1) { const face = faces[0]; setPercent(Math.round(face.detection.score * 100)); const array = Array.from(face.descriptor); if (face.detection.score > 0.9) { goodCount++; if (face.detection.score > 0.99 || goodCount > 10) { clearInterval(detection.current); onOk(array); } } } else { setPercent(Math.round(percent / 2)); } } 
}, 100); } }; const handleCameraError = (error) => { onCancel(); if (error instanceof DOMException) { if (error.name === "NotFoundError" || error.name === "DevicesNotFoundError") { message.error(i18next.t("login:Please ensure that you have a camera device for facial recognition")); } else if (error.name === "NotAllowedError" || error.name === "PermissionDeniedError") { message.error(i18next.t("login:Please provide permission to access the camera")); } else if (error.name === "NotReadableError" || error.name === "TrackStartError") { message.error(i18next.t("login:The camera is currently in use by another webpage")); } else if (error.name === "TypeError") { message.error(i18next.t("login:Please load the webpage using HTTPS, otherwise the camera cannot be accessed")); } else { message.error(error.message); } } }; const getBase64 = (file) => { return new Promise((resolve, reject) => { const reader = new FileReader(); reader.readAsDataURL(file); reader.onload = () => resolve(reader.result); reader.onerror = (error) => reject(error); }); }; if (!withImage) { return (
// NOTE(review): the JSX markup in this render section appears to have been
// stripped or corrupted in transit (element tags such as <Modal>, <video>,
// <canvas>, <Progress> are missing, leaving only attribute/child fragments).
// Kept byte-identical below — restore this section from version control
// before making any edits here.
Cancel , ]} >
{ modelsLoaded ?
:
}
); } else { return
{ onOk(Array.from(currentFaceId.descriptor)); }}> Ok , , ]}> { getBase64(file).then(res => { file.base64 = res; files.push(file); }); setCurrentFaceId([]); return false; }} onRemove={(file) => { const index = files.indexOf(file); const newFileList = files.slice(); newFileList.splice(index, 1); setFiles(newFileList); setCurrentFaceId([]); }} >

{i18next.t("general:Click to Upload")}

{ modelsLoaded ? : null }
{ currentFaceId && currentFaceId.length !== 0 ? (
{i18next.t("application:Select")}:{files[currentFaceIndex]?.name}
selected
) : null }
; } }; export default FaceRecognitionModal;