init

2025-09-02 14:49:16 +08:00
commit 38ba663466
2885 changed files with 391107 additions and 0 deletions

View File

@@ -0,0 +1,355 @@
import 'image-capture';
import { IStore } from '../app/types';
import { isMobileBrowser } from '../base/environment/utils';
import { getLocalVideoTrack } from '../base/tracks/functions';
import { getBaseUrl } from '../base/util/helpers';
import {
addFaceLandmarks,
clearFaceExpressionBuffer,
newFaceBox
} from './actions';
import {
DETECTION_TYPES,
DETECT_FACE,
FACE_LANDMARKS_DETECTION_ERROR_THRESHOLD,
INIT_WORKER,
NO_DETECTION,
NO_FACE_DETECTION_THRESHOLD,
WEBHOOK_SEND_TIME_INTERVAL
} from './constants';
import {
getDetectionInterval,
sendFaceExpressionsWebhook
} from './functions';
import logger from './logger';
/**
* Class for face landmarks detection.
*/
class FaceLandmarksDetector {
private static instance: FaceLandmarksDetector;
private initialized = false;
private imageCapture: ImageCapture | null = null;
private worker: Worker | null = null;
private lastFaceExpression: string | null = null;
private lastFaceExpressionTimestamp: number | null = null;
private webhookSendInterval: number | null = null;
private detectionInterval: number | null = null;
private recognitionActive = false;
private canvas?: HTMLCanvasElement;
private context?: CanvasRenderingContext2D | null;
private errorCount = 0;
private noDetectionCount = 0;
private noDetectionStartTimestamp: number | null = null;
/**
* Constructor for the class; checks whether the environment supports OffscreenCanvas and falls back to a regular canvas when it does not.
*/
private constructor() {
if (typeof OffscreenCanvas === 'undefined') {
this.canvas = document.createElement('canvas');
this.context = this.canvas.getContext('2d');
}
}
/**
* Function for retrieving the FaceLandmarksDetector instance.
*
* @returns {FaceLandmarksDetector} - FaceLandmarksDetector instance.
*/
public static getInstance(): FaceLandmarksDetector {
if (!FaceLandmarksDetector.instance) {
FaceLandmarksDetector.instance = new FaceLandmarksDetector();
}
return FaceLandmarksDetector.instance;
}
/**
* Returns whether the detector has been initialized.
*
* @returns {boolean}
*/
isInitialized(): boolean {
return this.initialized;
}
/**
* Initialization function: the worker is loaded and initialized, and then, if possible, the detection starts.
*
* @param {IStore} store - Redux store with dispatch and getState methods.
* @returns {void}
*/
init({ dispatch, getState }: IStore) {
if (this.isInitialized()) {
logger.info('Worker has already been initialized');
return;
}
if (isMobileBrowser() || navigator.product === 'ReactNative') {
logger.warn('Unsupported environment for face detection');
return;
}
const baseUrl = `${getBaseUrl()}libs/`;
let workerUrl = `${baseUrl}face-landmarks-worker.min.js`;
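// Wrap the worker script in a same-origin Blob that importScripts the real bundle,
// so the worker can be created even when the libs are served from another origin
// (the Worker constructor only accepts same-origin script URLs).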
// @ts-ignore
const workerBlob = new Blob([ `importScripts("${workerUrl}");` ], { type: 'application/javascript' });
const state = getState();
const addToBuffer = Boolean(state['features/base/config'].webhookProxyUrl);
// @ts-ignore
workerUrl = window.URL.createObjectURL(workerBlob);
this.worker = new Worker(workerUrl, { name: 'Face Landmarks Worker' });
this.worker.onmessage = ({ data }: MessageEvent<any>) => {
const { faceExpression, faceBox, faceCount } = data;
const messageTimestamp = Date.now();
// If the number of detected faces is not exactly one, the detection is not taken into consideration.
if (faceCount !== 1) {
if (this.noDetectionCount === 0) {
this.noDetectionStartTimestamp = messageTimestamp;
}
this.noDetectionCount++;
if (this.noDetectionCount === NO_FACE_DETECTION_THRESHOLD && this.noDetectionStartTimestamp) {
this.addFaceLandmarks(
dispatch,
this.noDetectionStartTimestamp,
NO_DETECTION,
addToBuffer
);
}
return;
} else if (this.noDetectionCount > 0) {
this.noDetectionCount = 0;
this.noDetectionStartTimestamp = null;
}
if (faceExpression?.expression) {
const { expression } = faceExpression;
if (expression !== this.lastFaceExpression) {
this.addFaceLandmarks(
dispatch,
messageTimestamp,
expression,
addToBuffer
);
}
}
if (faceBox) {
dispatch(newFaceBox(faceBox));
}
APP.API.notifyFaceLandmarkDetected(faceBox, faceExpression);
};
const { faceLandmarks } = state['features/base/config'];
const detectionTypes = [
faceLandmarks?.enableFaceCentering && DETECTION_TYPES.FACE_BOX,
faceLandmarks?.enableFaceExpressionsDetection && DETECTION_TYPES.FACE_EXPRESSIONS
].filter(Boolean);
this.worker.postMessage({
type: INIT_WORKER,
baseUrl,
detectionTypes
});
this.initialized = true;
this.startDetection({
dispatch,
getState
});
}
/**
* The function which starts the detection process.
*
* @param {IStore} store - Redux store with dispatch and getState methods.
* @param {any} track - Track from middleware; can be undefined.
* @returns {void}
*/
startDetection({ dispatch, getState }: IStore, track?: any) {
if (!this.isInitialized()) {
logger.info('Worker has not been initialized');
return;
}
if (this.recognitionActive) {
logger.log('Face landmarks detection already active.');
return;
}
const state = getState();
const localVideoTrack = track || getLocalVideoTrack(state['features/base/tracks']);
if (!localVideoTrack || localVideoTrack.jitsiTrack?.isMuted()) {
logger.debug('Face landmarks detection is disabled due to missing local track.');
return;
}
const stream = localVideoTrack.jitsiTrack.getOriginalStream();
const firstVideoTrack = stream.getVideoTracks()[0];
this.imageCapture = new ImageCapture(firstVideoTrack);
this.recognitionActive = true;
logger.log('Start face landmarks detection');
const { faceLandmarks } = state['features/base/config'];
this.detectionInterval = window.setInterval(() => {
if (this.worker && this.imageCapture) {
this.sendDataToWorker(
faceLandmarks?.faceCenteringThreshold
).then(status => {
if (status) {
this.errorCount = 0;
} else if (++this.errorCount > FACE_LANDMARKS_DETECTION_ERROR_THRESHOLD) {
/* This prevents the detection from stopping immediately after an error occurs.
* Because of the small detection interval, some errors can happen right after the
* detection starts, while the track is not yet ready.
*/
this.stopDetection({
dispatch,
getState
});
}
});
}
}, getDetectionInterval(state));
const { webhookProxyUrl } = state['features/base/config'];
if (faceLandmarks?.enableFaceExpressionsDetection && webhookProxyUrl) {
this.webhookSendInterval = window.setInterval(async () => {
const result = await sendFaceExpressionsWebhook(getState());
if (result) {
dispatch(clearFaceExpressionBuffer());
}
}, WEBHOOK_SEND_TIME_INTERVAL);
}
}
/**
* The function which stops the detection process.
*
* @param {IStore} store - Redux store with dispatch and getState methods.
* @returns {void}
*/
stopDetection({ dispatch, getState }: IStore) {
if (!this.recognitionActive || !this.isInitialized()) {
return;
}
const stopTimestamp = Date.now();
const addToBuffer = Boolean(getState()['features/base/config'].webhookProxyUrl);
if (this.lastFaceExpression && this.lastFaceExpressionTimestamp) {
this.addFaceLandmarks(dispatch, stopTimestamp, null, addToBuffer);
}
this.webhookSendInterval && window.clearInterval(this.webhookSendInterval);
this.detectionInterval && window.clearInterval(this.detectionInterval);
this.webhookSendInterval = null;
this.detectionInterval = null;
this.imageCapture = null;
this.recognitionActive = false;
logger.log('Stop face landmarks detection');
}
/**
* Dispatches the action for adding new face landmarks and changes the state of the class.
*
* @param {IStore.dispatch} dispatch - The redux dispatch function.
* @param {number} endTimestamp - The timestamp when the face landmarks ended.
* @param {string | null} newFaceExpression - The new face expression, or null if there is none.
* @param {boolean} addToBuffer - Flag for adding the face landmarks to the buffer.
* @returns {void}
*/
private addFaceLandmarks(
dispatch: IStore['dispatch'],
endTimestamp: number,
newFaceExpression: string | null,
addToBuffer = false) {
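// For example, with lastFaceExpression = 'happy' recorded at t0, a call with
// newFaceExpression = 'sad' at t1 dispatches { faceExpression: 'happy', timestamp: t0,
// duration: t1 - t0 } and then starts tracking 'sad' from t1.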
if (this.lastFaceExpression && this.lastFaceExpressionTimestamp) {
dispatch(addFaceLandmarks(
{
duration: endTimestamp - this.lastFaceExpressionTimestamp,
faceExpression: this.lastFaceExpression,
timestamp: this.lastFaceExpressionTimestamp
},
addToBuffer
));
}
this.lastFaceExpression = newFaceExpression;
this.lastFaceExpressionTimestamp = endTimestamp;
}
/**
* Sends the image data captured from the track (via ImageCapture, through a canvas when needed) to the face detection worker.
*
* @param {number} faceCenteringThreshold - Movement threshold as percentage for sharing face coordinates.
* @returns {Promise<boolean>} - True if sent, false otherwise.
*/
private async sendDataToWorker(faceCenteringThreshold = 10): Promise<boolean> {
if (!this.imageCapture || !this.worker) {
logger.log('Environment not ready! Could not send data to worker');
return false;
}
// If ImageCapture is polyfilled it does not expose the track,
// so there is no point in checking its readyState.
if (this.imageCapture.track && this.imageCapture.track.readyState !== 'live') {
logger.log('Track not ready! Could not send data to worker');
return false;
}
let imageBitmap;
let image;
try {
imageBitmap = await this.imageCapture.grabFrame();
} catch (err) {
logger.log('Could not send data to worker');
return false;
}
if (typeof OffscreenCanvas === 'undefined' && this.canvas && this.context) {
this.canvas.width = imageBitmap.width;
this.canvas.height = imageBitmap.height;
this.context.drawImage(imageBitmap, 0, 0);
image = this.context.getImageData(0, 0, imageBitmap.width, imageBitmap.height);
} else {
image = imageBitmap;
}
this.worker.postMessage({
type: DETECT_FACE,
image,
threshold: faceCenteringThreshold
});
imageBitmap.close();
return true;
}
}
export default FaceLandmarksDetector.getInstance();
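/**
* Minimal usage sketch (illustrative only, assuming a redux store with the IStore
* shape); it mirrors how the middleware of this feature drives the singleton:
*
*     import FaceLandmarksDetector from './FaceLandmarksDetector';
*
*     FaceLandmarksDetector.init(store);                  // once, on CONFERENCE_JOINED
*     FaceLandmarksDetector.startDetection(store, track); // when a local camera track appears
*     FaceLandmarksDetector.stopDetection(store);         // on mute, track removal or leaving
*/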

View File

@@ -0,0 +1,250 @@
import { setWasmPaths } from '@tensorflow/tfjs-backend-wasm';
import { Config, FaceResult, Human } from '@vladmandic/human';
import { DETECTION_TYPES, FACE_DETECTION_SCORE_THRESHOLD, FACE_EXPRESSIONS_NAMING_MAPPING } from './constants';
import { DetectInput, DetectOutput, FaceBox, FaceExpression, InitInput } from './types';
export interface IFaceLandmarksHelper {
detect: ({ image, threshold }: DetectInput) => Promise<DetectOutput>;
getDetectionInProgress: () => boolean;
getDetections: (image: ImageBitmap | ImageData) => Promise<Array<FaceResult>>;
getFaceBox: (detections: Array<FaceResult>, threshold: number) => FaceBox | undefined;
getFaceCount: (detections: Array<FaceResult>) => number;
getFaceExpression: (detections: Array<FaceResult>) => FaceExpression | undefined;
init: () => Promise<void>;
}
/**
* Helper class for the human library.
*/
export class HumanHelper implements IFaceLandmarksHelper {
protected human: Human | undefined;
protected faceDetectionTypes: string[];
protected baseUrl: string;
private detectionInProgress = false;
private lastValidFaceBox: FaceBox | undefined;
/**
* Configuration for human.
*/
private config: Partial<Config> = {
backend: 'humangl',
async: true,
warmup: 'none',
cacheModels: true,
cacheSensitivity: 0,
debug: false,
deallocate: true,
filter: { enabled: false },
face: {
enabled: false,
detector: {
enabled: false,
rotation: false,
modelPath: 'blazeface-front.json',
maxDetected: 20
},
mesh: { enabled: false },
iris: { enabled: false },
emotion: {
enabled: false,
modelPath: 'emotion.json'
},
description: { enabled: false }
},
hand: { enabled: false },
gesture: { enabled: false },
body: { enabled: false },
segmentation: { enabled: false }
};
/**
* Constructor for the helper, which also kicks off initialization.
*
* @param {InitInput} input - The input for the helper.
* @returns {void}
*/
constructor({ baseUrl, detectionTypes }: InitInput) {
this.faceDetectionTypes = detectionTypes;
this.baseUrl = baseUrl;
this.init();
}
/**
* Initializes the human helper with the available tfjs backend for the given detection types.
*
* @returns {Promise<void>}
*/
async init(): Promise<void> {
if (!this.human) {
this.config.modelBasePath = this.baseUrl;
if (!self.OffscreenCanvas) {
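// Without OffscreenCanvas there is no WebGL context available in the worker,
// so the humangl backend cannot be used; fall back to the wasm backend.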
this.config.backend = 'wasm';
this.config.wasmPath = this.baseUrl;
setWasmPaths(this.baseUrl);
}
if (this.faceDetectionTypes.length > 0 && this.config.face) {
this.config.face.enabled = true;
}
if (this.faceDetectionTypes.includes(DETECTION_TYPES.FACE_BOX) && this.config.face?.detector) {
this.config.face.detector.enabled = true;
}
if (this.faceDetectionTypes.includes(DETECTION_TYPES.FACE_EXPRESSIONS) && this.config.face?.emotion) {
this.config.face.emotion.enabled = true;
}
const initialHuman = new Human(this.config);
try {
await initialHuman.load();
} catch (err) {
console.error(err);
}
this.human = initialHuman;
}
}
/**
* Gets the face box from the detections; if there is no valid detection it returns undefined.
*
* @param {Array<FaceResult>} detections - The array with the detections.
* @param {number} threshold - Face box position change threshold.
* @returns {FaceBox | undefined}
*/
getFaceBox(detections: Array<FaceResult>, threshold: number): FaceBox | undefined {
if (this.getFaceCount(detections) !== 1) {
return;
}
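// Example: boxRaw = [ 0.25, 0.10, 0.50, 0.60 ] gives left = 25, right = 75 and
// width = 50; with threshold = 10, a subsequent box whose left edge moved by fewer
// than 10 percentage points is ignored and the previous face box stays in effect.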
const faceBox: FaceBox = {
// normalize to percentage based
left: Math.round(detections[0].boxRaw[0] * 100),
right: Math.round((detections[0].boxRaw[0] + detections[0].boxRaw[2]) * 100)
};
faceBox.width = Math.round(faceBox.right - faceBox.left);
if (this.lastValidFaceBox && threshold && Math.abs(this.lastValidFaceBox.left - faceBox.left) < threshold) {
return;
}
this.lastValidFaceBox = faceBox;
return faceBox;
}
/**
* Gets the face expression from the detections; if there is no valid detection it returns undefined.
*
* @param {Array<FaceResult>} detections - The array with the detections.
* @returns {FaceExpression | undefined}
*/
getFaceExpression(detections: Array<FaceResult>): FaceExpression | undefined {
if (this.getFaceCount(detections) !== 1) {
return;
}
const detection = detections[0];
if (detection.emotion) {
return {
expression: FACE_EXPRESSIONS_NAMING_MAPPING[detection.emotion[0].emotion],
score: detection.emotion[0].score
};
}
}
/**
* Gets the face count, i.e. the number of detections.
*
* @param {Array<FaceResult>} detections - The array with the detections.
* @returns {number}
*/
getFaceCount(detections: Array<FaceResult> | undefined): number {
if (detections) {
return detections.length;
}
return 0;
}
/**
* Gets the detections from the image captured from the track.
*
* @param {ImageBitmap | ImageData} image - The image captured from the track,
* if OffscreenCanvas available it will be ImageBitmap, otherwise it will be ImageData.
* @returns {Promise<Array<FaceResult>>}
*/
async getDetections(image: ImageBitmap | ImageData): Promise<Array<FaceResult>> {
if (!this.human || !this.faceDetectionTypes.length) {
return [];
}
this.human.tf.engine().startScope();
const imageTensor = this.human.tf.browser.fromPixels(image);
const { face: detections } = await this.human.detect(imageTensor, this.config);
this.human.tf.engine().endScope();
return detections.filter(detection => detection.score > FACE_DETECTION_SCORE_THRESHOLD);
}
/**
* Gathers together all the data from the detections; this is the function called from the worker.
*
* @param {DetectInput} input - The input for the detections.
* @returns {Promise<DetectOutput>}
*/
public async detect({ image, threshold }: DetectInput): Promise<DetectOutput> {
let faceExpression;
let faceBox;
this.detectionInProgress = true;
const detections = await this.getDetections(image);
if (this.faceDetectionTypes.includes(DETECTION_TYPES.FACE_EXPRESSIONS)) {
faceExpression = this.getFaceExpression(detections);
}
if (this.faceDetectionTypes.includes(DETECTION_TYPES.FACE_BOX)) {
// if more than one face is detected the face centering will be disabled.
if (this.getFaceCount(detections) > 1) {
this.faceDetectionTypes.splice(this.faceDetectionTypes.indexOf(DETECTION_TYPES.FACE_BOX), 1);
// full-width face box that resets the video to its default centering
faceBox = {
left: 0,
right: 100,
width: 100
};
} else {
faceBox = this.getFaceBox(detections, threshold);
}
}
this.detectionInProgress = false;
return {
faceExpression,
faceBox,
faceCount: this.getFaceCount(detections)
};
}
/**
* Returns the detection state.
*
* @returns {boolean}
*/
public getDetectionInProgress(): boolean {
return this.detectionInProgress;
}
}

View File

@@ -0,0 +1,40 @@
/**
* Redux action type dispatched in order to add real-time faceLandmarks to timeline.
*
* {
* type: ADD_FACE_LANDMARKS,
* faceLandmarks: FaceLandmarks
* }
*/
export const ADD_FACE_LANDMARKS = 'ADD_FACE_LANDMARKS';
/**
* Redux action type dispatched in order to clear the faceLandmarks buffer for webhook in the state.
*
* {
* type: CLEAR_FACE_LANDMARKS_BUFFER
* }
*/
export const CLEAR_FACE_LANDMARKS_BUFFER = 'CLEAR_FACE_LANDMARKS_BUFFER';
/**
* Redux action type dispatched in order to update coordinates of a detected face.
*
* {
* type: UPDATE_FACE_COORDINATES,
* faceBox: Object({ left, bottom, right, top }),
* participantId: string
* }
*/
export const UPDATE_FACE_COORDINATES = 'UPDATE_FACE_COORDINATES';
/**
* Redux action type dispatched in order to signal new face coordinates were obtained for the local participant.
*
* {
* type: NEW_FACE_COORDINATES,
* faceBox: Object({ left, bottom, right, top }),
* participantId: string
* }
*/
export const NEW_FACE_COORDINATES = 'NEW_FACE_COORDINATES';

View File

@@ -0,0 +1,48 @@
import 'image-capture';
import { AnyAction } from 'redux';
import {
ADD_FACE_LANDMARKS,
CLEAR_FACE_LANDMARKS_BUFFER,
NEW_FACE_COORDINATES
} from './actionTypes';
import { FaceBox, FaceLandmarks } from './types';
/**
* Adds new face landmarks to the timeline.
*
* @param {FaceLandmarks} faceLandmarks - The new face landmarks to add to the timeline.
* @param {boolean} addToBuffer - If true adds the face landmarks to a buffer in the reducer for webhook.
* @returns {AnyAction}
*/
export function addFaceLandmarks(faceLandmarks: FaceLandmarks, addToBuffer: boolean): AnyAction {
return {
type: ADD_FACE_LANDMARKS,
faceLandmarks,
addToBuffer
};
}
/**
* Clears the face landmarks buffer in the state.
*
* @returns {AnyAction}
*/
export function clearFaceExpressionBuffer(): AnyAction {
return {
type: CLEAR_FACE_LANDMARKS_BUFFER
};
}
/**
* Signals that a new face box was obtained for the local participant.
*
* @param {FaceBox} faceBox - The face box of the local participant.
* @returns {AnyAction}
*/
export function newFaceBox(faceBox: FaceBox): AnyAction {
return {
type: NEW_FACE_COORDINATES,
faceBox
};
}

View File

@@ -0,0 +1,83 @@
export const FACE_EXPRESSIONS_EMOJIS = {
happy: '😊',
neutral: '😐',
sad: '🙁',
surprised: '😮',
angry: '😠',
fearful: '😨'
// disgusted: '🤢'
};
export const FACE_EXPRESSIONS = [ 'happy', 'neutral', 'sad', 'surprised', 'angry', 'fearful' ];
export const FACE_EXPRESSIONS_NAMING_MAPPING = {
happy: 'happy',
neutral: 'neutral',
surprise: 'surprised',
angry: 'angry',
fear: 'fearful',
disgust: 'disgusted',
sad: 'sad'
};
/**
* Time interval in ms for sending face expressions to the webhook.
*/
export const WEBHOOK_SEND_TIME_INTERVAL = 15000;
/**
* Type of message sent from the main thread to the worker that contains init information,
* such as the models directory and the enabled detection types.
*/
export const INIT_WORKER = 'INIT_WORKER';
/**
* Type of event sent on the data channel.
*/
export const FACE_BOX_EVENT_TYPE = 'face-box';
/**
* Type of event sent on the data channel.
*/
export const FACE_LANDMARKS_EVENT_TYPE = 'face-landmarks';
/**
* Milliseconds interval value for sending new image data to the worker.
*/
export const SEND_IMAGE_INTERVAL_MS = 1000;
/**
* Type of message sent from the main thread to the worker that contains image data and
* will trigger a response message from the worker containing the detected face(s) info.
*/
export const DETECT_FACE = 'DETECT_FACE';
/**
* Available detection types.
*/
export const DETECTION_TYPES = {
FACE_BOX: 'face-box',
FACE_EXPRESSIONS: 'face-expressions'
};
/**
* Threshold for the face detection score.
*/
export const FACE_DETECTION_SCORE_THRESHOLD = 0.75;
/**
* Number of consecutive errors after which the detection is stopped.
*/
export const FACE_LANDMARKS_DETECTION_ERROR_THRESHOLD = 4;
/**
* Number of consecutive detections with no face after which an action is dispatched.
*/
export const NO_FACE_DETECTION_THRESHOLD = 5;
/**
* Constant type used for signaling that no valid face detection is found.
*/
export const NO_DETECTION = 'no-detection';

View File

@@ -0,0 +1,26 @@
import { HumanHelper, IFaceLandmarksHelper } from './FaceLandmarksHelper';
import { DETECT_FACE, INIT_WORKER } from './constants';
let helper: IFaceLandmarksHelper;
onmessage = async function({ data }: MessageEvent<any>) {
switch (data.type) {
case DETECT_FACE: {
if (!helper || helper.getDetectionInProgress()) {
return;
}
const detections = await helper.detect(data);
if (detections) {
self.postMessage(detections);
}
break;
}
case INIT_WORKER: {
helper = new HumanHelper(data);
break;
}
}
};
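// Minimal sketch of the main-thread side of this protocol (illustrative only; the
// actual wiring lives in FaceLandmarksDetector): INIT_WORKER is posted once with the
// models base URL and detection types, DETECT_FACE is posted per captured frame, and
// the reply has the DetectOutput shape.
//
//     const worker = new Worker(workerUrl, { name: 'Face Landmarks Worker' });
//     worker.postMessage({ type: INIT_WORKER, baseUrl, detectionTypes });
//     worker.onmessage = ({ data }) => {
//         const { faceExpression, faceBox, faceCount } = data;
//     };
//     worker.postMessage({ type: DETECT_FACE, image, threshold });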

View File

@@ -0,0 +1,163 @@
import { IReduxState } from '../app/types';
import { IJitsiConference } from '../base/conference/reducer';
import { getLocalParticipant } from '../base/participants/functions';
import { extractFqnFromPath } from '../dynamic-branding/functions.any';
import { FACE_BOX_EVENT_TYPE, FACE_LANDMARKS_EVENT_TYPE, SEND_IMAGE_INTERVAL_MS } from './constants';
import logger from './logger';
import { FaceBox, FaceLandmarks } from './types';
/**
* Sends the face landmarks to other participants via the data channel.
*
* @param {any} conference - The current conference.
* @param {FaceLandmarks} faceLandmarks - Face landmarks to be sent.
* @returns {void}
*/
export function sendFaceExpressionToParticipants(conference: any, faceLandmarks: FaceLandmarks): void {
try {
conference.sendEndpointMessage('', {
type: FACE_LANDMARKS_EVENT_TYPE,
faceLandmarks
});
} catch (err) {
logger.warn('Could not broadcast the face landmarks to the other participants', err);
}
}
/**
* Sends the face box to all the other participants.
*
* @param {any} conference - The current conference.
* @param {FaceBox} faceBox - Face box to be sent.
* @returns {void}
*/
export function sendFaceBoxToParticipants(
conference: any,
faceBox: FaceBox
): void {
try {
conference.sendEndpointMessage('', {
type: FACE_BOX_EVENT_TYPE,
faceBox
});
} catch (err) {
logger.warn('Could not broadcast the face box to the other participants', err);
}
}
/**
* Sends the face landmarks to prosody.
*
* @param {any} conference - The current conference.
* @param {FaceLandmarks} faceLandmarks - Face landmarks to be sent.
* @returns {void}
*/
export function sendFaceExpressionToServer(conference: IJitsiConference | undefined,
faceLandmarks: FaceLandmarks): void {
try {
conference?.sendFaceLandmarks(faceLandmarks);
} catch (err) {
logger.warn('Could not send the face landmarks to prosody', err);
}
}
/**
* Sends face landmarks to backend.
*
* @param {Object} state - Redux state.
* @returns {boolean} - True if sent, false otherwise.
*/
export async function sendFaceExpressionsWebhook(state: IReduxState) {
const { webhookProxyUrl: url } = state['features/base/config'];
const { conference } = state['features/base/conference'];
const { jwt } = state['features/base/jwt'];
const { connection } = state['features/base/connection'];
const jid = connection?.getJid();
const localParticipant = getLocalParticipant(state);
const { faceLandmarksBuffer } = state['features/face-landmarks'];
if (faceLandmarksBuffer.length === 0) {
return false;
}
const headers = {
...jwt ? { 'Authorization': `Bearer ${jwt}` } : {},
'Content-Type': 'application/json'
};
const reqBody = {
meetingFqn: extractFqnFromPath(),
sessionId: conference?.getMeetingUniqueId(),
submitted: Date.now(),
emotions: faceLandmarksBuffer,
participantId: localParticipant?.jwtId,
participantName: localParticipant?.name,
participantJid: jid
};
if (url) {
try {
const res = await fetch(`${url}/emotions`, {
method: 'POST',
headers,
body: JSON.stringify(reqBody)
});
if (res.ok) {
return true;
}
logger.error('Status error:', res.status);
} catch (err) {
logger.error('Could not send request', err);
}
}
return false;
}
/**
* Gets face box for a participant id.
*
* @param {string} id - The participant id.
* @param {IReduxState} state - The redux state.
* @returns {Object}
*/
function getFaceBoxForId(id: string, state: IReduxState) {
return state['features/face-landmarks'].faceBoxes[id];
}
/**
* Gets the video object position for a participant id.
*
* @param {IReduxState} state - The redux state.
* @param {string} id - The participant id.
* @returns {string} - CSS object-position in the shape of '{horizontalPercentage}% {verticalPercentage}%'.
*/
export function getVideoObjectPosition(state: IReduxState, id?: string) {
const faceBox = id && getFaceBoxForId(id, state);
if (faceBox) {
const { right, width } = faceBox;
if (right && width) {
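// e.g. a faceBox of { right: 80, width: 40 } yields an object-position of '60% 50%'.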
return `${right - (width / 2)}% 50%`;
}
}
return '50% 50%';
}
/**
* Gets the detection interval for the face detection.
*
* @param {IReduxState} state - The redux state.
* @returns {number} - Number of milliseconds for doing face detection.
*/
export function getDetectionInterval(state: IReduxState) {
const { faceLandmarks } = state['features/base/config'];
return Math.max(faceLandmarks?.captureInterval || SEND_IMAGE_INTERVAL_MS);
}

View File

@@ -0,0 +1,3 @@
import { getLogger } from '../base/logging/functions';
export default getLogger('features/face-landmarks');

View File

@@ -0,0 +1,130 @@
import { AnyAction } from 'redux';
import { IStore } from '../app/types';
import {
CONFERENCE_JOINED,
CONFERENCE_WILL_LEAVE,
ENDPOINT_MESSAGE_RECEIVED
} from '../base/conference/actionTypes';
import { getCurrentConference } from '../base/conference/functions';
import { getLocalParticipant, getParticipantCount } from '../base/participants/functions';
import MiddlewareRegistry from '../base/redux/MiddlewareRegistry';
import { TRACK_ADDED, TRACK_REMOVED, TRACK_UPDATED } from '../base/tracks/actionTypes';
import FaceLandmarksDetector from './FaceLandmarksDetector';
import { ADD_FACE_LANDMARKS, NEW_FACE_COORDINATES, UPDATE_FACE_COORDINATES } from './actionTypes';
import { FACE_BOX_EVENT_TYPE } from './constants';
import { sendFaceBoxToParticipants, sendFaceExpressionToParticipants } from './functions';
MiddlewareRegistry.register((store: IStore) => (next: Function) => (action: AnyAction) => {
const { dispatch, getState } = store;
const { faceLandmarks: faceLandmarksConfig } = getState()['features/base/config'];
const isEnabled = faceLandmarksConfig?.enableFaceCentering || faceLandmarksConfig?.enableFaceExpressionsDetection;
if (action.type === CONFERENCE_JOINED) {
if (isEnabled) {
FaceLandmarksDetector.init(store);
}
return next(action);
} else if (action.type === ENDPOINT_MESSAGE_RECEIVED) {
// Allow using remote face centering data when local face centering is not enabled.
const { participant, data } = action;
if (data?.type === FACE_BOX_EVENT_TYPE) {
dispatch({
type: UPDATE_FACE_COORDINATES,
faceBox: data.faceBox,
id: participant.getId()
});
}
return next(action);
}
if (!isEnabled) {
return next(action);
}
switch (action.type) {
case CONFERENCE_WILL_LEAVE : {
FaceLandmarksDetector.stopDetection(store);
break;
}
case TRACK_ADDED: {
const { jitsiTrack: { isLocal, videoType }, muted } = action.track;
if (videoType === 'camera' && isLocal() && !muted) {
// Need to pass the track explicitly since it is not yet added to the store.
FaceLandmarksDetector.startDetection(store, action.track);
}
break;
}
case TRACK_UPDATED: {
const { jitsiTrack: { isLocal, videoType } } = action.track;
if (videoType !== 'camera' || !isLocal()) {
break;
}
const { muted } = action.track;
if (typeof muted !== 'undefined') {
// addresses video mute state changes
if (muted) {
FaceLandmarksDetector.stopDetection(store);
} else {
FaceLandmarksDetector.startDetection(store);
}
}
break;
}
case TRACK_REMOVED: {
const { jitsiTrack: { isLocal, videoType } } = action.track;
if (videoType === 'camera' && isLocal()) {
FaceLandmarksDetector.stopDetection(store);
}
break;
}
case ADD_FACE_LANDMARKS: {
const state = getState();
const { faceLandmarks } = action;
const conference = getCurrentConference(state);
if (getParticipantCount(state) > 1) {
sendFaceExpressionToParticipants(conference, faceLandmarks);
}
// Disabled for now as there is no value in having the data in speaker stats at the server.
// sendFaceExpressionToServer(conference, faceLandmarks);
break;
}
case NEW_FACE_COORDINATES: {
const state = getState();
const { faceBox } = action;
const conference = getCurrentConference(state);
const localParticipant = getLocalParticipant(state);
if (getParticipantCount(state) > 1) {
sendFaceBoxToParticipants(conference, faceBox);
}
dispatch({
type: UPDATE_FACE_COORDINATES,
faceBox,
id: localParticipant?.id
});
break;
}
}
return next(action);
});

View File

@@ -0,0 +1,61 @@
import ReducerRegistry from '../base/redux/ReducerRegistry';
import {
ADD_FACE_LANDMARKS,
CLEAR_FACE_LANDMARKS_BUFFER,
UPDATE_FACE_COORDINATES
} from './actionTypes';
import { FaceBox, FaceLandmarks } from './types';
const defaultState = {
faceBoxes: {},
faceLandmarks: [],
faceLandmarksBuffer: [],
recognitionActive: false
};
export interface IFaceLandmarksState {
faceBoxes: { [key: string]: FaceBox; };
faceLandmarks: Array<FaceLandmarks>;
faceLandmarksBuffer: Array<{
emotion: string;
timestamp: number;
}>;
recognitionActive: boolean;
}
ReducerRegistry.register<IFaceLandmarksState>('features/face-landmarks',
(state = defaultState, action): IFaceLandmarksState => {
switch (action.type) {
case ADD_FACE_LANDMARKS: {
const { addToBuffer, faceLandmarks }: { addToBuffer: boolean; faceLandmarks: FaceLandmarks; } = action;
return {
...state,
faceLandmarks: [ ...state.faceLandmarks, faceLandmarks ],
faceLandmarksBuffer: addToBuffer ? [ ...state.faceLandmarksBuffer,
{
emotion: faceLandmarks.faceExpression,
timestamp: faceLandmarks.timestamp
} ] : state.faceLandmarksBuffer
};
}
case CLEAR_FACE_LANDMARKS_BUFFER: {
return {
...state,
faceLandmarksBuffer: []
};
}
case UPDATE_FACE_COORDINATES: {
return {
...state,
faceBoxes: {
...state.faceBoxes,
[action.id]: action.faceBox
}
};
}
}
return state;
});

View File

@@ -0,0 +1,39 @@
export type DetectInput = {
// @ts-ignore
image: ImageBitmap | ImageData;
threshold: number;
};
export type FaceBox = {
left: number;
right: number;
width?: number;
};
export type InitInput = {
baseUrl: string;
detectionTypes: string[];
};
export type DetectOutput = {
faceBox?: FaceBox;
faceCount: number;
faceExpression?: FaceExpression;
};
export type FaceExpression = {
expression: string;
score: number;
};
export type FaceLandmarks = {
// duration in milliseconds of the face landmarks
duration: number;
faceExpression: string;
score?: number;
// the start timestamp of the expression
timestamp: number;
};