init
commit 38ba663466
2025-09-02 14:49:16 +08:00
2885 changed files with 391107 additions and 0 deletions


@@ -0,0 +1,269 @@
import { VIRTUAL_BACKGROUND_TYPE } from '../../virtual-background/constants';
import {
CLEAR_TIMEOUT,
SET_TIMEOUT,
TIMEOUT_TICK,
timerWorkerScript
} from './TimerWorker';
export interface IBackgroundEffectOptions {
height: number;
virtualBackground: {
backgroundType?: string;
blurValue?: number;
virtualSource?: string;
};
width: number;
}
/**
* Represents a modified MediaStream that adds effects to video background.
* <tt>JitsiStreamBackgroundEffect</tt> does the processing of the original
* video stream.
*/
export default class JitsiStreamBackgroundEffect {
_model: any;
_options: IBackgroundEffectOptions;
_stream: any;
_segmentationPixelCount: number;
_inputVideoElement: HTMLVideoElement;
_maskFrameTimerWorker: Worker;
_outputCanvasElement: HTMLCanvasElement;
_outputCanvasCtx: CanvasRenderingContext2D | null;
_segmentationMaskCtx: CanvasRenderingContext2D | null;
_segmentationMask: ImageData;
_segmentationMaskCanvas: HTMLCanvasElement;
_virtualImage: HTMLImageElement;
_virtualVideo: HTMLVideoElement;
/**
* Represents a modified video MediaStream track.
*
* @class
* @param {Object} model - The TFLite segmentation model (WASM module) used to extract the person mask.
* @param {Object} options - Segmentation dimensions.
*/
constructor(model: Object, options: IBackgroundEffectOptions) {
this._options = options;
if (this._options.virtualBackground.backgroundType === VIRTUAL_BACKGROUND_TYPE.IMAGE) {
this._virtualImage = document.createElement('img');
this._virtualImage.crossOrigin = 'anonymous';
this._virtualImage.src = this._options.virtualBackground.virtualSource ?? '';
}
this._model = model;
this._segmentationPixelCount = this._options.width * this._options.height;
// Bind event handler so it is only bound once for every instance.
this._onMaskFrameTimer = this._onMaskFrameTimer.bind(this);
// Workaround for FF issue https://bugzilla.mozilla.org/show_bug.cgi?id=1388974
this._outputCanvasElement = document.createElement('canvas');
this._outputCanvasElement.getContext('2d');
this._inputVideoElement = document.createElement('video');
}
/**
* EventHandler onmessage for the maskFrameTimerWorker WebWorker.
*
* @private
* @param {Object} response - The message event posted by the worker.
* @returns {void}
*/
_onMaskFrameTimer(response: { data: { id: number; }; }) {
if (response.data.id === TIMEOUT_TICK) {
this._renderMask();
}
}
/**
* Runs the post-processing: composites the segmentation mask, the foreground video and the selected background onto the output canvas.
*
* @returns {void}
*/
runPostProcessing() {
const track = this._stream.getVideoTracks()[0];
const { height, width } = track.getSettings() ?? track.getConstraints();
const { backgroundType } = this._options.virtualBackground;
if (!this._outputCanvasCtx) {
return;
}
this._outputCanvasElement.height = height;
this._outputCanvasElement.width = width;
this._outputCanvasCtx.globalCompositeOperation = 'copy';
// Draw segmentation mask.
// Smooth out the edges.
this._outputCanvasCtx.filter = backgroundType === VIRTUAL_BACKGROUND_TYPE.IMAGE ? 'blur(4px)' : 'blur(8px)';
this._outputCanvasCtx?.drawImage(
// @ts-ignore
this._segmentationMaskCanvas,
0,
0,
this._options.width,
this._options.height,
0,
0,
this._inputVideoElement.width,
this._inputVideoElement.height
);
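// 'source-in' keeps the upcoming foreground draw only where the canvas
// already has alpha, scaling each pixel by the mask's alpha value.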
this._outputCanvasCtx.globalCompositeOperation = 'source-in';
this._outputCanvasCtx.filter = 'none';
// Draw the foreground video.
// @ts-ignore
this._outputCanvasCtx?.drawImage(this._inputVideoElement, 0, 0);
// Draw the background.
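// 'destination-over' paints only into pixels that are still transparent,
// i.e. everywhere outside the person.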
this._outputCanvasCtx.globalCompositeOperation = 'destination-over';
if (backgroundType === VIRTUAL_BACKGROUND_TYPE.IMAGE) {
this._outputCanvasCtx?.drawImage(
// @ts-ignore
this._virtualImage,
0,
0,
this._outputCanvasElement.width,
this._outputCanvasElement.height
);
} else {
this._outputCanvasCtx.filter = `blur(${this._options.virtualBackground.blurValue}px)`;
// @ts-ignore
this._outputCanvasCtx?.drawImage(this._inputVideoElement, 0, 0);
}
}
/**
* Runs the TensorFlow Lite inference and writes the resulting person mask into the alpha channel of the segmentation mask.
*
* @returns {void}
*/
runInference() {
this._model._runInference();
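// The offset is in bytes; divide by 4 because HEAPF32 is a Float32Array
// view (4 bytes per element) over the WASM heap.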
const outputMemoryOffset = this._model._getOutputMemoryOffset() / 4;
for (let i = 0; i < this._segmentationPixelCount; i++) {
const person = this._model.HEAPF32[outputMemoryOffset + i];
// Sets only the alpha component of each pixel.
this._segmentationMask.data[(i * 4) + 3] = 255 * person;
}
this._segmentationMaskCtx?.putImageData(this._segmentationMask, 0, 0);
}
/**
* Loop function to render the background mask.
*
* @private
* @returns {void}
*/
_renderMask() {
this.resizeSource();
this.runInference();
this.runPostProcessing();
this._maskFrameTimerWorker.postMessage({
id: SET_TIMEOUT,
timeMs: 1000 / 30
});
}
/**
* Downscales the current video frame to the segmentation dimensions and copies its normalized RGB values into the model's input buffer.
*
* @returns {void}
*/
resizeSource() {
this._segmentationMaskCtx?.drawImage(
// @ts-ignore
this._inputVideoElement,
0,
0,
this._inputVideoElement.width,
this._inputVideoElement.height,
0,
0,
this._options.width,
this._options.height
);
const imageData = this._segmentationMaskCtx?.getImageData(
0,
0,
this._options.width,
this._options.height
);
const inputMemoryOffset = this._model._getInputMemoryOffset() / 4;
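// Repack RGBA bytes as normalized RGB floats in the model's input buffer;
// the alpha byte of each source pixel is skipped.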
for (let i = 0; i < this._segmentationPixelCount; i++) {
this._model.HEAPF32[inputMemoryOffset + (i * 3)] = Number(imageData?.data[i * 4]) / 255;
this._model.HEAPF32[inputMemoryOffset + (i * 3) + 1] = Number(imageData?.data[(i * 4) + 1]) / 255;
this._model.HEAPF32[inputMemoryOffset + (i * 3) + 2] = Number(imageData?.data[(i * 4) + 2]) / 255;
}
}
/**
* Checks if the local track supports this effect.
*
* @param {JitsiLocalTrack} jitsiLocalTrack - Track to apply effect.
* @returns {boolean} - Returns true if this effect can run on the specified
* track, false otherwise.
*/
isEnabled(jitsiLocalTrack: any) {
return jitsiLocalTrack.isVideoTrack() && jitsiLocalTrack.videoType === 'camera';
}
/**
* Starts the loop that captures video frames and renders the segmentation mask.
*
* @param {MediaStream} stream - Stream to be used for processing.
* @returns {MediaStream} - The stream with the applied effect.
*/
startEffect(stream: MediaStream) {
this._stream = stream;
this._maskFrameTimerWorker = new Worker(timerWorkerScript, { name: 'Blur effect worker' });
this._maskFrameTimerWorker.onmessage = this._onMaskFrameTimer;
const firstVideoTrack = this._stream.getVideoTracks()[0];
const { height, frameRate, width }
= firstVideoTrack.getSettings ? firstVideoTrack.getSettings() : firstVideoTrack.getConstraints();
this._segmentationMask = new ImageData(this._options.width, this._options.height);
this._segmentationMaskCanvas = document.createElement('canvas');
this._segmentationMaskCanvas.width = this._options.width;
this._segmentationMaskCanvas.height = this._options.height;
this._segmentationMaskCtx = this._segmentationMaskCanvas.getContext('2d');
this._outputCanvasElement.width = parseInt(width, 10);
this._outputCanvasElement.height = parseInt(height, 10);
this._outputCanvasCtx = this._outputCanvasElement.getContext('2d');
this._inputVideoElement.width = parseInt(width, 10);
this._inputVideoElement.height = parseInt(height, 10);
this._inputVideoElement.autoplay = true;
this._inputVideoElement.srcObject = this._stream;
this._inputVideoElement.onloadeddata = () => {
this._maskFrameTimerWorker.postMessage({
id: SET_TIMEOUT,
timeMs: 1000 / 30
});
};
return this._outputCanvasElement.captureStream(parseInt(frameRate, 10));
}
/**
* Stops the capture and render loop.
*
* @returns {void}
*/
stopEffect() {
this._maskFrameTimerWorker.postMessage({
id: CLEAR_TIMEOUT
});
this._maskFrameTimerWorker.terminate();
}
}
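A minimal consumer sketch for orientation: the class is driven entirely through `startEffect`/`stopEffect`. The `getUserMedia` wiring and the `'blur'` literal are illustrative assumptions, not part of this file (the literal is assumed to match `VIRTUAL_BACKGROUND_TYPE.BLUR`):

```
import JitsiStreamBackgroundEffect from './JitsiStreamBackgroundEffect';

async function startBlurredCamera(tflite: any): Promise<MediaStream> {
    const camera = await navigator.mediaDevices.getUserMedia({ video: true });

    // 256x144 matches the landscape segmentation model dimensions used by
    // the effect factory later in this commit.
    const effect = new JitsiStreamBackgroundEffect(tflite, {
        width: 256,
        height: 144,
        virtualBackground: {
            backgroundType: 'blur', // assumed value of VIRTUAL_BACKGROUND_TYPE.BLUR
            blurValue: 8
        }
    });

    // startEffect returns a canvas.captureStream() carrying the processed frames.
    return effect.startEffect(camera);
}
```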


@@ -0,0 +1,67 @@
/**
* The SET_TIMEOUT constant schedules a single timeout in the worker. It is
* passed in the id property of request.data; the timeMs property must also
* be set.
*
* ```
* //Request.data example:
* {
* id: SET_TIMEOUT,
* timeMs: 33
* }
* ```
*/
export const SET_TIMEOUT = 1;
/**
* The CLEAR_TIMEOUT constant cancels the pending timeout. It is passed in
* the id property of request.data.
*
* ```
* {
* id: CLEAR_TIMEOUT
* }
* ```
*/
export const CLEAR_TIMEOUT = 2;
/**
* The TIMEOUT_TICK constant identifies the worker's response message; it is set in the id property.
*
* ```
* {
* id: TIMEOUT_TICK
* }
* ```
*/
export const TIMEOUT_TICK = 3;
/**
* The following code is kept as a string so that a URL can be created from a
* Blob and passed to a WebWorker. Timers running inside a worker are not
* throttled when the tab is inactive, unlike timers on the main thread.
*/
const code = `
var timer;
onmessage = function(request) {
switch (request.data.id) {
case ${SET_TIMEOUT}: {
timer = setTimeout(() => {
postMessage({ id: ${TIMEOUT_TICK} });
}, request.data.timeMs);
break;
}
case ${CLEAR_TIMEOUT}: {
if (timer) {
clearTimeout(timer);
}
break;
}
}
};
`;
// @ts-ignore
export const timerWorkerScript = URL.createObjectURL(new Blob([ code ], { type: 'application/javascript' }));
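Note that the worker arms a single `setTimeout` per request, so the consumer must re-post `SET_TIMEOUT` on every tick; that is exactly what `_renderMask` does in `JitsiStreamBackgroundEffect`. A minimal standalone sketch of the protocol (the `renderFrame` callback is a placeholder):

```
import { CLEAR_TIMEOUT, SET_TIMEOUT, TIMEOUT_TICK, timerWorkerScript } from './TimerWorker';

const worker = new Worker(timerWorkerScript, { name: 'Tick worker' });

worker.onmessage = (e: MessageEvent) => {
    if (e.data.id === TIMEOUT_TICK) {
        renderFrame();

        // Re-arm the timer to keep ticking at ~30 fps.
        worker.postMessage({ id: SET_TIMEOUT, timeMs: 1000 / 30 });
    }
};

// Kick off the first tick.
worker.postMessage({ id: SET_TIMEOUT, timeMs: 1000 / 30 });

function renderFrame() {
    // Per-frame work goes here.
}

// To stop cleanly later:
// worker.postMessage({ id: CLEAR_TIMEOUT });
// worker.terminate();
```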


@@ -0,0 +1,106 @@
/* eslint-disable lines-around-comment */
import { IStore } from '../../app/types';
import { showWarningNotification } from '../../notifications/actions';
import { NOTIFICATION_TIMEOUT_TYPE } from '../../notifications/constants';
import { timeout } from '../../virtual-background/functions';
import logger from '../../virtual-background/logger';
import JitsiStreamBackgroundEffect, { IBackgroundEffectOptions } from './JitsiStreamBackgroundEffect';
// @ts-ignore
import createTFLiteModule from './vendor/tflite/tflite';
// @ts-ignore
import createTFLiteSIMDModule from './vendor/tflite/tflite-simd';
const models = {
modelLandscape: 'libs/selfie_segmentation_landscape.tflite'
};
/* eslint-enable lines-around-comment */
let modelBuffer: ArrayBuffer;
let tflite: any;
let wasmCheck;
let isWasmDisabled = false;
const segmentationDimensions = {
modelLandscape: {
height: 144,
width: 256
}
};
/**
* Creates a new instance of JitsiStreamBackgroundEffect. This loads the Meet background model that is used to
* extract person segmentation.
*
* @param {Object} virtualBackground - The virtual background options: the background type, the blur value
* and the image source.
* @param {Function} dispatch - The Redux dispatch function.
* @returns {Promise<JitsiStreamBackgroundEffect>}
*/
export async function createVirtualBackgroundEffect(virtualBackground: IBackgroundEffectOptions['virtualBackground'],
dispatch?: IStore['dispatch']) {
if (!MediaStreamTrack.prototype.getSettings && !MediaStreamTrack.prototype.getConstraints) {
throw new Error('JitsiStreamBackgroundEffect not supported!');
}
if (isWasmDisabled) {
dispatch?.(showWarningNotification({
titleKey: 'virtualBackground.backgroundEffectError'
}, NOTIFICATION_TIMEOUT_TYPE.LONG));
return;
}
// Checks if WebAssembly is supported and enabled in the browser. The
// wasm-check package is imported conditionally to prevent the browser
// from crashing when the user opens the app.
if (!tflite) {
try {
wasmCheck = require('wasm-check');
const tfliteTimeout = 10000;
if (wasmCheck?.feature?.simd) {
tflite = await timeout(tfliteTimeout, createTFLiteSIMDModule());
} else {
tflite = await timeout(tfliteTimeout, createTFLiteModule());
}
} catch (err: any) {
if (err?.message === '408') {
logger.error('Failed to download tflite model!');
dispatch?.(showWarningNotification({
titleKey: 'virtualBackground.backgroundEffectError'
}, NOTIFICATION_TIMEOUT_TYPE.LONG));
} else {
isWasmDisabled = true;
logger.error('Looks like WebAssembly is disabled or not supported on this browser', err);
dispatch?.(showWarningNotification({
titleKey: 'virtualBackground.webAssemblyWarning',
descriptionKey: 'virtualBackground.webAssemblyWarningDescription'
}, NOTIFICATION_TIMEOUT_TYPE.LONG));
}
return;
}
}
if (!modelBuffer) {
const modelResponse = await fetch(models.modelLandscape);
if (!modelResponse.ok) {
throw new Error('Failed to download tflite model!');
}
modelBuffer = await modelResponse.arrayBuffer();
tflite.HEAPU8.set(new Uint8Array(modelBuffer), tflite._getModelBufferMemoryOffset());
tflite._loadModel(modelBuffer.byteLength);
}
const options = {
...segmentationDimensions.modelLandscape,
virtualBackground
};
return new JitsiStreamBackgroundEffect(tflite, options);
}
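A hedged usage sketch of the factory: applying an image background to a raw camera stream. The `getUserMedia` call and the `'image'` literal are illustrative assumptions (the literal is assumed to match `VIRTUAL_BACKGROUND_TYPE.IMAGE`; imports omitted, `createVirtualBackgroundEffect` is the export above):

```
async function enableImageBackground(): Promise<MediaStream | undefined> {
    const effect = await createVirtualBackgroundEffect({
        backgroundType: 'image', // assumed value of VIRTUAL_BACKGROUND_TYPE.IMAGE
        virtualSource: 'https://example.com/background.jpg'
    });

    // The factory returns undefined when WebAssembly is disabled or the
    // module fails to load, so callers must handle that case.
    if (!effect) {
        return undefined;
    }

    const camera = await navigator.mediaDevices.getUserMedia({ video: true });

    return effect.startEffect(camera);
}
```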


@@ -0,0 +1,24 @@
# Virtual Background on stream effects
> From https://google.github.io/mediapipe/solutions/models.html#selfie-segmentation
#### Canvas 2D + CPU
This rendering pipeline is pretty much the same as for BodyPix. It relies on Canvas compositing properties to blend rendering layers according to the segmentation mask.
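A distilled sketch of the three compositing passes, mirroring `runPostProcessing` in `JitsiStreamBackgroundEffect` (names are illustrative):

```
function composite(
    ctx: CanvasRenderingContext2D,
    mask: HTMLCanvasElement,
    person: HTMLVideoElement,
    background: CanvasImageSource
) {
    // 1. 'copy' replaces the canvas content with the blurred mask; its
    //    alpha channel now encodes where the person is.
    ctx.globalCompositeOperation = 'copy';
    ctx.filter = 'blur(8px)';
    ctx.drawImage(mask, 0, 0, ctx.canvas.width, ctx.canvas.height);

    // 2. 'source-in' draws the camera frame scaled by that alpha channel.
    ctx.globalCompositeOperation = 'source-in';
    ctx.filter = 'none';
    ctx.drawImage(person, 0, 0);

    // 3. 'destination-over' fills the remaining transparency with the
    //    virtual background.
    ctx.globalCompositeOperation = 'destination-over';
    ctx.drawImage(background, 0, 0, ctx.canvas.width, ctx.canvas.height);
}
```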
Interaction with the TFLite inference tool is executed on the CPU: the input is converted from UInt8 to Float32 for the model, and a softmax is applied to the model output.
The frame rate is higher and the quality looks better than with BodyPix.
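The CPU-side conversion amounts to two small loops. A sketch, assuming the single-channel landscape model output where the confidence is written straight into the mask's alpha channel (names are illustrative; the real code lives in `JitsiStreamBackgroundEffect`):

```
// Input: normalize RGBA bytes to the Float32 RGB layout the model expects.
function fillInput(heap: Float32Array, offset: number, pixels: Uint8ClampedArray, count: number) {
    for (let i = 0; i < count; i++) {
        heap[offset + (i * 3)] = pixels[i * 4] / 255;
        heap[offset + (i * 3) + 1] = pixels[(i * 4) + 1] / 255;
        heap[offset + (i * 3) + 2] = pixels[(i * 4) + 2] / 255;
    }
}

// Output: per-pixel person confidence becomes the mask's alpha channel.
function fillMask(mask: ImageData, heap: Float32Array, offset: number, count: number) {
    for (let i = 0; i < count; i++) {
        mask.data[(i * 4) + 3] = 255 * heap[offset + i];
    }
}
```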
#### SIMD and non-SIMD
How to test on SIMD:
1. Go to chrome://flags/
2. Search for the SIMD flag
3. Enable WebAssembly SIMD support (enables support for the WebAssembly SIMD proposal)
4. Reopen Google Chrome
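
At runtime the effect loader above picks the SIMD build through the wasm-check package rather than a browser flag; a minimal detection sketch (vendor module imports as in the loader):

```
// @ts-ignore
import createTFLiteModule from './vendor/tflite/tflite';
// @ts-ignore
import createTFLiteSIMDModule from './vendor/tflite/tflite-simd';

const wasmCheck = require('wasm-check');

// Prefer the SIMD build of the TFLite module when the engine supports it.
const createModule = wasmCheck?.feature?.simd ? createTFLiteSIMDModule : createTFLiteModule;

export async function loadTFLite() {
    return createModule();
}
```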
More details:
- [WebAssembly](https://webassembly.org/)
- [WebAssembly SIMD](https://github.com/WebAssembly/simd)
- [TFLite](https://blog.tensorflow.org/2020/07/accelerating-tensorflow-lite-xnnpack-integration.html)
