Commit

blur with insertable streams
Mazuh committed Mar 8, 2023
1 parent 4194163 commit 5b905d1
Showing 6 changed files with 116 additions and 35 deletions.
11 changes: 7 additions & 4 deletions package-lock.json

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion package.json
@@ -29,7 +29,7 @@
"devDependencies": {
"@babel/core": "^7.18.6",
"prettier": "^2.7.1",
"typescript": "^4.7.4"
"typescript": "^4.9.5"
},
"dependencies": {
"@mediapipe/drawing_utils": "^0.3.1675466124",
67 changes: 53 additions & 14 deletions src/advanced/background-blur.ts
@@ -4,10 +4,10 @@ import { CleanupFn } from '../webrtc';
 import { SelfieSegmentation } from '@mediapipe/selfie_segmentation';

 export async function startBackgrondBlur(
-  videoEl: HTMLVideoElement,
-  canvasEl: HTMLCanvasElement
+  sourceVideoEl: HTMLVideoElement,
+  targetVideoEl: HTMLVideoElement
 ): Promise<CleanupFn> {
-  const canvasCtx = canvasEl.getContext('2d')!;
+  // setup semantic segmentation and download its trainings

   const selfieSegmentation = new SelfieSegmentation({
     locateFile: (file) =>
@@ -18,35 +18,74 @@ export async function startBackgrondBlur(
     selfieMode: true,
   });

-  selfieSegmentation.onResults((results: any) => {
+  // setup drawing instructions
+
+  const canvas = new OffscreenCanvas(sourceVideoEl.clientWidth, sourceVideoEl.clientHeight);
+  const canvasCtx = canvas.getContext('2d') as OffscreenCanvasRenderingContext2D;
+
+  selfieSegmentation.onResults((results) => {
     canvasCtx.save();
-    canvasCtx.clearRect(0, 0, canvasEl.width, canvasEl.height);
+    canvasCtx.clearRect(0, 0, canvas.width, canvas.height);
     canvasCtx.filter = '';

     // just redraw entire image in default composition.
     canvasCtx.globalCompositeOperation = 'source-over';
-    canvasCtx.drawImage(results.image, 0, 0, canvasEl.width, canvasEl.height);
+    canvasCtx.drawImage(results.image, 0, 0, canvas.width, canvas.height);

     // existing image is kept only where selfie segment overlaps it,
     // i.e., now only selfie will remain.
     canvasCtx.globalCompositeOperation = 'destination-in';
-    canvasCtx.drawImage(results.segmentationMask, 0, 0, canvasEl.width, canvasEl.height);
+    canvasCtx.drawImage(results.segmentationMask, 0, 0, canvas.width, canvas.height);

     // draw blurred image behind existing content,
     // i.e., now selfie will remain but with the blurred image behind it.
     canvasCtx.filter = 'blur(10px)';
     canvasCtx.globalCompositeOperation = 'destination-over';
-    canvasCtx.drawImage(results.image, 0, 0, canvasEl.width, canvasEl.height);
+    canvasCtx.drawImage(results.image, 0, 0, canvas.width, canvas.height);

     canvasCtx.restore();
   });

-  const handleFrame = () =>
-    selfieSegmentation
-      .send({ image: videoEl })
-      .then(() => videoEl.requestVideoFrameCallback(handleFrame));
-  videoEl.requestVideoFrameCallback(handleFrame);
+  // setup processor
+
+  const [sourceVideoTrack] = (sourceVideoEl.srcObject as MediaStream).getVideoTracks();
+  const trackProcessor = new MediaStreamTrackProcessor({ track: sourceVideoTrack });
+
+  const transformer = new TransformStream({
+    async transform(videoFrame, controller) {
+      // send frame for MediaPipe to process (and later generate a result to be handled)
+      videoFrame.width = videoFrame.displayWidth;
+      videoFrame.height = videoFrame.displayHeight;
+      await selfieSegmentation.send({ image: videoFrame });
+      videoFrame.close();
+
+      // initialize a new frame from canvas and put it on transformation queue
+      const nextFrame = new VideoFrame(canvas, {
+        timestamp: videoFrame.timestamp,
+      });
+      controller.enqueue(nextFrame);
+    },
+  });
+
+  // connecting everything...
+  // from source track reader thru transformer and ends at a given track generator,
+  // and such track generator is just a lazy evaluated track to be added to a stream,
+  // and such stream will be played in a regular DOM element.
+
+  const trackGenerator = new MediaStreamTrackGenerator({ kind: 'video' });
+  trackProcessor.readable.pipeThrough(transformer).pipeTo(trackGenerator.writable);
+
+  const processedStream = new MediaStream();
+  processedStream.addTrack(trackGenerator);
+
+  targetVideoEl.srcObject = processedStream;
+  await targetVideoEl.play();
+
+  // build function to cleanup these resources when needed

-  const stopSelfieSegmentation = async () => selfieSegmentation.close(); // fyi, see: https://github.com/google/mediapipe/issues/3373
+  const stopSelfieSegmentation = async () => {
+    selfieSegmentation.close(); // fyi, see: https://github.com/google/mediapipe/issues/3373
+    processedStream.getTracks().forEach((t) => t.stop());
+  };
   return stopSelfieSegmentation;
 }
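
Side note, not part of the diff above: MediaStreamTrackProcessor and MediaStreamTrackGenerator ship only in Chromium-based browsers, so a caller would likely want to feature-detect them before wiring the blur. A minimal sketch, where tryStartBackgroundBlur and hasInsertableStreams are hypothetical helpers and the import path is assumed from this repository's layout:

import { startBackgrondBlur } from './advanced/background-blur';

// True only where the processor -> transformer -> generator chain can actually run.
function hasInsertableStreams(): boolean {
  return 'MediaStreamTrackProcessor' in window && 'MediaStreamTrackGenerator' in window;
}

export async function tryStartBackgroundBlur(
  sourceVideoEl: HTMLVideoElement,
  targetVideoEl: HTMLVideoElement
) {
  if (!hasInsertableStreams()) {
    console.warn('Insertable Streams unsupported in this browser; background blur stays disabled.');
    return null;
  }
  // Resolves to the same cleanup function that startBackgrondBlur returns.
  return startBackgrondBlur(sourceVideoEl, targetVideoEl);
}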
36 changes: 28 additions & 8 deletions src/index.html
@@ -77,21 +77,30 @@ <h1>Mirror</h1>
       <br />

       <section class="advanced-section">
-        <h2>Advanced experiments</h2>
-        <p>
-          Most of them only work with desktop Chrome and you need to have previously
-          <strong>started your video input above before</strong> hitting start buttons below.
-        </p>
+        <h2>WebRTC experiments</h2>

         <article>
           <h3>Browser data</h3>
+          <p>By WebRTCHacks Adapter.js inference.</p>
+          <p>
+            For some mobile devices, your <strong>real</strong> browser engine will be revealed
+            below, despite its user-friendly presentation name.
+          </p>
           <p>Name: <code id="browsername">unknown</code></p>
           <p>Version: <code id="browserversion">unknown</code></p>
         </article>

         <article>
           <h3>Face detection</h3>
-          <button type="button" id="facedetection-start-btn" class="d-block">Start</button>
+          <p>Using MediaPipe with native Canvas.</p>
+          <p>
+            You need to have
+            <strong>successfully started</strong> your video input above
+            <strong>before</strong> hitting the start button below.
+          </p>
+          <button type="button" id="facedetection-start-btn" class="d-block">
+            Start face detection
+          </button>
           <button type="button" id="facedetection-stop-btn" class="outline-yellow d-none">
             Stop
           </button>
@@ -100,11 +109,22 @@ <h3>Face detection</h3>

         <article>
           <h3>Background blur</h3>
-          <button type="button" id="backgroundblur-start-btn" class="d-block">Start</button>
+          <p>
+            Using MediaPipe with native (and very new) Insertable Streams. It only works on
+            <strong>desktop Chrome</strong>.
+          </p>
+          <p>
+            You need to have
+            <strong>successfully started</strong> your video input above
+            <strong>before</strong> hitting the start button below.
+          </p>
+          <button type="button" id="backgroundblur-start-btn" class="d-block">
+            Start background blur
+          </button>
           <button type="button" id="backgroundblur-stop-btn" class="outline-yellow d-none">
             Stop
           </button>
-          <canvas id="backgroundblur-canvas" class="d-none"></canvas>
+          <video id="backgroundblur-video" class="d-none w-100" loop muted playsinline></video>
         </article>
       </section>
     </main>
20 changes: 20 additions & 0 deletions src/styles.css
@@ -58,11 +58,27 @@ audio {
   outline: 1px solid #44bd32;
 }

+h2 {
+  margin-left: 15px;
+}
+
+h3 {
+  margin-left: 25px;
+}
+
 @media only screen and (max-width: 800px) {
   select,
   audio {
     width: 100%;
   }
+
+  h2 {
+    margin-left: 8px;
+  }
+
+  h3 {
+    margin-left: 20px;
+  }
 }

/* specific stuff */
@@ -102,3 +118,7 @@
   margin-top: 15px;
   margin-bottom: 15px;
 }
+
+.advanced-section article {
+  margin-top: 50px;
+}
15 changes: 7 additions & 8 deletions src/ui.ts
@@ -249,22 +249,21 @@ export async function setupBackgroundBlur() {
   const stopBtn = getOrDie('backgroundblur-stop-btn') as HTMLButtonElement;

   startBtn.addEventListener('click', async () => {
-    console.warn('click');
-    const videoEl = getOrDie('camera-demo-video') as HTMLVideoElement;
+    const sourceVideoEl = getOrDie('camera-demo-video') as HTMLVideoElement;

-    const canvasEl = getOrDie('backgroundblur-canvas') as HTMLCanvasElement;
-    canvasEl.width = videoEl.clientWidth;
-    canvasEl.height = videoEl.clientHeight;
+    const targetVideoEl = getOrDie('backgroundblur-video') as HTMLVideoElement;
+    targetVideoEl.width = sourceVideoEl.clientWidth;
+    targetVideoEl.height = sourceVideoEl.clientHeight;

-    showBlock(canvasEl);
+    showBlock(targetVideoEl);
     hideBlock(startBtn);
     showBlock(stopBtn);

-    const stopFaceDetection = await startBackgrondBlur(videoEl, canvasEl);
+    const stopFaceDetection = await startBackgrondBlur(sourceVideoEl, targetVideoEl);

     const fullCleanup = () =>
       stopFaceDetection().finally(() => {
-        hideBlock(canvasEl);
+        hideBlock(targetVideoEl);
         hideBlock(stopBtn);
         showBlock(startBtn);
       });
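
Side note, not part of the diff: the target element above is sized from the source element's rendered size (clientWidth/clientHeight). A sketch of an alternative that reads the capture resolution from the source track instead, reusing the getOrDie helper already present in src/ui.ts:

const sourceVideoEl = getOrDie('camera-demo-video') as HTMLVideoElement;
const [sourceTrack] = (sourceVideoEl.srcObject as MediaStream).getVideoTracks();
// MediaTrackSettings.width/height are optional, so fall back to the rendered size.
const { width = sourceVideoEl.clientWidth, height = sourceVideoEl.clientHeight } =
  sourceTrack.getSettings();

const targetVideoEl = getOrDie('backgroundblur-video') as HTMLVideoElement;
targetVideoEl.width = width;
targetVideoEl.height = height;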
