虚拟背景
思路一:当原始拍摄背景统一(如纯色幕布)时,可采用传统色键(Chroma Key)技术:
纯色背景替换核心原理
复制原始视频帧 → 颜色检测 → 生成透明蒙版 → 合成新背景 → 输出视频流
环境初始化阶段
<!-- Base page structure:
     sourceVideo      — hidden <video> receiving the raw camera feed
     processingCanvas — hidden <canvas> where each frame is composited
     outputVideo      — visible <video> showing the processed stream -->
<video id="sourceVideo" autoplay muted style="display:none;"></video>
<canvas id="processingCanvas" style="display:none;"></canvas>
<video id="outputVideo" autoplay muted>
</video>
// Capture configuration shared by the camera request and the canvas pipeline.
const videoConstraints = {
  width: 1280,
  height: 720,
  frameRate: { ideal: 30 },
};

// Module-level handles populated during initialization.
let mediaStream;      // raw camera stream (video + audio)
let processedStream;  // canvas-generated stream with the replaced background
let peerConnection;   // RTCPeerConnection used for the outgoing call
WebRTC与视频处理集成步骤
- 复制原始视频帧
/**
 * Request camera + microphone access, wire the raw stream into the hidden
 * preview <video>, and start the canvas processing pipeline.
 * Errors (denied permission, no device) are logged, not rethrown.
 */
async function initCamera() {
  try {
    mediaStream = await navigator.mediaDevices.getUserMedia({
      video: videoConstraints,
      audio: true, // keep the original audio track
    });

    // Raw-feed preview (debugging aid).
    document.getElementById('sourceVideo').srcObject = mediaStream;

    // Kick off the per-frame background-replacement pipeline.
    initCanvasProcessor();
  } catch (error) {
    console.error('摄像头初始化失败:', error);
  }
}
- 视频帧处理管线
/**
 * Build the canvas-based frame pipeline: draw each camera frame onto the
 * hidden canvas, run chroma-key background replacement on its pixels, and
 * expose the canvas contents as `processedStream`.
 */
function initCanvasProcessor() {
  const canvas = document.getElementById('processingCanvas');
  const ctx = canvas.getContext('2d');
  const sourceVideo = document.getElementById('sourceVideo');

  // Match the canvas resolution to the capture constraints.
  canvas.width = videoConstraints.width;
  canvas.height = videoConstraints.height;

  // The processed media stream is captured straight off the canvas at 25 fps.
  processedStream = canvas.captureStream(25);

  // Per-frame processing loop driven by requestAnimationFrame.
  const processFrame = () => {
    if (sourceVideo.readyState === 4) { // HAVE_ENOUGH_DATA
      ctx.drawImage(sourceVideo, 0, 0);
      const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
      // Run the background-replacement algorithm (chroma-key pass).
      ctx.putImageData(processChromaKey(imageData), 0, 0);
    }
    requestAnimationFrame(processFrame);
  };

  // Start looping once the source video is actually playing.
  sourceVideo.onplaying = () => { processFrame(); };
}
- WebRTC连接建立
/**
 * Create the RTCPeerConnection, attach the processed video track plus the
 * original audio track, and generate an SDP offer for the signaling layer.
 * Must run after initCamera()/initCanvasProcessor() have populated
 * `mediaStream` and `processedStream`.
 */
async function startCall() {
  // Create the PeerConnection with a public STUN server for NAT traversal.
  // (Fixes the original, where the word "Connection" from a wrapped comment
  // had leaked onto the code line and broke the syntax.)
  peerConnection = new RTCPeerConnection({
    iceServers: [{ urls: "stun:stun.l.google.com:19302" }]
  });

  // Add the processed (background-replaced) video track.
  const videoTrack = processedStream.getVideoTracks()[0];
  peerConnection.addTrack(videoTrack, processedStream);

  // Add the original audio track (optional).
  const audioTrack = mediaStream.getAudioTracks()[0];
  peerConnection.addTrack(audioTrack, mediaStream);

  // Generate the SDP offer and install it as the local description.
  const offer = await peerConnection.createOffer();
  await peerConnection.setLocalDescription(offer);
  // The offer should now be sent to the remote peer via a signaling server.
}
- 输出流监控
// Display the locally processed video stream in the output <video>.
// NOTE(review): these statements run at module level — `processedStream` and
// `peerConnection` must already be initialized (initCanvasProcessor() /
// startCall()) before this executes, or they will fail; confirm the intended
// execution order in the surrounding page.
document.getElementById('outputVideo').srcObject = processedStream; // receive remote stream (example)
peerConnection.ontrack = (event) => {
const remoteVideo = document.createElement('video'); remoteVideo.srcObject = event.streams[0]; document.body.appendChild(remoteVideo);
};
- 背景替换算法
/**
 * Chroma-key background removal: compares each pixel against a target key
 * color in YUV space and writes a per-pixel alpha into a new frame, leaving
 * key-colored background pixels transparent.
 *
 * @param inputFrame - source RGBA pixels for one video frame
 * @param params - keying configuration; the body reads targetColor ([r,g,b]),
 *   tolerance, spillRemoval and edgeSmooth. Defaults key out pure green so
 *   callers (e.g. the frame loop) can invoke with just the frame.
 * @returns a new ImageData with the background rendered transparent
 */
function processChromaKey(
  inputFrame: ImageData,
  params: ChromaKeyParams = {
    targetColor: [0, 255, 0], // pure green screen
    tolerance: 120,
    spillRemoval: false,
    edgeSmooth: false,
  },
): ImageData {
  const output = new ImageData(inputFrame.width, inputFrame.height);
  const targetYuv = rgbToYuv(...params.targetColor);

  for (let i = 0; i < inputFrame.data.length; i += 4) {
    let r = inputFrame.data[i];
    let g = inputFrame.data[i + 1];
    let b = inputFrame.data[i + 2];

    // Suppress key-color spill bleeding onto the foreground.
    if (params.spillRemoval) {
      [r, g, b] = removeColorSpill(r, g, b, params.targetColor);
    }

    // Compare in YUV: chroma channels (U/V) are weighted above luma (Y) so
    // brightness variation does not trigger keying.
    const pixelYuv = rgbToYuv(r, g, b);
    const yDiff = Math.abs(pixelYuv[0] - targetYuv[0]);
    const uDiff = Math.abs(pixelYuv[1] - targetYuv[1]);
    const vDiff = Math.abs(pixelYuv[2] - targetYuv[2]);
    const distance = yDiff * 0.5 + uDiff * 1.2 + vDiff * 1.2;

    // Map color distance to transparency via the tolerance curve.
    const alpha = calculateAlpha(distance, params.tolerance);
    output.data[i] = r;
    output.data[i + 1] = g;
    output.data[i + 2] = b;
    output.data[i + 3] = alpha;
  }

  // Optional post-pass to soften mask edges in place.
  if (params.edgeSmooth) {
    applyEdgeSmoothing(output.data, inputFrame.width, inputFrame.height);
  }
  return output;
}
思路二:使用 AI 模型进行图像分割
利用谷歌开源的一个机器学习框架 MediaPipe实现虚拟背景的功能。
@mediapipe/selfie_segmentation依赖包
- 初始化视频流的步骤与上文相同,此处不再赘述
- 分割工具准备
/**
 * Prepare the MediaPipe SelfieSegmentation pipeline and its result handler.
 * Each result composites the person (via the segmentation mask) on top of a
 * replacement background image drawn onto the processing canvas.
 *
 * @param bg - background image name; resolves to `/image/${bg}.jpg`
 */
function initCanvasProcessor(bg) {
  const canvas = document.getElementById('processingCanvas');
  const ctx = canvas.getContext('2d');

  // Replacement background image. 'anonymous' enables a CORS request
  // WITHOUT credentials (needed so drawing it does not taint the canvas).
  const image = new Image();
  image.crossOrigin = 'anonymous';
  image.src = `/image/${bg}.jpg`; // fixed: was a `==` comparison, not an assignment

  // Load the segmentation model assets from the jsDelivr CDN
  // (fixed: the URL contained a stray quote in the host name).
  selfieSegmentation = new SFS.SelfieSegmentation({
    locateFile: (file) =>
      `https://cdn.jsdelivr.net/npm/@mediapipe/selfie_segmentation@0.1.1632777926/${file}`,
  });
  selfieSegmentation.setOptions({
    modelSelection: 1,
    minDetectionConfidence: 0.5,
    minTrackingConfidence: 0.5,
  });

  // Composite callback: uses the local `ctx`/`canvas` and the `res` argument
  // (the original referenced undefined canvasCtx/canvasElement/results).
  selfieSegmentation.onResults((res) => {
    ctx.save();
    ctx.clearRect(0, 0, canvas.width, canvas.height);

    if (!image.complete || image.naturalWidth === 0) {
      console.error("图像未正确加载");
      return;
    }
    // If a <video> were used as the background source, it must be ready.
    if (image.tagName === 'VIDEO' && image.readyState < 2) {
      console.error("视频未就绪");
      return;
    }

    // 1. Draw the person mask.
    ctx.drawImage(res.segmentationMask, 0, 0, canvas.width, canvas.height);
    // 2. Draw the new background only where the mask is absent.
    ctx.globalCompositeOperation = 'source-out';
    ctx.drawImage(image, 0, 0, image.width, image.height, 0, 0, canvas.width, canvas.height);
    // 3. Draw the camera frame behind, so the person shows through the mask.
    ctx.globalCompositeOperation = 'destination-atop';
    ctx.drawImage(res.image, 0, 0, canvas.width, canvas.height);
    ctx.restore();
  });
}
- 背景替换
/**
 * Rebuild the segmentation frame loop and return a MediaStream captured from
 * the processing canvas. Any loop/stream from a previous call is torn down
 * first so only one pipeline runs at a time.
 *
 * @returns MediaStream of the composited (background-replaced) canvas
 */
function initCanvasProcessor() {
  const canvas = document.getElementById('processingCanvas');
  // Fixed: the loop below referenced an undefined `video`.
  const video = document.getElementById('sourceVideo');

  // Stop a previous frame loop, if any.
  if (animationId) {
    cancelAnimationFrame(animationId);
    animationId = null;
  }
  // End a previous captured stream cleanly.
  if (canvasStream) {
    canvasStream.getTracks().forEach((track) => track.stop());
  }

  // Match the canvas resolution to the capture constraints.
  canvas.width = videoConstraints.width;
  canvas.height = videoConstraints.height;

  // Only send a frame to the model when the video has advanced, avoiding
  // re-segmentation of identical frames. `currentTime` is in seconds, so
  // start below any valid value (fixed: was `new Date()`, which made the
  // comparison always false and starved the model of frames).
  let lastTime = -1;
  async function getFrames() {
    const now = video.currentTime;
    if (now > lastTime) {
      await selfieSegmentation.send({ image: video });
    }
    lastTime = now;
    // Endless rAF loop — remember to cancelAnimationFrame() on teardown.
    animationId = requestAnimationFrame(getFrames);
  }
  getFrames();

  // Keep the handle so the next call can stop these tracks
  // (fixed: the original returned the stream without storing it).
  canvasStream = canvas.captureStream(25);
  return canvasStream;
}
- 获取虚拟流且将其他端的远程流替换
/**
 * Swap the outgoing video on every active peer connection for the virtual
 * background stream produced by initCanvasProcessor().
 *
 * @param bg - background name (default "bg"); NOTE(review): currently not
 *   forwarded to the pipeline — confirm whether it should be.
 */
function changeRemoteStream(bg = "bg") {
  const stream = initCanvasProcessor();
  // Grab the processed video track that will be sent out.
  const [videoTrack] = stream.getVideoTracks();
  // Replace the video track on every tracked RTCPeerConnection.
  RtcPcMaps.forEach((pc) => {
    // Guard: a sender's track may be null, and a connection may have no
    // video sender at all (the original threw in both cases).
    const sender = pc.getSenders().find((s) => s.track && s.track.kind === 'video');
    if (sender) {
      sender.replaceTrack(videoTrack);
    }
  });
}
总结:以上使用两种方法实现了虚拟背景替换。可进一步改进的方向:
- 环境校准工具:开发背景颜色校准界面,允许用户手动采样背景色
- 光照监测模块:实时检测画面亮度变化并提示用户调整
- 混合模式:在检测到复杂背景时自动切换至AI模型方案
如果您的应用场景满足以下条件,传统方案完全可行:
- 背景颜色均匀且与前景差异明显
- 摄像头位置固定
- 环境光照可控