WebCodecs flow walkthrough
A VideoEncoder takes VideoFrames captured from a canvas and encodes them into EncodedVideoChunks.
A VideoDecoder then does the reverse: it decodes the EncodedVideoChunks back into VideoFrames, which are drawn onto a second canvas for playback.
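Before looking at the full demo, here is a minimal sketch of that round trip in one place. It is only an illustration: it assumes a canvas cnv with a 2D context ctx already exist, and it leaves out the worker and capture plumbing that the real demo adds.
// Minimal round-trip sketch (not one of the demo files).
const decoder = new VideoDecoder({
  // Called once per decoded frame: draw it, then release it.
  output: (videoFrame) => {
    ctx.drawImage(videoFrame, 0, 0);
    videoFrame.close();
  },
  error: (e) => console.error(e),
});

const encoder = new VideoEncoder({
  // Called once per encoded chunk; the first call carries a decoderConfig in its metadata.
  output: (chunk, metadata) => {
    if (metadata.decoderConfig) decoder.configure(metadata.decoderConfig);
    decoder.decode(chunk);
  },
  error: (e) => console.error(e),
});

encoder.configure({ codec: "avc1.42001E", width: 640, height: 480, avc: { format: "annexb" } });

const frame = new VideoFrame(cnv, { timestamp: 0 });
encoder.encode(frame, { keyFrame: true });
frame.close();
encoder.flush(); // returns a Promise that resolves once all queued frames have been encoded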
WebCodecs code walkthrough
// index.html
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>WebCodecs demo: Encoding and Decoding</title>
<style>
canvas {
padding: 10px;
background: gold;
margin: 5px;
}
</style>
</head>
<body>
<canvas id="src" width="640" height="480"></canvas>
<div id="dst"></div>
<script>
const foods = ['🍇','🍈','🍉','🍊','🍋','🍌','🍍','🥭','🍎','🍏','🍐','🍑','🍒','🍓','🥝','🍅','🥥','🥑','🍆','🥔','🥕','🌽','🌶️','🥒','🥬','🥦','🧄','🧅','🍄','🥜','🌰',
'🍞','🍞','🥐','🥖','🥨','🥯','🥞','🧇','🧀','🍖','🍗','🥩','🥓','🍔','🍟','🍕','🌭','🥪','🌮','🌯','🥙','🧆','🥚','🍳','🥘','🍲','🥣','🥗','🍿','🧈','🧂',
'🥫','🍱','🍘','🍙','🍚','🍛','🍜','🍝','🍠','🍢','🍣','🍤','🍥','🥮','🍡','🥟','🥠','🥡','🦀','🦞','🦐','🦑','🦪','🍦','🍧','🍨','🍩','🍪','🎂','🍰','🧁',
'🥧','🍫','🍬','🍭','🍮','🍯','🍼','🥛','☕','🍵','🍶','🍾','🍷','🍸','🍹','🍺','🍻','🥂','🥃','🥤','🧃','🧉','🧊'];
function getRandomFood() {
let index = Math.floor(Math.random() * foods.length);
return foods[index];
}
// Draw the animation on the source canvas
async function startDrawing() {
let cnv = document.getElementById("src");
let ctx = cnv.getContext('2d');
ctx.fillStyle = "#fff5e6";
let width = cnv.width;
let height = cnv.height;
let cx = width / 2;
let cy = height / 2;
let r = Math.min(width, height) / 5;
ctx.font = '30px Helvetica';
const text = getRandomFood() + "📹📷Hello WebCodecs 🎥🎞️" + getRandomFood();
const size = ctx.measureText(text).width;
let drawOneFrame = function (time) {
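// time is the requestAnimationFrame timestamp in milliseconds: one full rotation every 5 s, a scale pulse every 7 s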
let angle = Math.PI * 2 * (time / 5000);
let scale = 1 + 0.3 * Math.sin(Math.PI * 2 * (time / 7000));
ctx.save();
ctx.fillRect(0, 0, width, height);
ctx.translate(cx, cy);
ctx.rotate(angle);
ctx.scale(scale, scale);
ctx.fillStyle = "hsl(" + (angle * 40 ) + ",80%,50%)";
ctx.fillRect(-size / 2, 10, size, 25);
ctx.fillStyle = 'black';
ctx.fillText(text, -size / 2, 0);
ctx.restore();
window.requestAnimationFrame(drawOneFrame);
}
window.requestAnimationFrame(drawOneFrame);
}
function startWorker() {
let worker = new Worker('video-worker.js', { name: "Video worker"});
worker.onmessage = function(e) {
// Recreate worker in case of an error
console.log('Worker error: ' + e.data);
worker.terminate();
startWorker();
};
// The code below captures the animation playing on the source canvas
let src_cnv = document.getElementById("src");
const fps = 25;
// captureStream() returns a CanvasCaptureMediaStream, a real-time video capture of the canvas
let stream = src_cnv.captureStream(fps);
const track = stream.getVideoTracks()[0];
// MediaStreamTrackProcessor consumes the track's source and exposes it as a stream of media frames (VideoFrame objects for a video track); think of it as a frame-by-frame reader for the track
const media_processor = new MediaStreamTrackProcessor(track);
// media_processor.readable is a ReadableStream of VideoFrames (the worker will call getReader() on it): https://developer.mozilla.org/en-US/docs/Web/API/ReadableStream
const reader = media_processor.readable;
// Create a new destination canvas
const dst_cnv = document.createElement('canvas');
dst_cnv.width = src_cnv.width;
dst_cnv.height = src_cnv.height;
const dst = document.getElementById("dst");
if (dst.firstChild)
dst.removeChild(dst.firstChild);
dst.appendChild(dst_cnv);
// Transfer control of the destination canvas to an OffscreenCanvas so the worker can draw to it without blocking the main thread
let offscreen = dst_cnv.transferControlToOffscreen();
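// Both the OffscreenCanvas and the ReadableStream are transferable objects: postMessage() moves them to the worker instead of copying them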
worker.postMessage({
canvas : offscreen,
frame_source : reader,
fps : fps
}, [offscreen, reader]);
}
function main() {
if (!("VideoFrame" in window)) {
document.body.innerHTML = "<h1>WebCodecs API is not supported.</h1>";
return;
}
startDrawing();
startWorker();
}
document.body.onload = main;
</script>
</body>
</html>
// video-worker.js
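// "avc1.42001E" = H.264 Baseline profile (0x42), no constraint flags (0x00), level 3.0 (0x1E = 30)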
let codec_string = "avc1.42001E";
function reportError(e) {
// Report error to the main thread
console.log(e.message)
postMessage(e.message);
}
// Capture the animation from the source canvas and encode its frames
function captureAndEncode(frame_source, cnv, fps, processChunk) {
let frame_counter = 0;
const init = {
// Called each time the encoder finishes encoding a frame, receiving the EncodedVideoChunk (plus metadata) as arguments
output: processChunk,
error: reportError
};
const config = {
// Common codec strings: https://www.w3.org/TR/webcodecs-codec-registry/#video-codec-registry
codec: codec_string,
width: cnv.width,
height: cnv.height,
bitrate: 1000000,
avc : { format: "annexb" },
framerate: fps,
// Hardware-acceleration preference. Why prefer software here? Normally you would let a hardware encoder be used when one is available ("no-preference" or "prefer-hardware")
hardwareAcceleration : "prefer-software",
};
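// Note: support for this config could be checked up front with VideoEncoder.isConfigSupported(config),
// which resolves to { supported, config }. The demo skips that check.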
// The VideoEncoder turns VideoFrames into EncodedVideoChunks
let encoder = new VideoEncoder(init);
// configure() switches the encoder to the "configured" state and asynchronously prepares it to accept VideoFrames for encoding with these parameters
encoder.configure(config);
let reader = frame_source.getReader();
async function readFrame () {
// Read the next frame captured from the source canvas
const result = await reader.read();
if (result.done) return;
let frame = result.value;
if (encoder.encodeQueueSize < 2) {
frame_counter++;
// Request a key frame every 150 frames so the decoder can start (or recover) from a self-contained frame
const insert_keyframe = (frame_counter % 150) == 0;
// Encode this frame
encoder.encode(frame, { keyFrame: insert_keyframe });
frame.close();
} else {
// The encoder queue is backing up; drop this frame so the destination canvas does not fall behind the source
console.log("dropping a frame");
frame.close();
}
setTimeout(readFrame, 1);
};
readFrame();
}
// Create the VideoDecoder and start the decode-and-render loop
function startDecodingAndRendering(cnv) {
let ctx = cnv.getContext("2d");
let ready_frames = [];
let underflow = true;
// Render loop: draw decoded frames onto the (offscreen) destination canvas
async function renderFrame() {
if (ready_frames.length == 0) {
underflow = true;
return;
}
let frame = ready_frames.shift();
underflow = false;
ctx.drawImage(frame, 0, 0);
frame.close();
// Schedule the next frame immediately (it is not obvious why setTimeout is used here instead of requestAnimationFrame)
setTimeout(renderFrame, 0);
}
// The frame argument is a VideoFrame object representing a single frame of video: https://developer.mozilla.org/en-US/docs/Web/API/VideoFrame
function handleFrame(frame) {
ready_frames.push(frame);
// underflow flag: set when the ready-frame queue runs dry; a newly arriving frame then restarts the render loop
if (underflow) {
underflow = false;
setTimeout(renderFrame, 0);
}
}
// init takes two callbacks: output and error
const init = {
// Called each time the decoder finishes decoding a frame, receiving the decoded VideoFrame as its argument
output: handleFrame,
error: reportError
};
// Create the VideoDecoder, which turns EncodedVideoChunks back into VideoFrames for the canvas to use
let decoder = new VideoDecoder(init);
return decoder;
}
function main(frame_source, canvas, fps) {
let decoder = startDecodingAndRendering(canvas);
// Handle each EncodedVideoChunk produced by the encoder
function processChunk(chunk, md) {
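// md.decoderConfig is only present on the first chunk and whenever the encoder's output configuration changes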
let config = md.decoderConfig;
if (config) {
console.log("decoder reconfig");
decoder.configure(config);
}
// Decode the chunk; decoded frames arrive via the decoder's output callback
decoder.decode(chunk);
}
captureAndEncode(frame_source, canvas, fps, processChunk);
}
self.onmessage = async function(e) {
let frame_source = e.data.frame_source;
let canvas = e.data.canvas;
let fps = e.data.fps;
main(frame_source, canvas, fps);
}
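The demo runs until the page is closed. If you wanted to shut the pipeline down cleanly, a teardown along the following lines would work; stopPipeline is a hypothetical helper, not part of the demo, and it assumes access to the reader, encoder and decoder created above.
// Hypothetical teardown helper (not part of the original demo).
async function stopPipeline(reader, encoder, decoder) {
  await reader.cancel();  // stop pulling VideoFrames from the MediaStreamTrackProcessor
  await encoder.flush();  // let queued frames come out as EncodedVideoChunks
  encoder.close();
  await decoder.flush();  // let queued chunks be decoded and drawn
  decoder.close();
}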