前端使用wasm(ffmpeg)解码视频数据,解码后得到的视频帧数据是YUV格式的,需要使用pixi.js把它渲染出来。首先,需要将yuv数据转为rgb数据,然后将数据绘制到离屏canvas上,最后将离屏canvas作为精灵图的纹理进行渲染。 实现代码如下:
// 渲染yuv帧
renderYUVYFrame(yuvData, videoParams) {
const { videoWidth, videoHeight } = videoParams
if (!this.yuvCtx) {
this.yuvCanvas = new OffscreenCanvas(videoWidth, videoHeight)
this.yuvCtx = this.yuvCanvas.getContext('2d')
}
// 将yuv数据绘制到离屏yuvCanvas上
yvu2Canvas(this.yuvCtx, yuvData, videoWidth, videoHeight)
// 创建或更新纹理
if (!this.texture) {
this.texture = Texture.from(this.yuvCanvas)
} else {
this.texture.update()
}
// 创建或更新全景精灵图
if (!this.sprite) {
this.sprite = Sprite.from(this.texture)
// this.sprite.anchor.set(0, 0.5)
this.sprite.width = this.app.view.width
this.sprite.height = this.app.view.height
this.sprite.position.set(0, 0)
// 添加到舞台
this.app.stage..addChild(this.sprite)
}
}
/**
 * Convert one YUV sample to RGB (coefficients match the JPEG /
 * BT.601 full-range formula). Results are left unclamped and
 * unrounded; writing them into a Uint8ClampedArray clamps them.
 * @param {number} y - luma component (0-255)
 * @param {number} u - blue-difference chroma (0-255)
 * @param {number} v - red-difference chroma (0-255)
 * @returns {number[]} [r, g, b]
 */
const yuv2Rgb = (y, u, v) => {
  const du = u - 128
  const dv = v - 128
  return [
    y + 1.402 * dv,
    y - 0.34414 * du - 0.71414 * dv,
    y + 1.772 * du,
  ]
}
/**
 * Draw a planar YUV420 frame (I420 layout: full Y plane, then the
 * quarter-size U plane, then the quarter-size V plane) onto a 2D canvas.
 * NOTE(review): despite the "yvu" name, the data is read U-first (I420);
 * name kept for caller compatibility.
 * @param {CanvasRenderingContext2D|OffscreenCanvasRenderingContext2D} ctx - target 2d context
 * @param {Uint8Array} yuvData - packed I420 data, length = width * height * 1.5
 * @param {number} width - frame width in pixels (assumed even)
 * @param {number} height - frame height in pixels (assumed even)
 */
const yvu2Canvas = (ctx, yuvData, width, height) => {
  const imgData = ctx.createImageData(width, height)
  const pixels = imgData.data
  const uBase = width * height
  const vBase = uBase + (width * height) / 4
  const halfWidth = width >> 1
  let yIndex = 0
  for (let i = 0; i < height; i++) {
    // The chroma row offset is constant for the whole inner loop — hoist it.
    const chromaRow = (i >> 1) * halfWidth
    for (let j = 0; j < width; j++) {
      // Each 2x2 block of luma pixels shares one U and one V sample.
      const chromaIndex = chromaRow + (j >> 1)
      const [r, g, b] = yuv2Rgb(
        yuvData[yIndex++],
        yuvData[uBase + chromaIndex],
        yuvData[vBase + chromaIndex]
      )
      const out = (i * width + j) * 4
      // Uint8ClampedArray clamps and rounds the float RGB values for us.
      pixels[out] = r
      pixels[out + 1] = g
      pixels[out + 2] = b
      pixels[out + 3] = 255 // fully opaque
    }
  }
  ctx.putImageData(imgData, 0, 0)
}