动效给的视频是黑底的,我们的活动需要将该视频处理成透明底。
方案1:利用canvas
html:
<video
id="video"
style="width: 100%; height: 4rem; display: none"
src="./../../assets/mp4/cj.mp4"
></video>
<canvas id="canvas1" width="375" height="667" style="display: none"></canvas>
<canvas id="canvas2" width="375" height="667"></canvas>
js:
const c = document.getElementById('canvas1') as HTMLCanvasElement
const ctx = c.getContext('2d') as any
const c2 = document.getElementById('canvas2') as HTMLCanvasElement
const ctx2 = c2.getContext('2d') as CanvasRenderingContext2D
const video = document.getElementById('video') as any
const i = window.setInterval(function () {
ctx.drawImage(video, 0, 0, 375, 667)
const im = ctx.getImageData(0, 0, 375, 667)
console.log(im)
const l = im.data.length / 4
for (let j = 0; j < l; j += 1) {
// videoData[i] = alphaData[i - 1]
const r = im.data[j * 4 + 0]
const g = im.data[j * 4 + 1]
const b = im.data[j * 4 + 2]
// if (r <= 200 && g <= 200 && b <= 150) im.data[j * 4 + 3] = 0
if (g < 100 && r < 200) im.data[j * 4 + 3] = 0
}
ctx2.putImageData(im, 0, 0, 0, 0, 375, 667)
if (video.ended) {
clearInterval(i)
}
}, 16)
方案2:利用webgl
这里是类:
/* eslint-disable no-unused-expressions */
/* eslint-disable no-underscore-dangle */
class AlphaVideo {
options: any
video!: HTMLVideoElement
playing: boolean | undefined
gl: any
canvas: any
radio: number
duration: number = 0 // 视频的总播放时长
constructor(option: any) {
const defaultOption = {
src: '',
autoplay: true,
loop: true,
canvas: null,
// 默认透明视频展示大小
width: 375,
height: 400,
onError() {},
onPlay() {}
}
this.options = {
...defaultOption,
...option
}
this.radio = window.devicePixelRatio
this.initVideo()
this.initWebgl()
if (this.options.autoplay) {
this.video.play()
}
}
initVideo() {
const { onPlay, onError, loop, src, onEnd, update } = this.options
const video = document.createElement('video')
video.autoplay = false
video.muted = true
video.volume = 0
video.muted = true
video.loop = loop
video.setAttribute('x-webkit-airplay', 'true')
video.setAttribute('webkit-playsinline', 'true')
video.setAttribute('playsinline', 'true')
video.style.display = 'none'
video.src = src
video.crossOrigin = 'anonymous'
video.addEventListener('canplay', () => {
this.playing = true
onPlay && onPlay()
})
video.addEventListener('error', () => {
onError && onError()
})
video.addEventListener('play', () => {
window.requestAnimationFrame(() => {
this.drawFrame()
})
})
video.addEventListener('ended', () => {
onEnd && onEnd()
})
video.addEventListener('timeupdate', () => {
// const timer = setTimeout(() => {
// this.duration = this.video.duration
// clearTimeout(timer)
// }, 1000)
// // console.log(this.video.currentTime)
// console.log('duration', this.video.duration)
update && update(this.video.duration, this.video.currentTime)
})
document.body.appendChild(video)
this.video = video
}
drawFrame() {
if (this.playing) {
this.drawWebglFrame()
}
window.requestAnimationFrame(() => {
this.drawFrame()
})
}
drawWebglFrame() {
const { gl } = this
// 配置纹理图像
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGB, gl.RGB, gl.UNSIGNED_BYTE, this.video)
// 绘制
gl.drawArrays(gl.TRIANGLE_STRIP, 0, 4)
}
play() {
this.playing = true
this.video.play()
}
pause() {
this.playing = false
this.video.pause()
}
initWebgl() {
this.canvas = this.options.canvas
this.canvas.width = this.options.width * this.radio
this.canvas.height = this.options.height * this.radio
this.canvas.addEventListener('click', () => {
this.play()
})
if (!this.canvas) {
this.canvas = document.createElement('canvas')
document.body.appendChild(this.canvas)
}
const gl = this.canvas.getContext('webgl', {
antialias: true
})
gl.viewport(0, 0, this.options.width * this.radio, this.options.height * this.radio)
// eslint-disable-next-line no-underscore-dangle
const program = this._initShaderProgram(gl)
gl.linkProgram(program)
gl.useProgram(program)
const buffer = this._initBuffer(gl)
// 绑定缓冲
gl.bindBuffer(gl.ARRAY_BUFFER, buffer.position)
const aPosition = gl.getAttribLocation(program, 'a_position')
// 允许属性读取,将缓冲区的值分配给特定的属性
gl.enableVertexAttribArray(aPosition)
gl.vertexAttribPointer(aPosition, 2, gl.FLOAT, false, 0, 0)
gl.bindBuffer(gl.ARRAY_BUFFER, buffer.texture)
const aTexCoord = gl.getAttribLocation(program, 'a_texCoord')
gl.enableVertexAttribArray(aTexCoord)
gl.vertexAttribPointer(aTexCoord, 2, gl.FLOAT, false, 0, 0)
// 绑定纹理
const texture = this._initTexture(gl)
gl.bindTexture(gl.TEXTURE_2D, texture)
const scaleLocation = gl.getUniformLocation(program, 'u_scale')
gl.uniform2fv(scaleLocation, [this.radio, this.radio])
this.gl = gl
}
_createShader(gl: any, type: any, source: any) {
const shader = gl.createShader(type)
gl.shaderSource(shader, source)
gl.compileShader(shader)
if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) {
console.error(gl.getShaderInfoLog(shader))
}
return shader
}
_initShaderProgram(gl: any) {
// 顶点着色器glsl代码
const vsSource = `
attribute vec2 a_position;
attribute vec2 a_texCoord;
varying vec2 v_texCoord;
uniform vec2 u_scale;
void main(void) {
gl_Position = vec4(a_position, 0.0, 1.0);
v_texCoord = a_texCoord;
}
`
// 片元着色器 glsl 代码
const fsSource = `
precision lowp float;
varying vec2 v_texCoord;
uniform sampler2D u_sampler;
void main(void) {
gl_FragColor = vec4(texture2D(u_sampler, v_texCoord).rgb, texture2D(u_sampler, v_texCoord+vec2(-0.5, 0)).r);
}
`
const vsShader = this._createShader(gl, gl.VERTEX_SHADER, vsSource)
const fsShader = this._createShader(gl, gl.FRAGMENT_SHADER, fsSource)
const program = gl.createProgram()
gl.attachShader(program, vsShader)
gl.attachShader(program, fsShader)
gl.linkProgram(program)
return program
}
_initBuffer(gl: any) {
const positionVertice = new Float32Array([-1.0, 1.0, 1.0, 1.0, -1.0, -1.0, 1.0, -1.0])
const positionBuffer = gl.createBuffer() // 创建buffer
gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer) // 把缓冲区对象绑定到目标
gl.bufferData(gl.ARRAY_BUFFER, positionVertice, gl.STATIC_DRAW) // 向缓冲区对象写入刚定义的顶点数据
const textureBuffer = gl.createBuffer()
const textureVertice = new Float32Array([0.5, 1.0, 1.0, 1.0, 0.5, 0.0, 1.0, 0.0]) // 这里将纹理左半部分映射到整个画布上
gl.bindBuffer(gl.ARRAY_BUFFER, textureBuffer)
gl.bufferData(gl.ARRAY_BUFFER, textureVertice, gl.STATIC_DRAW)
return {
position: positionBuffer,
texture: textureBuffer
}
}
_initTexture(gl: any) {
const texture = gl.createTexture()
gl.bindTexture(gl.TEXTURE_2D, texture)
// 对纹理图像进行y轴反转,因为WebGL纹理坐标系统的t轴(分为t轴和s轴)的方向和图片的坐标系统Y轴方向相反。因此将Y轴进行反转。
gl.pixelStorei(gl.UNPACK_FLIP_Y_WEBGL, 1)
// 配置纹理参数
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST)
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE)
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE)
return texture
}
}
export default AlphaVideo
使用方法:这里是具体的业务代码
// 抽卡
const handlerDrawCard = (number: number) => {
lotteryOneDialog.value = false
lotteryTenDialog.value = false
if (cardCount.value === 0) {
Toast('暂无抽奖次数')
return
}
data.canvasVideo = true
let mv: any
nextTick(() => {
if (!mv) {
mv = new AlphaVideo({
src: 'https://cdnsource.bugegaming.com/static/images/newyear/test.mp4',
width: 1000,
height: 1500,
loop: true, // 是否循环播放
canvas: document.getElementById('js_output_2'),
update: (duration: number, currentTime: number) => {
if (Math.floor(currentTime) >= Math.floor(duration)) {
mv.pause()
data.canvasVideo = false
mv = null
const params = {
times: 0,
sessionId: 0
}
if (number === 1) {
params.times = 1
} else if (cardCount.value < 10) {
params.times = cardCount.value
} else {
params.times = 10
}
drawCard(params, userToken.value).then((res: any) => {
if (res.code === 200) {
if (number === 1) {
lotteryOneDialog.value = true
} else {
lotteryTenDialog.value = true
}
data.awardList = res.data
getCardList()
getSwiperData()
getCount()
} else {
Toast.fail(res.msg)
}
})
}
}
})
}
mv.play()
})
}
两种方案对比:从使用上看 webgl 会比 canvas 更好。如果视频中存在渐变色,canvas 比较难处理成想要的效果,而 webgl 则没有这个问题。