Getting Started with WebGPU (2): Changing the Triangle's Size and Color


Recap

In the previous article (Getting Started with WebGPU (1): Basic Concepts and Drawing a Triangle), we got a first look at how WebGPU works and drew a triangle. The triangle's position and color were hard-coded in the shaders, so changing either of them meant editing the shaders themselves, which is awkward and hard to debug. To fix this, we need a generic shader that accepts data from the outside and uses it to control what gets drawn. Before doing that, let's take a quick look at the shading language, WGSL.

WGSL

Official documentation

Data types

Type    Description
bool    boolean
u32     32-bit unsigned integer
i32     32-bit signed integer
f32     32-bit floating-point number
f16     16-bit floating-point number
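
When we upload data from JavaScript later in this article, the typed array we fill should match the WGSL type declared in the shader. A minimal sketch (the buffer name is only illustrative):

// f32 data travels as Float32Array, u32 as Uint32Array, i32 as Int32Array
const f32Data = new Float32Array([1.0, 2.0, 3.0]); // matches f32 / vec3<f32>
const u32Data = new Uint32Array([0, 1, 2]);        // matches u32
const i32Data = new Int32Array([-1, 0, 1]);        // matches i32
// device.queue.writeBuffer(someBuffer, 0, f32Data); // someBuffer is hypothetical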

Variables

// declared with var
var a: f32 = 1.0; // 32-bit float
var b = 1.0;      // type inferred as f32

Functions

// fn functionName(param1: type, param2: type, ...) { }
fn add(a: f32, b: f32) {
  var c: f32 = a + b;
}

// fn functionName(param1: type, param2: type, ...) -> returnType { }
fn add(a: f32, b: f32) -> f32 {
  return a + b;
}

Vectors

var pos: vec3<f32>; // 3-component vector
pos = vec3<f32>(1.0, 2.0, 3.0);

var pos4: vec4<f32>; // 4-component vector
pos4 = vec4<f32>(1.0, 2.0, 3.0, 1.0);

// expanding a vec3 into a vec4
var pos2 = vec4<f32>(pos, 1.0); // equivalent to vec4<f32>(1.0, 2.0, 3.0, 1.0)

Writing a generic shader

With the basics above, we know how to write a function and declare a vector, so let's modify the shaders from the previous article. The old code is left commented out for comparison; the vertex shader and fragment shader live in separate files (triangle.vertex.wgsl and frag.wgsl, matching the imports in the complete code below).

// @vertex
// fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
//     var pos = array<vec2<f32>, 3>(
// 	    vec2<f32>(0.0, 0.5),
// 	    vec2<f32>(-0.5, -0.5),
// 	    vec2<f32>(0.5, -0.5)
//     );
//     return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
// }

@vertex
fn main(@location(0) position : vec3<f32>) -> @builtin(position) vec4<f32> {
  return vec4<f32>(position,1.0);
}
//@fragment
//fn main() -> @location(0) vec4<f32> {
//    return vec4<f32>(1.0, 0.0, 0.0, 1.0);
//}
@group(0) @binding(0) var<uniform> color : vec4<f32>;

@fragment
fn main() -> @location(0) vec4<f32> {
    return color;
}

@location is a WGSL attribute, typically used to mark vertex data that comes from a vertex buffer. It is written with an @ prefix, and the slot index goes inside the parentheses: @location(n). The parameter @location(0) on main therefore refers to the vertex data in the vertex buffer attribute bound to slot 0 in GPU memory.

@builtin is another WGSL attribute.

The value returned by main is vertex position data, so besides declaring the return type we also annotate it with @builtin(position), which tells the pipeline that the returned value is the vertex's position.
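
On the JavaScript side, @location(0) lines up with the attribute declared as shaderLocation: 0 in the vertex buffer layout that we pass to the render pipeline later in this article:

const vertexBufferLayout: GPUVertexBufferLayout = {
  arrayStride: 3 * 4, // 12 bytes: three f32 values (x, y, z) per vertex
  attributes: [
    // shaderLocation: 0 is the slot that @location(0) in the vertex shader reads from
    { shaderLocation: 0, offset: 0, format: 'float32x3' }
  ]
};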

Transferring data

Defining the data

const vertex = new Float32Array([
  -1.0, -1.0, 0.0, // vertex 1 (x, y, z)
   1.0, -1.0, 0.0, // vertex 2
   0.0,  1.0, 0.0  // vertex 3
]);
const color = new Float32Array([1.0, 0.0, 0.0, 1.0]); // RGBA: red

Writing the data

// vertex
const vertexBuffer = (this.device as GPUDevice).createBuffer({
  size:vertex.byteLength,
  usage:GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST
});

(this.device as GPUDevice).queue.writeBuffer(vertexBuffer,0,vertex)

// fragment
const colorBuffer = (this.device as GPUDevice).createBuffer({
  size:color.byteLength,
  usage:GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST
});
(this.device as GPUDevice).queue.writeBuffer(colorBuffer,0,color);
const uniformGroup = (this.device as GPUDevice).createBindGroup({
  label: 'Uniform Group with colorBuffer',
  layout: (this.pipeline as GPURenderPipeline).getBindGroupLayout(0),
  entries: [
    {
      binding: 0,
      resource: {
        buffer: colorBuffer
      }
    }
  ]
})

Attaching the vertex buffer and bind group to the render pass

passEncoder.setBindGroup(0,uniformGroup);
passEncoder.setVertexBuffer(0, vertexBuffer);

Drawing

passEncoder.draw(3);
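
draw(3) issues three vertices, one for each vertex in the array defined above. If the vertex data changes, the count can be derived from the array instead of being hard-coded (assuming three floats per vertex, matching the float32x3 attribute):

passEncoder.draw(vertex.length / 3); // one vertex per three floats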

Complete code

import vertexCode from '../shaders/triangle.vertex.wgsl?raw' // the vertex shader written above
import fragCode from '../shaders/frag.wgsl?raw' // the fragment shader written above
export class Triangle {
    device?:GPUDevice
    format?:GPUTextureFormat
    context?:GPUCanvasContext
    pipeline?:GPURenderPipeline
    async init() {
        if (!navigator.gpu) {
            throw new Error('WebGPU is not supported')
        }
        const adapter = await navigator.gpu.requestAdapter();
        if (!adapter) {
            throw new Error('failed to get a GPU adapter')
        }
        const device = await adapter.requestDevice();
        if (!device) {
            throw new Error('failed to get a GPU device')
        }
        const format = navigator.gpu.getPreferredCanvasFormat();

        const canvas = document.getElementById('canvas') as HTMLCanvasElement;
        if (!canvas) {
            throw new Error('failed to get the canvas element')
        }
        const devicePixelRatio = window.devicePixelRatio || 1
        canvas.width = canvas.clientWidth * devicePixelRatio
        canvas.height = canvas.clientHeight * devicePixelRatio
        const context = canvas.getContext('webgpu') as GPUCanvasContext
        context.configure({
            device, format,
            alphaMode: 'opaque'
        })
        // create the render pipeline
        const pipeline = await this.createPipeline(device, format);

        this.device = device;
        this.context = context;
        this.format = format;
        this.pipeline = pipeline;
    }
    async draw(vertex:Float32Array,color:Float32Array) {
        // vertex
        const vertexBuffer = (this.device as GPUDevice).createBuffer({
            size:vertex.byteLength,
            usage:GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST
        });

        (this.device as GPUDevice).queue.writeBuffer(vertexBuffer,0,vertex)

        // fragment
        const colorBuffer = (this.device as GPUDevice).createBuffer({
            size:color.byteLength,
            usage:GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST
        });
        (this.device as GPUDevice).queue.writeBuffer(colorBuffer,0,color);
        const uniformGroup = (this.device as GPUDevice).createBindGroup({
            label: 'Uniform Group with colorBuffer',
            layout: (this.pipeline as GPURenderPipeline).getBindGroupLayout(0),
            entries: [
                {
                    binding: 0,
                    resource: {
                        buffer: colorBuffer
                    }
                }
            ]
        })

        // begin the render pass and record draw commands
        const commandEncoder = (this.device as GPUDevice).createCommandEncoder()
        const view = (this.context as  GPUCanvasContext).getCurrentTexture().createView()
        const renderPassDescriptor: GPURenderPassDescriptor = {
            colorAttachments: [
                {
                    view: view,
                    clearValue: { r: 0, g: 0, b: 0, a: 1.0 },
                    loadOp: 'clear', // clear/load
                    storeOp: 'store' // store/discard
                }
            ]
        }
        const passEncoder:GPURenderPassEncoder = commandEncoder.beginRenderPass(renderPassDescriptor)
        passEncoder.setPipeline(this.pipeline as GPURenderPipeline);
        passEncoder.setBindGroup(0,uniformGroup);
        passEncoder.setVertexBuffer(0, vertexBuffer);
        passEncoder.draw(3);
        passEncoder.end();
        (this.device as GPUDevice).queue.submit([commandEncoder.finish()])
    }
    async createPipeline(device: GPUDevice, format: GPUTextureFormat) {
        // vertex shader module
        const vertexModule = device.createShaderModule({
            code: vertexCode,
        })
        // fragment shader module
        const fragModule = device.createShaderModule({
            code: fragCode
        })

        const pipeline = await device.createRenderPipelineAsync({
            vertex: {
                module: vertexModule,
                entryPoint: 'main',
                buffers:[
                    {
                        arrayStride:3*4, // three f32 values (12 bytes) per vertex
                        attributes:[
                            {
                                shaderLocation:0,
                                offset:0,
                                format:'float32x3'
                            }
                        ]
                    }
                ]
            },
            fragment: {
                module: fragModule,
                entryPoint: 'main',
                targets: [{
                    format
                }]
            },
            layout: 'auto'
        })
        return pipeline;
    }
}
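
Finally, a minimal usage sketch, assuming a canvas element with id "canvas" exists and the shader files sit at the paths used in the imports above. The vertex and color arrays passed to draw are what change the triangle's size and color:

const triangle = new Triangle();
await triangle.init();
// a smaller, green triangle: three (x, y, z) vertices and one RGBA uniform color
await triangle.draw(
    new Float32Array([
        -0.5, -0.5, 0.0,
         0.5, -0.5, 0.0,
         0.0,  0.5, 0.0
    ]),
    new Float32Array([0.0, 1.0, 0.0, 1.0])
);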