WebGL 正交相机下的ssao解析

461 阅读2分钟

pbr (mecg.me)

image.png

正交相机下的深度图

attribute vec3 v3Pos;
uniform mat4 viewMatrix3D;
uniform mat4 camMatrix3D;
uniform mat4 posMatrix3D;

// Depth-pass vertex shader: plain MVP transform.
// posMatrix3D = model, camMatrix3D = camera/view, and viewMatrix3D is applied
// last, so it presumably holds the (orthographic) projection — the naming is
// unusual; confirm against the JS side.
void main(void) {
    vec4 worldPos = posMatrix3D * vec4(v3Pos, 1.0); // model -> world
    vec4 viewPos  = camMatrix3D * worldPos;         // world -> view
    gl_Position   = viewMatrix3D * viewPos;         // view  -> clip
}
precision mediump float;
// Pack a depth value in [0,1) into the four 8-bit RGBA channels of a color
// texture, so the SSAO pass can recover it later with the matching
// unpackDepth(). R holds the coarsest bits, A the finest.
vec4 packDepth(float depth) {
    const vec4 shift = vec4(1.0, 255.0, 255.0 * 255.0, 255.0 * 255.0 * 255.0);
    const vec4 mask  = vec4(1.0 / 255.0, 1.0 / 255.0, 1.0 / 255.0, 0.0);
    vec4 enc = fract(depth * shift);
    // Subtract from each channel the part that the next finer channel already
    // encodes, so the channels don't double-count.
    enc -= enc.gbaa * mask;
    return enc;
}
// Depth-pass fragment shader: encode this fragment's depth into RGBA.
// pow(z, 128.0) reshapes the depth curve before packing; the SSAO pass
// applies the same pow() to its sample depths so the comparison matches.
void main(void) {
    float encoded = pow(gl_FragCoord.z, 128.0);
    gl_FragColor = packDepth(encoded);
}

  • 正交相机 信息 image.png

SSAO贴图生成

image.png image.png

precision highp float;
attribute vec3 v3Pos;
attribute vec3 v3Nor;
attribute vec2 v2Uv;
uniform mat4 viewMatrix3D;
uniform mat4 camMatrix3D;
uniform mat4 posMatrix3D;
//---
varying vec3 vNormal;   // view-space normal
varying vec3 vPosition; // view-space position
varying vec2 vUv;       // texture coordinates

// transpose() and inverse() are NOT available in GLSL ES 1.00 (WebGL1, which
// the attribute/varying qualifiers imply), so the original
// transpose(inverse(mat3(...))) failed to compile. Build the normal matrix
// from the cofactor matrix instead: for M with columns c0,c1,c2,
//   cofactor(M) = [cross(c1,c2), cross(c2,c0), cross(c0,c1)]
//   transpose(inverse(M)) = cofactor(M) / det(M)
// The 1/det(M) factor only scales the vector, which the later normalize()
// removes (sign differs only for mirroring transforms, det < 0 — not expected
// here).
mat3 normalMatrixOf(mat4 m) {
    vec3 c0 = m[0].xyz;
    vec3 c1 = m[1].xyz;
    vec3 c2 = m[2].xyz;
    return mat3(cross(c1, c2), cross(c2, c0), cross(c0, c1));
}

void main(void) {
    // Model -> world -> view; the SSAO pass needs the view-space position.
    vec4 viewPos = camMatrix3D * (posMatrix3D * vec4(v3Pos, 1.0));
    vPosition = viewPos.xyz;

    // View-space normal via the inverse-transpose, correct under
    // non-uniform scaling.
    vNormal = normalize(normalMatrixOf(camMatrix3D * posMatrix3D) * v3Nor);

    vUv = v2Uv;
    gl_Position = viewMatrix3D * viewPos; // viewMatrix3D acts as the projection
}
  • 流程
    • 建立视口空间下的切线空间 (需要法线向量)
    • 建立视口空间下的噪点 (需要视口空间坐标)
    • 噪点转到屏幕空间,当然噪点的Z值是知道的
      • 但凡转到屏幕空间,都有两个常用的方法
        • 获取屏幕空间下的z值
        • 根据屏幕空间的 uv 采样贴图
    • Z的深度与屏幕空间的深度进行对比,深度小意味着没被遮挡
    • 未被遮挡(深度更小)的采样点占比越高,结果越亮;被遮挡的采样点越多,结果越暗
precision highp float;
varying vec3 vNormal;   // view-space normal
varying vec3 vPosition; // view-space position
varying vec2 vUv;       // texture coordinates

// Single source of truth for the kernel size: array length, loop bound and
// normalization all use it (const int is a constant expression in ES 1.00,
// so it is legal in all three positions).
const int   KERNEL_SIZE = 48;
const float RADIUS = 1.2;   // sampling radius in view-space units

uniform sampler2D depthTexture;           // RGBA-packed scene depth (pow(z,128) encoded)
uniform sampler2D noiseTexture;           // small random-vector texture, tiled over screen
uniform mat4      projection;             // view -> clip (orthographic here)
uniform vec3      samples[KERNEL_SIZE];   // hemisphere kernel offsets (small, tangent space)

// Recover the float depth that packDepth() in the depth pass encoded into RGBA.
float unpackDepth(const in vec4 rgbaDepth) {
    const vec4 bitShift = vec4(1.0, 1.0 / 255.0, 1.0 / (255.0 * 255.0), 1.0 / (255.0 * 255.0 * 255.0));
    return dot(rgbaDepth, bitShift);
}

void main(void) {
    // Gram-Schmidt: build a per-pixel randomized TBN basis around the normal,
    // using the tiled noise texture to rotate the kernel and break banding.
    vec3 randomVec = texture2D(noiseTexture, vUv * 10.0).xyz;
    vec3 tangent   = normalize(randomVec - vNormal * dot(randomVec, vNormal));
    vec3 bitangent = cross(vNormal, tangent);
    mat3 TBN = mat3(tangent, bitangent, vNormal);

    float occlusion = 0.0;
    for (int i = 0; i < KERNEL_SIZE; ++i) {
        // NOTE: renamed from `sample` — that identifier is a reserved word in
        // GLSL ES 3.00 and is rejected by several WebGL shader compilers.
        vec3 samplePos = TBN * samples[i]; // tangent -> view space
        // Offset along the normal (0.03) is a small bias against self-occlusion.
        samplePos = vPosition + samplePos * RADIUS + vNormal * 0.03;

        // View space -> clip -> NDC -> [0,1] screen/UV space.
        vec4 offset = projection * vec4(samplePos, 1.0);
        offset.xyz /= offset.w;              // no-op for orthographic (w == 1)
        offset.xyz = offset.xyz * 0.5 + 0.5;

        // Scene depth stored at the sample's screen position...
        float modelDepth = unpackDepth(texture2D(depthTexture, offset.xy));
        // ...compared against the sample's own depth, encoded the same way
        // (pow(z,128)) the depth pass wrote it.
        float sampleDepth = pow(offset.z, 128.0);

        // Fade the contribution when the two depths are far apart, then count
        // the sample as visible (1.0) unless the scene surface is in front of it.
        float rangeCheck = smoothstep(0.0, 1.0, RADIUS / abs(sampleDepth - modelDepth));
        occlusion += (modelDepth < sampleDepth ? 0.0 : 1.0) * rangeCheck;
    }

    // Fraction of visible samples: bright = unoccluded, dark = occluded.
    occlusion /= float(KERNEL_SIZE);
    gl_FragColor = vec4(occlusion, occlusion, occlusion, 1.0);
}
  • 正交相机 image.png