ThreeJS Example webgl_postprocessing_ssao

963 阅读2分钟

image.png

THREE API

  • 后处理声明
// Post-processing setup: wrap the renderer in an EffectComposer and add an SSAO pass.
composer = new EffectComposer( renderer );
const ssaoPass = new SSAOPass( scene, camera, width, height );
ssaoPass.kernelRadius = 16;
composer.addPass( ssaoPass );
  • 通过 GUI 切换 ssaoPass 的 output 输出模式
// GUI control: switch the SSAOPass debug output mode at runtime.
gui.add( ssaoPass, 'output', {
        'Default': SSAOPass.OUTPUT.Default,
        'SSAO Only': SSAOPass.OUTPUT.SSAO,
        'SSAO Only + Blur': SSAOPass.OUTPUT.Blur,
        'Beauty': SSAOPass.OUTPUT.Beauty,
        'Depth': SSAOPass.OUTPUT.Depth,
        'Normal': SSAOPass.OUTPUT.Normal
} ).onChange( function ( value ) {

        // The GUI delivers the selected value as a string; convert back to the enum int.
        ssaoPass.output = parseInt( value );

} );
  • render 循环中不调用 renderer.render,而是调用 composer.render
// Render-loop body: animate the group, then render through the composer.
// Note: composer.render() replaces the usual renderer.render( scene, camera ).
const timer = performance.now();
group.rotation.x = timer * 0.0002;
group.rotation.y = timer * 0.0001;

composer.render();

SSAO Shader

image.png

  • 流程
    • 建立视口空间下的切线空间 (需要法线向量)
    • 建立视口空间下的噪点 (需要视口空间坐标)
    • 噪点转到屏幕空间,当然噪点的Z值是知道的
      • 但凡转到屏幕空间,都有两个常用的方法
        • 获取屏幕空间下的z值
        • 根据屏幕空间的 uv 采样贴图
    • 采样点的深度与深度贴图中对应位置的深度进行对比,采样点深度更小意味着没被遮挡
    • 被遮挡的采样点越多,occlusion 越大,结果越暗;被遮挡的越少,结果越亮

顶点着色器

// Fullscreen-quad vertex shader: pass the UV through and project the position.
varying vec2 vUv;
void main() {
    vUv = uv;
    gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}

片元着色器

// SSAO fragment shader: reconstruct the view-space position and normal of the
// current pixel, sample KERNEL_SIZE offsets in a normal-oriented hemisphere,
// and count how many samples are occluded by the geometry in the depth buffer.
void main() {
    // Reconstruct the view-space position of this fragment.
    float depth = getDepth( vUv ); // raw screen-space depth from the depth texture
    float viewZ = getViewZ( depth ); // view-space Z derived from that depth (used to recover clip-space w)
    vec3 viewPosition = getViewPosition( vUv, depth, viewZ ); // view-space position of this fragment
    // Build the TBN basis around the surface normal.
    vec3 viewNormal = getViewNormal( vUv ); // per-pixel view-space normal from the normal texture
    // Gram-Schmidt orthogonalization against a random vector (randomizes the kernel rotation).
    vec2 noiseScale = vec2( resolution.x / 4.0, resolution.y / 4.0 );
    vec3 random = texture2D( tNoise, vUv * noiseScale ).xyz;
    vec3 tangent = normalize( random - viewNormal * dot( random, viewNormal ) );
    vec3 bitangent = cross( viewNormal, tangent );
    mat3 kernelMatrix = mat3( tangent, bitangent, viewNormal );
    // Accumulate occlusion over the sample kernel.
    float occlusion = 0.0;
    for ( int i = 0; i < KERNEL_SIZE; i ++ ) {
        vec3 sampleVector = kernelMatrix * kernel[ i ]; // kernel offset rotated into the TBN basis
        vec3 samplePoint = viewPosition + ( sampleVector * kernelRadius ); // view-space sample position
        vec4 samplePointNDC = cameraProjectionMatrix * vec4( samplePoint, 1.0 ); // clip space
        samplePointNDC /= samplePointNDC.w; // perspective divide -> NDC
        vec2 samplePointUv = samplePointNDC.xy * 0.5 + 0.5; // NDC -> screen-space uv
        float realDepth = getLinearDepth( samplePointUv ); // scene's linear depth at the sample's screen position
        float sampleDepth = viewZToOrthographicDepth( samplePoint.z, cameraNear, cameraFar ); // sample point's own linear depth
        float delta = sampleDepth - realDepth;
        if ( delta > minDistance && delta < maxDistance ) {
            occlusion += 1.0; // sample lies behind the stored surface -> occluded
        }
    }
    occlusion = clamp( occlusion / float( KERNEL_SIZE ), 0.0, 1.0 );
    gl_FragColor = vec4( vec3( 1.0 - occlusion ), 1.0 );
}
  • getDepth
    • 作用是获取深度,透视投影是非线性的,正交投影是线性的
    • 这个是屏幕空间了
float depth = getDepth( vUv ); // usage: sample the screen-space depth at this pixel
// Read the raw value stored in the depth texture. For a perspective camera this
// value is non-linear in distance; for an orthographic camera it is linear.
float getDepth( const in vec2 screenPosition ) {
    return texture2D( tDepth, screenPosition ).x;
}
  • viewZ是指视口空间下的Z
float viewZ = getViewZ( depth ); // usage: this viewZ is later used to recover the clip-space w
// Convert a depth-buffer value to view-space Z, honoring the camera projection type.
float getViewZ( const in float depth ) {
    #if PERSPECTIVE_CAMERA == 1
        return perspectiveDepthToViewZ( depth, cameraNear, cameraFar );
    #else
        return orthographicDepthToViewZ( depth, cameraNear, cameraFar );
    #endif
}
// Invert the non-linear perspective depth mapping back to view-space Z.
float perspectiveDepthToViewZ( const in float invClipZ, const in float near, const in float far ) {
    return ( near * far ) / ( ( far - near ) * invClipZ - far );
}
// Orthographic depth is already linear; rescale [0,1] back to view-space Z.
float orthographicDepthToViewZ( const in float linearClipZ, const in float near, const in float far ) {
    return linearClipZ * ( near - far ) - near;
}
  • 获取真实的位置,视口空间的坐标值
    • 这里是depth,viewZ,相机信息来获取视口坐标
    • 获取视口坐标 是为了建立采样点
vec3 viewPosition = getViewPosition( vUv, depth, viewZ ); // usage: needed to place the sample kernel
// Unproject a screen-space point (uv + depth) back to view space.
// clipW is reconstructed from the projection-matrix entries that produce w,
// then the NDC position is scaled by it before applying the inverse projection.
vec3 getViewPosition( const in vec2 screenPosition, const in float depth, const in float viewZ ) {
    float clipW = cameraProjectionMatrix[2][3] * viewZ + cameraProjectionMatrix[3][3];
    vec4 clipPosition = vec4( ( vec3( screenPosition, depth ) - 0.5 ) * 2.0, 1.0 );
    clipPosition *= clipW; // unprojection.
    
    return ( cameraInverseProjectionMatrix * clipPosition ).xyz;
}
  • 通过贴图获取normal image.png
vec3 viewNormal = getViewNormal( vUv ); // usage: per-pixel normal for building the TBN basis
// Fetch the view-space normal stored in the normal texture (unpacked from RGB).
vec3 getViewNormal( const in vec2 screenPosition ) {
    return unpackRGBToNormal( texture2D( tNormal, screenPosition ).xyz );
}
  • 通过noise来获取随机数
    • resolution 除以 4.0:推测是因为噪声贴图是 4×4 的,放大 uv 使噪声在屏幕上平铺重复
// Scale the uv by resolution/4 so the noise texture tiles across the screen
// (presumably the noise texture is 4x4 texels — TODO confirm against SSAOPass).
vec2 noiseScale = vec2( resolution.x / 4.0, resolution.y / 4.0 );
vec3 random = texture2D( tNoise, vUv * noiseScale ).xyz;
  • Gram-Schmidt 正交化:用随机向量在法线周围构建正交的切线空间
// Gram-Schmidt: build an orthonormal TBN basis around the view-space normal.
vec3 tangent = normalize( random - viewNormal * dot( random, viewNormal ) ); // unit vector
vec3 bitangent = cross( viewNormal, tangent ); // unit vector (cross of two orthogonal unit vectors)
mat3 kernelMatrix = mat3( tangent, bitangent, viewNormal );
  • kernel的数量有32组(下方 size: 32,索引 0–31)
Uniforms(2)
name: kernel
size: 32
type: FLOAT_VEC3
location: WebGLUniformLocation - ID: 89
Values(0)
value: 0.0711, -0.0464, 0.0528
Values(1)
value: 0.0771, -0.0186, 0.0623
Values(2)
value: -0.0611, 0.0816, 0.0176
Values(3)
value: -0.0983, 0.0199, 0.0397
Values(4)
value: -0.0660, -0.0515, 0.0775
Values(5)
value: -0.0848, 0.0843, 0.0244
Values(6)
value: -0.0434, 0.1012, 0.0721
Values(7)
value: -0.0748, -0.0928, 0.0791
Values(8)
value: 0.0822, 0.0962, 0.0917
Values(9)
value: 0.0274, 0.1587, 0.0581
Values(10)
value: 0.1172, 0.0725, 0.1277
Values(11)
value: 0.1137, 0.0740, 0.1555
Values(12)
value: 0.1143, 0.0681, 0.1834
Values(13)
value: -0.0598, -0.0925, 0.2228
Values(14)
value: 0.1982, 0.1635, 0.0900
Values(15)
value: 0.0541, -0.1887, 0.2239
Values(16)
value: 0.2186, 0.1609, 0.1787
Values(17)
value: 0.1456, -0.2860, 0.1493
Values(18)
value: -0.2767, 0.0464, 0.2633
Values(19)
value: -0.4143, 0.0389, 0.0310
Values(20)
value: 0.2169, 0.3689, 0.1441
Values(21)
value: -0.1308, 0.4624, 0.0826
Values(22)
value: -0.0641, -0.3536, 0.3833
Values(23)
value: 0.0723, -0.2761, 0.4875
Values(24)
value: 0.0797, -0.6010, 0.0043
Values(25)
value: -0.5125, -0.1256, 0.3784
Values(26)
value: -0.4081, 0.4660, 0.3132
Values(27)
value: 0.3065, 0.6405, 0.2111
Values(28)
value: -0.0723, 0.6683, 0.4133
Values(29)
value: 0.4024, 0.5203, 0.5211
Values(30)
value: 0.3327, 0.0732, 0.8233
Values(31)
value: -0.0070, -0.4160, 0.8480
blockIndice: -1
offset: -1
arrayStride: -1
matrixStride: -1
rowMajor: false
  • 计算occlusion
// Project each kernel sample from view space to screen space and compare depths.
vec3 sampleVector = kernelMatrix * kernel[ i ]; // a random kernel point rotated into the TBN basis
vec3 samplePoint = viewPosition + ( sampleVector * kernelRadius ); // view space; kernelRadius acts like a hemisphere radius
vec4 samplePointNDC = cameraProjectionMatrix * vec4( samplePoint, 1.0 ); // clip space
samplePointNDC /= samplePointNDC.w; // perspective divide
vec2 samplePointUv = samplePointNDC.xy * 0.5 + 0.5; // screen space

// Both depths are linearized so the comparison behaves uniformly across the depth range.
float realDepth = getLinearDepth( samplePointUv ); // scene's linear depth at the sample's screen position (from the perspective depth texture)
float sampleDepth = viewZToOrthographicDepth( samplePoint.z, cameraNear, cameraFar ); // the sample point's own linear depth
float delta = sampleDepth - realDepth; // delta > 0 means the sample is behind the stored surface (occluded)
if ( delta > minDistance && delta < maxDistance ) { 
    occlusion += 1.0;
}
// Return a linear [0,1] depth for the given screen position.
// Perspective camera: the stored depth is non-linear, so convert
// depth -> view-space Z -> orthographic (linear) depth.
// Orthographic camera: the stored depth is already linear; return it directly.
float getLinearDepth( const in vec2 screenPosition ) {
    #if PERSPECTIVE_CAMERA == 1
        float fragCoordZ = texture2D( tDepth, screenPosition ).x;
        float viewZ = perspectiveDepthToViewZ( fragCoordZ, cameraNear, cameraFar );
        return viewZToOrthographicDepth( viewZ, cameraNear, cameraFar );
    #else
        return texture2D( tDepth, screenPosition ).x;
    #endif
}
  • 最后
    occlusion = clamp( occlusion / float( KERNEL_SIZE ), 0.0, 1.0 ); 
    gl_FragColor = vec4( vec3( 1.0 - occlusion ), 1.0 ); // more occluded samples -> larger occlusion -> 1-occlusion smaller -> darker output

均值模糊

image.png

顶点着色器

// Fullscreen-quad vertex shader for the blur pass: pass the UV through, project the position.
varying vec2 vUv;
void main() {
    vUv = uv;
    gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}

片元着色器

// 5x5 box blur over the single-channel (red) AO texture, averaging 25 texels
// centered on the current pixel and writing the mean as a grayscale color.
uniform sampler2D tDiffuse;
uniform vec2 resolution;
varying vec2 vUv;
void main() {
    vec2 invResolution = ( 1.0 / resolution );
    float sum = 0.0;
    for ( int dx = - 2; dx <= 2; dx ++ ) {
        for ( int dy = - 2; dy <= 2; dy ++ ) {
            // Step dx/dy texels away from the center and accumulate the red channel.
            sum += texture2D( tDiffuse, vUv + vec2( float( dx ), float( dy ) ) * invResolution ).r;
        }
    }
    gl_FragColor = vec4( vec3( sum / 25.0 ), 1.0 );
}

融合

image.png image.png

  • BLEND: true
  • BLEND_COLOR: 0, 0, 0, 0
  • BLEND_DST_ALPHA: ZERO
  • BLEND_DST_RGB: ZERO
  • BLEND_EQUATION_ALPHA: FUNC_ADD
  • BLEND_EQUATION_RGB: FUNC_ADD
  • BLEND_SRC_ALPHA: DST_ALPHA
  • BLEND_SRC_RGB: DST_COLOR
  • validCommandIds: 751