WebGL 屏幕空间次表面散射 Screen-Space Subsurface Scattering(SSSSS)

1,341 阅读3分钟

关键词

  • 屏幕空间次表面散射
  • Screen-Space Subsurface Scattering
  • SSSSS

效果对比1

重新找了一个皮肤表现比较明显的模型,布鲁斯·威利斯大叔,下面看一下各种效果的对比:

  • 普通PBR,无SSS
  • SSSSS
  • 总感觉眼神都不对了
  • 比普通PBR 面更和善一些
  • 眼球那部分变暗淡了些
  • 预积分的次表面散射
  • 预积分的方式更亮了些
  • 比SSSSS要严肃一些
  • SSSSS+预积分
  • 这个效果好些

效果对比2

  • diffuse\blurred\specular image.png

计算原理

  • 主流的计算方法是屏幕空间方法:对漫反射结果做模糊来模拟次表面散射
    • R(r) ≈ Σᵢ wᵢ · G(vᵢ, r)(即若干次高斯模糊的加权和)
      • G(vᵢ, r) 代表一次高斯模糊
        • 高斯模糊的方差(Variance)vᵢ 决定模糊距离,方差越大模糊范围越大
      • wᵢ(weight)代表该次模糊的颜色权重
// Depth-aware 1D Gaussian blur pass for screen-space subsurface scattering.
// Accumulates a 7-tap (center + 6) Gaussian along the direction encoded in
// "step", lerping samples back toward the center color when their depth
// differs too much (avoids bleeding across silhouettes).
// uv2:  texture coordinate of the current pixel.
// step: blur direction * strength, scaled by 1/depth below.
vec4 BlurPS(vec2 uv2, vec2 step) {
    // Gaussian tap weights and normalized offsets.
    // NOTE: GLSL ES 1.00 (WebGL1) does not support C-style array
    // initializers, so assign element-wise (same style as the other
    // blur shader in this article).
    float w[6];
    w[0] = w[5] = 0.006;
    w[1] = w[4] = 0.061;
    w[2] = w[3] = 0.242;
    float o[6];
    o[0] = -1.0;
    o[1] = -0.6667;
    o[2] = -0.3333;
    o[3] =  0.3333;
    o[4] =  0.6667;
    o[5] =  1.0;

    vec4 bse = texture2D( diffuseTexture, uv2);
    // Alpha == 0 marks pixels not covered by the material: pass through empty.
    if(bse.w == 0.0){
        return vec4(0.0,0.0,0.0,0.0);
    }

    vec3 colorM = getHdr(bse);                                 // center diffuse color
    float depthM = unpackDepth(texture2D( depthTexture, uv2)); // center depth

    // Center tap: 0.382 is the central Gaussian weight, chosen so that
    // 0.382 + 2*(0.242 + 0.061 + 0.006) = 1.0 (weights sum to one).
    vec3 colorBlurred = colorM;
    colorBlurred *= 0.382;

    // The closer the surface, the larger the on-screen footprint of the
    // scattering radius, hence the 1/depth scaling.
    vec2 finalStep = step / depthM;

    for (int i = 0; i < 6; i++) {
        vec2 offset = uv2 + o[i] * finalStep;
        vec3 color = getHdr(texture2D( diffuseTexture, offset));
        float depth = unpackDepth(texture2D( depthTexture, offset));

        // Reject samples across large depth discontinuities by lerping back
        // to the center color ("correction" is a uniform declared elsewhere).
        float s = min(0.0125 * correction * abs(depthM - depth), 1.0);
        color = mix(color, colorM, s);
        colorBlurred += w[i] * color;
    }

    return setHdr(colorBlurred);
}

示例1

Diffuse

image.png

precision mediump float;
uniform mat4 M;
uniform mat4 V;
uniform mat4 P;
uniform vec3 lightPos_world;
attribute vec3 vertexPos_model;
attribute vec3 vertexNormal_model;
attribute vec2 vertexUV;
varying vec2 uv;
varying vec3 pos_world;
varying vec3 normal_world;
varying vec3 lightDir_world;

// Forward-pass vertex stage: emits the clip-space position and the
// world-space varyings (position, normal, light direction) that the
// diffuse fragment shader consumes.
void main() {
    gl_Position = P * V * M * vec4(vertexPos_model, 1.0);

    pos_world = (M * vec4(vertexPos_model, 1.0)).xyz;
    // M contains no scaling, so transforming the normal by mat3(M) is
    // equivalent to using the inverse-transpose of M.
    normal_world = mat3(M) * vertexNormal_model;
    lightDir_world = lightPos_world - pos_world;
    uv = vertexUV;
}
precision mediump float;
uniform sampler2D diffuseTexture;
uniform vec3 cameraPos_world;
uniform vec3 lightPos_world;
uniform vec3 lightColor;
varying vec2 uv;
varying vec3 pos_world;
varying vec3 normal_world;
varying vec3 lightDir_world;
// Diffuse-only fragment stage: Lambert term plus a constant ambient,
// with inverse-square distance attenuation of the point light.
void main() {
    vec3 ambientColor = vec3(0.2);
    vec3 materialDiffuseColor = texture2D(diffuseTexture, uv).rgb * 0.6; // albedo, dimmed by 0.6
    float distance = length(lightPos_world - pos_world);
    vec3 n = normalize(normal_world);
    vec3 l = normalize(lightDir_world);
    float cosTheta = clamp(dot(n, l), 0.0, 1.0); // Lambert N.L, clamped to front-facing
    
    
    // ambient  = albedo * ambient color
    // point light = albedo * light color * N.L / distance^2
    vec3 finalColor = materialDiffuseColor * (ambientColor + lightColor * cosTheta / (distance * distance));
    gl_FragColor = vec4(finalColor, 1.0);
}

Specular

image.png

precision mediump float;
uniform mat4 M;
uniform mat4 V;
uniform mat4 P;
uniform vec3 lightPos_world;
attribute vec3 vertexPos_model;
attribute vec3 vertexNormal_model;
attribute vec2 vertexUV;
varying vec2 uv;
varying vec3 pos_world;
varying vec3 normal_world;
varying vec3 lightDir_world;

// Vertex stage shared with the specular pass: computes clip-space position
// and forwards world-space position, normal and light direction.
void main() {
    vec4 pos4 = vec4(vertexPos_model, 1.0);
    gl_Position = P * V * M * pos4;
    uv = vertexUV;
    pos_world = (M * pos4).xyz;
    // No scaling in M, so mat3(M) is a valid normal transform
    // (no inverse-transpose required).
    normal_world = mat3(M) * vertexNormal_model;
    lightDir_world = lightPos_world - pos_world;
}
precision mediump float;
uniform sampler2D diffuseTexture;
uniform vec3 cameraPos_world;
uniform vec3 lightPos_world;
uniform vec3 lightColor;
varying vec2 uv;
varying vec3 pos_world;
varying vec3 normal_world;
varying vec3 lightDir_world;
// Specular-only fragment stage: a Phong lobe modulated by a Schlick-Fresnel
// blend between the material specular color and white, with 1/d^2 falloff.
void main() {
    vec3 materialSpecularColor = vec3(0.4);
    float materialShininess = 5.0;
    float distance = length(lightPos_world - pos_world);
    vec3 n = normalize(normal_world);
    vec3 e = normalize(cameraPos_world - pos_world); // view direction
    vec3 l = normalize(lightDir_world);
    vec3 finalColor;
    if (dot(n, l) < 0.0) {
        // Surface faces away from the light: no specular contribution.
        finalColor = vec3(0.0);
    }
    else {
        vec3 r = reflect(-l, n);
        float cosAlpha = clamp(dot(e, r), 0.0, 1.0); // Phong: view vs. reflected-light angle
        vec3 h = normalize(l + e);                   // half vector
        float w = pow(1.0 - max(0.0, dot(h, e)), 5.0);  // Schlick's approximation
        
        // Specular = light * Fresnel blend (specular color -> white at
        // grazing angles, weight w) * Phong lobe cosAlpha^shininess / d^2.
        finalColor = lightColor * mix(vec3(materialSpecularColor), vec3(1.0), w)
        * pow(cosAlpha, materialShininess) / (distance * distance);
    }
    gl_FragColor = vec4(finalColor, 1.0);
}

后处理Texture

  • BlurDiffuse + Diffuse image.png
precision mediump float;
attribute vec2 vertexPos;
varying vec2 uv;

// Full-screen quad pass-through: vertexPos is already in NDC, range [-1,1].
void main() {
    gl_Position = vec4(vertexPos, 0.0, 1.0);
    // Remap NDC [-1,1] to texture coordinates [0,1].
    uv = (vertexPos + 1.0) / 2.0;
}
precision mediump float;
uniform sampler2D colorTexture;
uniform sampler2D depthTexture;
uniform vec2 zNearFar;
uniform vec2 step;
uniform float depthDifferenceCutoff;
varying vec2 uv;

// See http://stackoverflow.com/questions/6652253/getting-the-true-z-value-from-the-depth-buffer

// Convert a non-linear depth-buffer value z_b in [0,1] back to eye-space
// (linear) depth, using the near/far planes packed in the zNearFar uniform.
float linearDepth(in float z_b) {
    float zn = zNearFar.x;
    float zf = zNearFar.y;
    float ndcZ = 2.0 * z_b - 1.0; // depth remapped to NDC, [-1,1]
    return 2.0 * zn * zf / (zf + zn - ndcZ * (zf - zn));
}
// Depth-aware 1D Gaussian blur for the subsurface-scattering pass.
// colorM:        center pixel color (diffuse render target).
// depthM_buffer: center pixel raw depth-buffer value.
// The blur direction/length comes from the "step" uniform.
vec4 blur(in vec4 colorM, in float depthM_buffer) {
    // Gaussian weights
    float w[8]; // 8 taps, 4 on each side of the center
    w[0] = w[7] = 0.016;
    w[1] = w[6] = 0.054;
    w[2] = w[5] = 0.122;
    w[3] = w[4] = 0.195;
    float o[8]; // normalized tap offsets in [-1,1]
    o[0] = -1.00;
    o[1] = -0.75;
    o[2] = -0.50;
    o[3] = -0.25;
    o[4] = 0.25;
    o[5] = 0.50;
    o[6] = 0.75;
    o[7] = 1.00;
    
    // Fetch linear depth for current pixel.
    
    float depthM = linearDepth(depthM_buffer); // convert raw buffer depth to linear depth
    
    // Accumulate center sample, multiplying it with its Gaussian weight:
    
    vec4 colorBlurred = colorM; // center diffuse sample
    colorBlurred.rgb *= 0.227; // center Gaussian weight: 0.227 + 2*(0.195+0.122+0.054+0.016) ~= 1.0
    
    // Calculate the step that we will use to fetch the surrounding pixels, // where "step" is:
    // step = sssStrength * gaussianWidth * pixelSize * dir
    // The closer the pixel, the stronger the effect needs to be, hence
    // the factor 1.0 / depthM.
    vec2 finalStep = step / depthM; // (the caller passes step = (0.0050, 0))
    
    // Accumulate the other samples:
    
    for (int i = 0; i < 8; i++) {
        // Fetch color and depth for current sample:
        vec2 offset = uv + o[i] * finalStep;
        vec3 color = texture2D(colorTexture, offset).rgb; 
        float depth = linearDepth(texture2D(depthTexture, offset).r);
        
        // If the difference in depth is huge, we lerp color back to "colorM":
        
        float s = min(depthDifferenceCutoff * abs(depthM - depth), 1.0); // depthDifferenceCutoff is 50 in this demo
        color = mix(color, colorM.rgb, s); // blend back toward the center color by s
        
        // Accumulate:
        
        colorBlurred.rgb += w[i] * color;
    }
    return colorBlurred;
}
// Entry point: blur foreground pixels, pass the background through unchanged.
void main() {
    vec4 centerColor = texture2D(colorTexture, uv);
    float rawDepth = texture2D(depthTexture, uv).r; // NOTE(review): verify depthTexture is actually bound for this pass
    if (rawDepth >= 1.0) {
        // Cleared depth == background: leave untouched.
        gl_FragColor = centerColor;
    } else {
        gl_FragColor = blur(centerColor, rawDepth);
    }
}

后处理Texture

  • Diffuse + Specular
precision mediump float;
attribute vec2 vertexPos;
varying vec2 uv;

// Full-screen quad vertex stage for the combine pass: vertexPos is NDC.
void main() {
    gl_Position = vec4(vertexPos, 0.0, 1.0);
    // NDC [-1,1] -> UV [0,1].
    uv = (vertexPos + 1.0) / 2.0;
}
precision mediump float;
uniform sampler2D diffuseTexture;
uniform sampler2D specularTexture;
varying vec2 uv;

// Combine pass: additively merge the (blurred) diffuse and specular targets.
void main() {
    vec4 diffuseSample = texture2D(diffuseTexture, uv);
    vec4 specularSample = texture2D(specularTexture, uv);
    gl_FragColor = diffuseSample + specularSample;
}