WebGL实现体渲染的一种方法

3,208 阅读3分钟

体渲染一般用于展示3D分布图及扫描图等。本文讨论一种适合在WebGL端实现的体渲染方法。

设计思路[1]

为了简单起见,本文只讨论立方体区域的体渲染方法。3D图像信息可以存储在3DTexture中,采样并计算色值的过程主要在片元着色器中完成。

射线检测

如上图所示,O代表相机的位置,V代表视线的观察方向,P1与P2代表视线在立方体表面的两个交点。若求得P1与P2的坐标,则可以通过P1到P2的均匀采样与Alpha混合来得到最终眼睛看到的色值。

P1与P2的坐标如何计算呢?

为了在片元着色器中计算方便,计算坐标系确定方形区域的中心为原点。点O的坐标可以通过相机世界坐标左乘立方体变换矩阵的逆来求得,P1的坐标即为该片元的坐标。通过O与P1的坐标则可以确定视线的方向向量V。

假设立方体的size为(0.5,0.5,0.5),且立方体的原点坐标为(0,0,0),可以通过以下方法计算线段O-P1与O-P2的距离[2]

// axis aligned box centered at the origin, with size boxSize
// Slab-method ray/box intersection (quoted from reference [2]).
// ro: ray origin, rd: ray direction (should be normalized so the returned
// values are distances), boxSize: half-extents of the box.
// Returns (tNear, tFar) along the ray, or vec2(-1.0) on a miss.
// axis aligned box centered at the origin, with size boxSize
vec2 boxIntersection(vec3 ro, vec3 rd, vec3 boxSize) {
    vec3 m = 1.0 / rd; // can precompute if traversing a set of aligned boxes
    vec3 n = m * ro;   // can precompute if traversing a set of aligned boxes
    vec3 k = abs(m) * boxSize;
    vec3 t1 = -n - k;  // per-axis slab entry times
    vec3 t2 = -n + k;  // per-axis slab exit times
    float tN = max(max(t1.x, t1.y), t1.z); // latest entry across the three slabs
    float tF = min(min(t2.x, t2.y), t2.z); // earliest exit across the three slabs
    if( tN > tF || tF < 0.0) return vec2(-1.0); // no intersection
    return vec2( tN, tF );
}

已知坐标O,向量V,以及线段O-P1与O-P2的距离,则可以计算出P1与P2的坐标了,进而可以通过P1到P2的均匀采样与Alpha混合来得到最终眼睛看到的色值。

具体实现

采用zen3d作为示例的WebGL渲染库。

片元着色器的主要代码如下[1]

// 3D texture sampling needs explicit high precision in the fragment shader.
precision highp sampler3D;

#include <common_frag>
// Fragment position in the cube's model space, interpolated from the vertex shader.
varying vec3 v_modelPos;

// 1D palette (lookup) texture mapping normalized intensity -> color.
// NOTE(review): "platte" looks like a typo for "palette"; renaming would
// require a matching change on the JS side, so it is kept as-is.
uniform sampler2D platteTexture;
// Scalar volume data sampled along the view ray.
uniform sampler3D densityTexture;
// Inverse of the cube's world transform: world space -> model space.
uniform mat4 uInvTransform;
// Global multiplier applied to each sample's alpha before compositing.
uniform float uAlphaCorrection;
// Base marching step: the cube's diagonal length sqrt(3) split into 256 samples.
// NOTE(review): u_CameraPosition (used below) is presumably declared by
// <common_frag> — confirm against zen3d's shader chunks.
const float STEP = 1.73205081 / 256.0;

// http://iquilezles.org/www/articles/intersectors/intersectors.htm
// axis aligned box centered at the origin, with size boxSize
// Slab-method intersection of a ray with an axis-aligned box centered at
// the origin. Based on http://iquilezles.org/www/articles/intersectors/
// ro: ray origin, rd: ray direction, boxSize: half-extents of the box.
// Returns (tNear, tFar) along the ray, or vec2(-1.0) when the ray misses
// the box or the box lies entirely behind the origin.
vec2 boxIntersection(vec3 ro, vec3 rd, vec3 boxSize) {
	vec3 invDir = 1.0 / rd;
	vec3 scaledOrigin = invDir * ro;
	vec3 halfSpan = abs(invDir) * boxSize;
	// Per-axis entry and exit parameters for the three slabs.
	vec3 tLo = -scaledOrigin - halfSpan;
	vec3 tHi = -scaledOrigin + halfSpan;
	// The ray is inside the box between the latest entry and earliest exit.
	float tNear = max(tLo.x, max(tLo.y, tLo.z));
	float tFar = min(tHi.x, min(tHi.y, tHi.z));
	if (tNear > tFar || tFar < 0.0) {
		return vec2(-1.0); // no intersection
	}
	return vec2(tNear, tFar);
}

// Map a raw density sample to an RGBA color via the palette texture.
// Intensities of 0.46 and above saturate the palette, which makes the
// volume look brighter overall.
vec4 getColor(float intensity) {
	float t = min(0.46, intensity) / 0.46;
	// NOTE(review): texture2D is GLSL ES 1.00 while texture() (used below for
	// the sampler3D) is ES 3.00 — presumably zen3d's preprocessor remaps it;
	// confirm against the engine's shader chunks.
	vec4 rgb = texture2D(platteTexture, vec2(t, 0));
	// Alpha tracks the normalized intensity, with a small floor so very
	// faint samples stay barely visible instead of vanishing.
	float a = (t < 0.03) ? 0.01 : t;
	return vec4(rgb.r, rgb.g, rgb.b, a);
}

// Sample the volume at a model-space position and convert it to a color.
// Model space spans [-0.5, 0.5] per axis; shift into [0, 1] texture space.
vec4 sampleAs3DTexture(vec3 texCoord) {
	vec3 uvw = texCoord + vec3(0.5);
	return getColor(texture(densityTexture, uvw).r);
}

// Ray-march the volume from the fragment's entry point to the box's exit
// point, compositing samples front to back.
// transparent (out): accumulated alpha of the ray.
// P: fragment position, V: view direction — NOTE(review): V is unused; the
// direction is recomputed in model space below.
// Returns the accumulated (premultiplied) RGB color.
vec3 shade(inout float transparent, in vec3 P, in vec3 V) {
	// Work in the box's model space, where the box is axis-aligned,
	// centered at the origin, with half-extent 0.5.
	vec3 entryPos = (uInvTransform * vec4(P.xyz, 1.0)).xyz;
	vec3 eyePos = (uInvTransform * vec4(u_CameraPosition.xyz, 1.0)).xyz;
	vec3 marchDir = normalize(entryPos - eyePos);

	// Locate where the view ray leaves the box; on a failed intersection
	// test fall back to a zero-length ray at the entry point.
	vec3 exitPos = entryPos;
	vec2 hit = boxIntersection(eyePos, marchDir, vec3(0.5));
	if (hit.x > -1.0 && hit.y > -1.0) {
		exitPos = eyePos + marchDir * hit.y;
	}

	// Split the ray span into equal steps of roughly STEP length.
	float span = length(exitPos - entryPos);
	int steps = int(max(1.0, floor(span / STEP)));
	float stepLen = span / float(steps);
	vec3 stride = marchDir * stepLen;

	vec3 samplePos = entryPos;
	vec4 accumColor = vec4(0.0); // premultiplied color accumulator
	float accumAlpha = 0.0;      // opacity accumulated so far
	// Front-to-back alpha compositing: each sample is attenuated by the
	// remaining transparency (1 - accumAlpha).
	for (int i = 0; i < steps; i++) {
		vec4 c = sampleAs3DTexture(samplePos);
		float a = c.a * uAlphaCorrection * (1.0 - accumAlpha);
		accumColor += c * a;
		accumAlpha += a;
		samplePos += stride;
	}
	transparent = accumAlpha;
	return accumColor.xyz;
}

// Entry point: march the volume for this fragment and output the
// composited color with its accumulated opacity.
void main() {
	vec3 P = v_modelPos;
	vec3 V = normalize(v_modelPos - u_CameraPosition);
	float transparent;
	vec3 rgb = shade(transparent, P, V);
	gl_FragColor = vec4(rgb, transparent);
}

完整示例代码在这里,采用simplex-noise[3]生成随机的3D纹理数据。

测试结果

渲染结果截图:

渲染结果截图

在线测试

参考资料

[1] github: modelo/API_samples/volume-rendering

[2] iquilezles.org

[3] github: simplex-noise