在之前的一篇 文章 里详解了射线碰撞的工作原理,计算量比较大,对于需要频繁使用物体拾取的场景其实不太友好,模型很大的话帧率很有可能瞬间就掉下来,这次介绍下 GPU拾取
的原理,以及如何高效地使用,并能应用到带动画的物体,目前对于 BatchedMesh
的支持还没研究过。
three.js
的官网有一个 GPU
拾取的例子,是针对静态物体的,但是却要重新渲染一个场景和显示场景保持同步,如果场景中动画较多,那不仅开销大,同步也有可能有问题。搜了下看到一个仓库, 测试了下效果不错,还适配了 sprite
类,没有额外的开销,这次就来看下它的源码。
仓库截图:
GPU 拾取原理
拾取是为了识别当前鼠标位置的 3D
物体,传统的 CPU
拾取做法就是转换点击位置坐标,生成射线和场景求交。而 GPU
拾取的原理比较简单,直接是把当前场景渲染结果(颜色)替换成 ID,读取这个位置的 Buffer
,这样就可以得到物体的 ID
了,相当于给物体渲染出来的图像位置的输出(本来是颜色),换成了它的(ID)。
简单画下原理图:
这里一个关键点是使用 onAfterRender
来获取刚渲染完的渲染对象,用来替换着色器。
// An empty placeholder scene: rendering it triggers its onAfterRender hook,
// which replays the real scene's cached render lists with id-encoding materials.
let emptyScene = new THREE.Scene();
emptyScene.onAfterRender = renderList;
这个时候通过以下代码获取渲染对象,就可以替换着色器了。
let renderList = renderer.renderLists.get(scene, 0);
代码解析
拾取的时候(鼠标按下或抬起):
this.pick = function (x, y) {
  let w = renderer.domElement.width;
  let h = renderer.domElement.height;
  // Restrict the projection to the single pixel under the cursor, so the
  // picking pass renders (and reads back) only a 1x1 region.
  // NOTE(review): x/y are assumed to be in canvas pixel units already; if
  // they come from mouse events in CSS pixels, scale by devicePixelRatio first.
  camera.setViewOffset(w, h, x, y, 1, 1);
  let currRenderTarget = renderer.getRenderTarget();
  // Render with the id-encoding shaders into pickingTarget (the empty scene's
  // onAfterRender hook replays the real render lists), then restore state.
  renderer.setRenderTarget(pickingTarget);
  renderer.render(emptyScene, camera);
  renderer.readRenderTargetPixels(pickingTarget, 0, 0, pickingTarget.width, pickingTarget.height, pixelBuffer);
  renderer.setRenderTarget(currRenderTarget);
  camera.clearViewOffset();
  // Reassemble the 32-bit object id from the RGBA bytes (the inverse of the
  // encoding in processItem).
  // BUG FIX: `pixelBuffer[0] << 24` is a signed 32-bit operation in JS and
  // goes negative when the high byte is >= 128, corrupting the id; `>>> 0`
  // forces the high-byte contribution to be interpreted as unsigned.
  let val = ((pixelBuffer[0] << 24) >>> 0) + (pixelBuffer[1] << 16) + (pixelBuffer[2] << 8) + pixelBuffer[3];
  return val;
};
状态处理和输出带 id
的 buffer
:
// Replays the render lists produced by the last normal render of `scene`.
// This is the core trick: the lists are still filled with valid data, so we
// can submit them again for picking without re-culling the whole scene.
function renderList() {
  const lastFrameLists = renderer.renderLists.get(scene, 0);
  for (const item of lastFrameLists.opaque) {
    processItem(item);
  }
  for (const item of lastFrameLists.transmissive) {
    processItem(item);
  }
  for (const item of lastFrameLists.transparent) {
    processItem(item);
  }
}
// Re-submits one cached render-list item with an id-encoding material, so the
// "color" written to the picking target is the owning object's id.
function processItem(renderItem) {
  let object = renderItem.object;
  let objId = object.id;
  let material = renderItem.material;
  let geometry = renderItem.geometry;
  // Feature flags deciding which shader variant this item needs.
  // NOTE(review): `material.morphTargets` was removed from materials in newer
  // three.js releases (morphing is auto-detected from geometry since ~r133);
  // confirm the three.js version this code targets still exposes it.
  let useMorphing = 0;
  if (material.morphTargets === true) {
    if (geometry.isBufferGeometry === true) {
      useMorphing =
        geometry.morphAttributes && geometry.morphAttributes.position && geometry.morphAttributes.position.length > 0
          ? 1
          : 0;
    } else if (geometry.isGeometry === true) {
      useMorphing = geometry.morphTargets && geometry.morphTargets.length > 0 ? 1 : 0;
    }
  }
  let useSkinning = object.isSkinnedMesh ? 1 : 0;
  let useInstancing = object.isInstancedMesh === true ? 1 : 0;
  let frontSide = material.side === THREE.FrontSide ? 1 : 0;
  let backSide = material.side === THREE.BackSide ? 1 : 0;
  let doubleSide = material.side === THREE.DoubleSide ? 1 : 0;
  let sprite = material.type === 'SpriteMaterial' ? 1 : 0;
  // Whether sprite size attenuates with distance from the camera.
  let sizeAttenuation = material.sizeAttenuation ? 1 : 0;
  // Pack the flags into a bitmask used as the shader-variant cache key, so
  // each combination is compiled only once.
  let index = (useMorphing << 0) |
    (useSkinning << 1) |
    (useInstancing << 2) |
    (frontSide << 3) |
    (backSide << 4) |
    (doubleSide << 5) |
    (sprite << 6) |
    (sizeAttenuation << 7);
  // A per-object override material takes precedence over the shared cache.
  // NOTE(review): an override must provide `uniforms.objectId`, otherwise the
  // assignment further down will throw — confirm against callers.
  let renderMaterial = object.pickingMaterial ? object.pickingMaterial : materialCache[index];
  if (!renderMaterial) {
    let vertexShader = THREE.ShaderChunk.meshbasic_vert;
    if (sprite) {
      vertexShader = THREE.ShaderChunk.sprite_vert;
      if (sizeAttenuation) vertexShader = '#define USE_SIZEATTENUATION\n\n' + vertexShader;
    }
    // The fragment shader simply outputs the encoded object id as the color.
    renderMaterial = new THREE.ShaderMaterial({
      vertexShader: vertexShader,
      fragmentShader: /* */`
uniform vec4 objectId;
void main() {
gl_FragColor = objectId;
}
`,
      side: material.side,
    });
    // BUG FIX: these three statements were originally chained with trailing
    // comma operators (`= ...,`), which only worked by accident; terminate
    // each assignment properly.
    renderMaterial.skinning = useSkinning > 0;
    renderMaterial.morphTargets = useMorphing > 0;
    renderMaterial.uniforms = {
      objectId: { value: [1.0, 1.0, 1.0, 1.0] },
    };
    materialCache[index] = renderMaterial;
  }
  if (sprite) {
    renderMaterial.uniforms.rotation = { value: material.rotation };
    renderMaterial.uniforms.center = { value: object.center };
  }
  // Split the 32-bit id across the four RGBA channels as 0..1 floats so the
  // fragment shader can emit it as a color; pick() reverses this encoding.
  renderMaterial.uniforms.objectId.value = [
    (objId >> 24 & 255) / 255,
    (objId >> 16 & 255) / 255,
    (objId >> 8 & 255) / 255,
    (objId & 255) / 255,
  ];
  renderMaterial.uniformsNeedUpdate = true;
  // Draw this item immediately with the picking material; the cached material
  // can be shared because the uniforms are consumed before the next item.
  renderer.renderBufferDirect(camera, null, geometry, renderMaterial, object, null);
}