Commit 0e004ba

WebGPURenderer: Implement GGX VNDF importance sampling for PMREM
Port the GGX VNDF (Visible Normal Distribution Function) importance sampling implementation from WebGLRenderer's PMREM to WebGPURenderer's TSL-based PMREM generator. This provides more accurate environment map prefiltering by using Monte Carlo integration with VNDF importance sampling to represent the GGX BRDF for physically-based rendering.

Changes to PMREMUtils.js:
- Added GGX VNDF sampling helper functions:
  - radicalInverse_VdC: Van der Corput radical inverse
  - hammersley: Hammersley sequence for quasi-Monte Carlo sampling
  - importanceSampleGGX_VNDF: GGX VNDF importance sampling (Heitz 2018)
  - ggxConvolution: main convolution function using VNDF sampling

Changes to PMREMGenerator.js:
- Added GGX_SAMPLES constant (1024 samples)
- Removed _axisDirections (no longer needed with GGX filtering)
- Added _ggxMaterial property
- Replaced the blur-based loop in _applyPMREM with GGX filtering
- Added _applyGGXFilter method for incremental roughness filtering
- Added _getGGXShader function to create the GGX material
- Updated documentation to reflect GGX VNDF usage

Technical notes:
- Uses texture() instead of texture( null ) for the EmptyTexture default
- Helper functions don't use setLayout (TSL pattern for nested Fn)
- Implements incremental roughness filtering to avoid over-blurring
- Applies a blur strength mapping (0.05 + roughness * 0.95) for quality
- Performs two-pass rendering: into the pingPong target, then back to cubeUV
1 parent ca2f49e commit 0e004ba
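
At its core, ggxConvolution (see the PMREMUtils.js diff below) evaluates a Monte Carlo estimator: a weighted average of environment samples taken along VNDF-importance-sampled directions. A minimal plain-JavaScript sketch of that estimator, with sampleDirection and sampleEnv as hypothetical stand-ins for the TSL helpers, looks like this:

// Illustrative sketch only, not part of the commit. sampleDirection( i ) stands in for
// hammersley + importanceSampleGGX_VNDF + the tangent-space transform, and sampleEnv( L )
// stands in for bilinearCubeUV; N is the fixed output direction of the current texel.
function prefilter( N, numSamples, sampleDirection, sampleEnv ) {

	const color = [ 0, 0, 0 ];
	let totalWeight = 0;

	for ( let i = 0; i < numSamples; i ++ ) {

		const L = sampleDirection( i ); // importance-sampled light direction
		const NdotL = Math.max( dot( N, L ), 0 );

		if ( NdotL > 0 ) {

			const c = sampleEnv( L ); // environment radiance along L

			color[ 0 ] += c[ 0 ] * NdotL;
			color[ 1 ] += c[ 1 ] * NdotL;
			color[ 2 ] += c[ 2 ] * NdotL;
			totalWeight += NdotL;

		}

	}

	// Weighted average sum( env( L ) * NdotL ) / sum( NdotL ), as in the split-sum approximation.
	return totalWeight > 0 ? color.map( ( v ) => v / totalWeight ) : color;

}

function dot( a, b ) {

	return a[ 0 ] * b[ 0 ] + a[ 1 ] * b[ 1 ] + a[ 2 ] * b[ 2 ];

}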

2 files changed: +234 −36 lines

src/nodes/pmrem/PMREMUtils.js

Lines changed: 118 additions & 3 deletions
@@ -1,6 +1,6 @@
-import { Fn, int, float, vec2, vec3, vec4, If } from '../tsl/TSLBase.js';
-import { cos, sin, abs, max, exp2, log2, clamp, fract, mix, floor, normalize, cross } from '../math/MathNode.js';
-import { mul } from '../math/OperatorNode.js';
+import { Fn, int, uint, float, vec2, vec3, vec4, If } from '../tsl/TSLBase.js';
+import { cos, sin, abs, max, min, exp2, log2, clamp, fract, mix, floor, normalize, cross, dot, sqrt, length } from '../math/MathNode.js';
+import { mul, add, sub } from '../math/OperatorNode.js';
 import { select } from '../math/ConditionalNode.js';
 import { Loop, Break } from '../utils/LoopNode.js';

@@ -286,3 +286,118 @@ export const blur = /*@__PURE__*/ Fn( ( { n, latitudinal, poleAxis, outputDirect
 	return vec4( gl_FragColor, 1 );
 
 } );
+
+// GGX VNDF importance sampling functions
+
+// Van der Corput radical inverse for generating quasi-random sequences
+const radicalInverse_VdC = /*@__PURE__*/ Fn( ( [ bits_immutable ] ) => {
+
+	const bits = uint( bits_immutable ).toVar();
+	bits.assign( bits.shiftLeft( uint( 16 ) ).bitOr( bits.shiftRight( uint( 16 ) ) ) );
+	bits.assign( bits.bitAnd( uint( 0x55555555 ) ).shiftLeft( uint( 1 ) ).bitOr( bits.bitAnd( uint( 0xAAAAAAAA ) ).shiftRight( uint( 1 ) ) ) );
+	bits.assign( bits.bitAnd( uint( 0x33333333 ) ).shiftLeft( uint( 2 ) ).bitOr( bits.bitAnd( uint( 0xCCCCCCCC ) ).shiftRight( uint( 2 ) ) ) );
+	bits.assign( bits.bitAnd( uint( 0x0F0F0F0F ) ).shiftLeft( uint( 4 ) ).bitOr( bits.bitAnd( uint( 0xF0F0F0F0 ) ).shiftRight( uint( 4 ) ) ) );
+	bits.assign( bits.bitAnd( uint( 0x00FF00FF ) ).shiftLeft( uint( 8 ) ).bitOr( bits.bitAnd( uint( 0xFF00FF00 ) ).shiftRight( uint( 8 ) ) ) );
+	return float( bits ).mul( 2.3283064365386963e-10 ); // / 0x100000000
+
+} );
+
+// Hammersley sequence for quasi-Monte Carlo sampling
+const hammersley = /*@__PURE__*/ Fn( ( [ i, N ] ) => {
+
+	return vec2( float( i ).div( float( N ) ), radicalInverse_VdC( i ) );
+
+} );
+
+// GGX VNDF importance sampling (Eric Heitz 2018)
+// "Sampling the GGX Distribution of Visible Normals"
+// https://jcgt.org/published/0007/04/01/
+const importanceSampleGGX_VNDF = /*@__PURE__*/ Fn( ( [ Xi, V_immutable, roughness_immutable ] ) => {

+	const V = vec3( V_immutable ).toVar();
+	const roughness = float( roughness_immutable );
+	const alpha = roughness.mul( roughness ).toVar();
+
+	// Section 3.2: Transform view direction to hemisphere configuration
+	const Vh = normalize( vec3( alpha.mul( V.x ), alpha.mul( V.y ), V.z ) ).toVar();
+
+	// Section 4.1: Orthonormal basis
+	const lensq = Vh.x.mul( Vh.x ).add( Vh.y.mul( Vh.y ) ).toVar();
+	const T1 = select( lensq.greaterThan( 0.0 ), vec3( Vh.y.negate(), Vh.x, 0.0 ).div( sqrt( lensq ) ), vec3( 1.0, 0.0, 0.0 ) ).toVar();
+	const T2 = cross( Vh, T1 ).toVar();
+
+	// Section 4.2: Parameterization of projected area
+	const r = sqrt( Xi.x );
+	const phi = mul( 2.0, 3.14159265359 ).mul( Xi.y );
+	const t1 = r.mul( cos( phi ) ).toVar();
+	const t2 = r.mul( sin( phi ) ).toVar();
+	const s = mul( 0.5, Vh.z.add( 1.0 ) ).toVar();
+	t2.assign( s.oneMinus().mul( sqrt( t1.mul( t1 ).oneMinus() ) ).add( s.mul( t2 ) ) );
+
+	// Section 4.3: Reprojection onto hemisphere
+	const Nh = T1.mul( t1 ).add( T2.mul( t2 ) ).add( Vh.mul( sqrt( max( 0.0, t1.mul( t1 ).add( t2.mul( t2 ) ).oneMinus() ) ) ) ).toVar();
+
+	// Section 3.4: Transform back to ellipsoid configuration
+	return normalize( vec3( alpha.mul( Nh.x ), alpha.mul( Nh.y ), max( 0.0, Nh.z ) ) );
+
+} );
+
+// GGX convolution using VNDF importance sampling
+export const ggxConvolution = /*@__PURE__*/ Fn( ( { roughness, mipInt, envMap, N_immutable, GGX_SAMPLES, CUBEUV_TEXEL_WIDTH, CUBEUV_TEXEL_HEIGHT, CUBEUV_MAX_MIP } ) => {
+
+	const N = vec3( N_immutable ).toVar();
+
+	const prefilteredColor = vec3( 0.0 ).toVar();
+	const totalWeight = float( 0.0 ).toVar();
+
+	// For very low roughness, just sample the environment directly
+	If( roughness.lessThan( 0.001 ), () => {
+
+		prefilteredColor.assign( bilinearCubeUV( envMap, N, mipInt, CUBEUV_TEXEL_WIDTH, CUBEUV_TEXEL_HEIGHT, CUBEUV_MAX_MIP ) );
+
+	} ).Else( () => {
+
+		// Tangent space basis for VNDF sampling
+		const up = select( abs( N.z ).lessThan( 0.999 ), vec3( 0.0, 0.0, 1.0 ), vec3( 1.0, 0.0, 0.0 ) ).toVar();
+		const tangent = normalize( cross( up, N ) ).toVar();
+		const bitangent = cross( N, tangent ).toVar();
+
+		Loop( { start: uint( 0 ), end: GGX_SAMPLES }, ( { i } ) => {
+
+			const Xi = hammersley( i, GGX_SAMPLES );
+
+			// For PMREM, V = N, so in tangent space V is always (0, 0, 1)
+			const H_tangent = importanceSampleGGX_VNDF( Xi, vec3( 0.0, 0.0, 1.0 ), roughness );
+
+			// Transform H back to world space
+			const H = normalize( tangent.mul( H_tangent.x ).add( bitangent.mul( H_tangent.y ) ).add( N.mul( H_tangent.z ) ) );
+			const L = normalize( H.mul( dot( N, H ).mul( 2.0 ) ).sub( N ) );
+
+			const NdotL = max( dot( N, L ), 0.0 );
+
+			If( NdotL.greaterThan( 0.0 ), () => {
+
+				// Sample environment at fixed mip level
+				// VNDF importance sampling handles the distribution filtering
+				const sampleColor = bilinearCubeUV( envMap, L, mipInt, CUBEUV_TEXEL_WIDTH, CUBEUV_TEXEL_HEIGHT, CUBEUV_MAX_MIP );
+
+				// Weight by NdotL for the split-sum approximation
+				// VNDF PDF naturally accounts for the visible microfacet distribution
+				prefilteredColor.addAssign( sampleColor.mul( NdotL ) );
+				totalWeight.addAssign( NdotL );
+
+			} );
+
+		} );
+
+		If( totalWeight.greaterThan( 0.0 ), () => {
+
+			prefilteredColor.assign( prefilteredColor.div( totalWeight ) );
+
+		} );
+
+	} );
+
+	return vec4( prefilteredColor, 1.0 );
+
+} );
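
For reference, the radicalInverse_VdC and hammersley helpers above compute the standard Hammersley point set; a plain-JavaScript equivalent for quick CPU-side verification (illustrative only, not part of the commit) is:

// Van der Corput radical inverse via 32-bit bit reversal, then scaled into [0, 1).
function radicalInverseVdC( bits ) {

	bits = ( ( bits << 16 ) | ( bits >>> 16 ) ) >>> 0;
	bits = ( ( ( bits & 0x55555555 ) << 1 ) | ( ( bits & 0xAAAAAAAA ) >>> 1 ) ) >>> 0;
	bits = ( ( ( bits & 0x33333333 ) << 2 ) | ( ( bits & 0xCCCCCCCC ) >>> 2 ) ) >>> 0;
	bits = ( ( ( bits & 0x0F0F0F0F ) << 4 ) | ( ( bits & 0xF0F0F0F0 ) >>> 4 ) ) >>> 0;
	bits = ( ( ( bits & 0x00FF00FF ) << 8 ) | ( ( bits & 0xFF00FF00 ) >>> 8 ) ) >>> 0;
	return bits * 2.3283064365386963e-10; // / 0x100000000

}

// The i-th 2D Hammersley point: x is the regular sequence i / N, y is the radical inverse of i.
function hammersley( i, N ) {

	return [ i / N, radicalInverseVdC( i ) ];

}

// e.g. hammersley( 1, 1024 ) -> [ 0.0009765625, 0.5 ]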

src/renderers/common/extras/PMREMGenerator.js

Lines changed: 116 additions & 33 deletions
@@ -1,11 +1,11 @@
 import NodeMaterial from '../../../materials/nodes/NodeMaterial.js';
-import { getDirection, blur } from '../../../nodes/pmrem/PMREMUtils.js';
+import { getDirection, blur, ggxConvolution } from '../../../nodes/pmrem/PMREMUtils.js';
 import { equirectUV } from '../../../nodes/utils/EquirectUV.js';
 import { uniform } from '../../../nodes/core/UniformNode.js';
 import { uniformArray } from '../../../nodes/accessors/UniformArrayNode.js';
 import { texture } from '../../../nodes/accessors/TextureNode.js';
 import { cubeTexture } from '../../../nodes/accessors/CubeTextureNode.js';
-import { float, vec3 } from '../../../nodes/tsl/TSLBase.js';
+import { float, uint, vec3 } from '../../../nodes/tsl/TSLBase.js';
 import { uv } from '../../../nodes/accessors/UV.js';
 import { attribute } from '../../../nodes/core/AttributeNode.js';

@@ -34,42 +34,25 @@ import { warn, error, warnOnce } from '../../../utils.js';
 
 const LOD_MIN = 4;
 
-// The standard deviations (radians) associated with the extra mips. These are
-// chosen to approximate a Trowbridge-Reitz distribution function times the
-// geometric shadowing function. These sigma values squared must match the
-// variance #defines in cube_uv_reflection_fragment.glsl.js.
+// The standard deviations (radians) associated with the extra mips.
+// Used for scene blur in fromScene() method.
 const EXTRA_LOD_SIGMA = [ 0.125, 0.215, 0.35, 0.446, 0.526, 0.582 ];
 
 // The maximum length of the blur for loop. Smaller sigmas will use fewer
 // samples and exit early, but not recompile the shader.
+// Used for scene blur in fromScene() method.
 const MAX_SAMPLES = 20;
 
+// GGX VNDF importance sampling configuration
+const GGX_SAMPLES = 1024;
+
 const _flatCamera = /*@__PURE__*/ new OrthographicCamera( - 1, 1, 1, - 1, 0, 1 );
 const _cubeCamera = /*@__PURE__*/ new PerspectiveCamera( 90, 1 );
 const _clearColor = /*@__PURE__*/ new Color();
 let _oldTarget = null;
 let _oldActiveCubeFace = 0;
 let _oldActiveMipmapLevel = 0;
 
-// Golden Ratio
-const PHI = ( 1 + Math.sqrt( 5 ) ) / 2;
-const INV_PHI = 1 / PHI;
-
-// Vertices of a dodecahedron (except the opposites, which represent the
-// same axis), used as axis directions evenly spread on a sphere.
-const _axisDirections = [
-	/*@__PURE__*/ new Vector3( - PHI, INV_PHI, 0 ),
-	/*@__PURE__*/ new Vector3( PHI, INV_PHI, 0 ),
-	/*@__PURE__*/ new Vector3( - INV_PHI, 0, PHI ),
-	/*@__PURE__*/ new Vector3( INV_PHI, 0, PHI ),
-	/*@__PURE__*/ new Vector3( 0, PHI, - INV_PHI ),
-	/*@__PURE__*/ new Vector3( 0, PHI, INV_PHI ),
-	/*@__PURE__*/ new Vector3( - 1, 1, - 1 ),
-	/*@__PURE__*/ new Vector3( 1, 1, - 1 ),
-	/*@__PURE__*/ new Vector3( - 1, 1, 1 ),
-	/*@__PURE__*/ new Vector3( 1, 1, 1 )
-];
-
 const _origin = /*@__PURE__*/ new Vector3();
 
 // maps blur materials to their uniforms dictionary

@@ -96,9 +79,11 @@ const _outputDirection = /*@__PURE__*/ vec3( _direction.x, _direction.y, _direct
 * higher roughness levels. In this way we maintain resolution to smoothly
 * interpolate diffuse lighting while limiting sampling computation.
 *
-* Paper: Fast, Accurate Image-Based Lighting:
-* {@link https://drive.google.com/file/d/15y8r_UpKlU9SvV4ILb0C3qCPecS8pvLz/view}
-*/
+* The prefiltering uses GGX VNDF (Visible Normal Distribution Function)
+* importance sampling based on "Sampling the GGX Distribution of Visible Normals"
+* (Heitz, 2018) to generate environment maps that accurately match the GGX BRDF
+* used in material rendering for physically-based image-based lighting.
+*/
 class PMREMGenerator {
 
 	/**

@@ -119,6 +104,7 @@ class PMREMGenerator {
 		this._lodMeshes = [];
 
 		this._blurMaterial = null;
+		this._ggxMaterial = null;
 		this._cubemapMaterial = null;
 		this._equirectMaterial = null;
 		this._backgroundBox = null;

@@ -408,6 +394,7 @@ class PMREMGenerator {
 	_dispose() {
 
 		if ( this._blurMaterial !== null ) this._blurMaterial.dispose();
+		if ( this._ggxMaterial !== null ) this._ggxMaterial.dispose();
 
 		if ( this._pingPongRenderTarget !== null ) this._pingPongRenderTarget.dispose();
 
@@ -632,17 +619,80 @@ class PMREMGenerator {
 		renderer.autoClear = false;
 		const n = this._lodPlanes.length;
 
+		// Use GGX VNDF importance sampling
 		for ( let i = 1; i < n; i ++ ) {
 
-			const sigma = Math.sqrt( this._sigmas[ i ] * this._sigmas[ i ] - this._sigmas[ i - 1 ] * this._sigmas[ i - 1 ] );
+			this._applyGGXFilter( cubeUVRenderTarget, i - 1, i );
+
+		}
+
+		renderer.autoClear = autoClear;
+
+	}
+
+	/**
+	 * Applies GGX VNDF importance sampling filter to generate a prefiltered environment map.
+	 * Uses Monte Carlo integration with VNDF importance sampling to accurately represent the
+	 * GGX BRDF for physically-based rendering. Reads from the previous LOD level and
+	 * applies incremental roughness filtering to avoid over-blurring.
+	 *
+	 * @private
+	 * @param {RenderTarget} cubeUVRenderTarget
+	 * @param {number} lodIn - Source LOD level to read from
+	 * @param {number} lodOut - Target LOD level to write to
+	 */
+	_applyGGXFilter( cubeUVRenderTarget, lodIn, lodOut ) {
+
+		const renderer = this._renderer;
+		const pingPongRenderTarget = this._pingPongRenderTarget;
 
-			const poleAxis = _axisDirections[ ( n - i - 1 ) % _axisDirections.length ];
+		// Lazy create GGX material only when first used
+		if ( this._ggxMaterial === null ) {
 
-			this._blur( cubeUVRenderTarget, i - 1, i, sigma, poleAxis );
+			this._ggxMaterial = _getGGXShader( this._lodMax, this._pingPongRenderTarget.width, this._pingPongRenderTarget.height );
 
 		}
 
-		renderer.autoClear = autoClear;
+		const ggxMaterial = this._ggxMaterial;
+		const ggxMesh = this._lodMeshes[ lodOut ];
+		ggxMesh.material = ggxMaterial;
+
+		const ggxUniforms = _uniformsMap.get( ggxMaterial );
+
+		// Calculate incremental roughness between LOD levels
+		const targetRoughness = lodOut / ( this._lodPlanes.length - 1 );
+		const sourceRoughness = lodIn / ( this._lodPlanes.length - 1 );
+		const incrementalRoughness = Math.sqrt( targetRoughness * targetRoughness - sourceRoughness * sourceRoughness );
+
+		// Apply blur strength mapping for better quality across the roughness range
+		const blurStrength = 0.05 + targetRoughness * 0.95;
+		const adjustedRoughness = incrementalRoughness * blurStrength;
+
+		// Calculate viewport position based on output LOD level
+		const { _lodMax } = this;
+		const outputSize = this._sizeLods[ lodOut ];
+		const x = 3 * outputSize * ( lodOut > _lodMax - LOD_MIN ? lodOut - _lodMax + LOD_MIN : 0 );
+		const y = 4 * ( this._cubeSize - outputSize );
+
+		// Read from previous LOD with incremental roughness
+		cubeUVRenderTarget.texture.frame = ( cubeUVRenderTarget.texture.frame || 0 ) + 1;
+		ggxUniforms.envMap.value = cubeUVRenderTarget.texture;
+		ggxUniforms.roughness.value = adjustedRoughness;
+		ggxUniforms.mipInt.value = _lodMax - lodIn; // Sample from input LOD
+
+		_setViewport( pingPongRenderTarget, x, y, 3 * outputSize, 2 * outputSize );
+		renderer.setRenderTarget( pingPongRenderTarget );
+		renderer.render( ggxMesh, _flatCamera );
+
+		// Copy from pingPong back to cubeUV (simple direct copy)
+		pingPongRenderTarget.texture.frame = ( pingPongRenderTarget.texture.frame || 0 ) + 1;
+		ggxUniforms.envMap.value = pingPongRenderTarget.texture;
+		ggxUniforms.roughness.value = 0.0; // Direct copy
+		ggxUniforms.mipInt.value = _lodMax - lodOut; // Read from the level we just wrote
+
+		_setViewport( cubeUVRenderTarget, x, y, 3 * outputSize, 2 * outputSize );
+		renderer.setRenderTarget( cubeUVRenderTarget );
+		renderer.render( ggxMesh, _flatCamera );
 
 	}
 
@@ -653,6 +703,8 @@ class PMREMGenerator {
	 * the poles) to approximate the orthogonally-separable blur. It is least
	 * accurate at the poles, but still does a decent job.
	 *
+	 * Used for initial scene blur in fromScene() method when sigma > 0.
+	 *
	 * @private
	 * @param {RenderTarget} cubeUVRenderTarget - The cubemap render target.
	 * @param {number} lodIn - The input level-of-detail.

@@ -904,7 +956,7 @@ function _getBlurShader( lodMax, width, height ) {
 	const n = float( MAX_SAMPLES );
 	const latitudinal = uniform( 0 ); // false, bool
 	const samples = uniform( 1 ); // int
-	const envMap = texture( null );
+	const envMap = texture();
 	const mipInt = uniform( 0 ); // int
 	const CUBEUV_TEXEL_WIDTH = float( 1 / width );
 	const CUBEUV_TEXEL_HEIGHT = float( 1 / height );

@@ -934,6 +986,37 @@ function _getBlurShader( lodMax, width, height ) {
 
 }
 
+function _getGGXShader( lodMax, width, height ) {
+
+	const envMap = texture();
+	const roughness = uniform( 0 );
+	const mipInt = uniform( 0 );
+	const CUBEUV_TEXEL_WIDTH = float( 1 / width );
+	const CUBEUV_TEXEL_HEIGHT = float( 1 / height );
+	const CUBEUV_MAX_MIP = float( lodMax );
+
+	const materialUniforms = {
+		envMap,
+		roughness,
+		mipInt,
+		CUBEUV_TEXEL_WIDTH,
+		CUBEUV_TEXEL_HEIGHT,
+		CUBEUV_MAX_MIP
+	};
+
+	const material = _getMaterial( 'ggx' );
+	material.fragmentNode = ggxConvolution( {
+		...materialUniforms,
+		N_immutable: _outputDirection,
+		GGX_SAMPLES: uint( GGX_SAMPLES )
+	} );
+
+	_uniformsMap.set( material, materialUniforms );
+
+	return material;
+
+}
+
 function _getCubemapMaterial( envTexture ) {
 
 	const material = _getMaterial( 'cubemap' );
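
The incremental-roughness scheduling in _applyGGXFilter above can be summarized in a few lines of plain JavaScript (illustrative only; lodCount stands in for this._lodPlanes.length and the example numbers are hypothetical):

// Mirrors the roughness math in _applyGGXFilter; not part of the commit.
function adjustedRoughness( lodIn, lodOut, lodCount ) {

	const targetRoughness = lodOut / ( lodCount - 1 );
	const sourceRoughness = lodIn / ( lodCount - 1 );

	// Roughness is treated like a blur sigma that accumulates in quadrature, so each pass
	// only applies the difference between the target level and the already-filtered source,
	// which avoids over-blurring when filtering incrementally from LOD to LOD.
	const incremental = Math.sqrt( targetRoughness * targetRoughness - sourceRoughness * sourceRoughness );

	// Blur strength mapping from the commit: 0.05 + roughness * 0.95
	return incremental * ( 0.05 + targetRoughness * 0.95 );

}

// Example with a hypothetical lodCount of 9: adjustedRoughness( 1, 2, 9 ) ≈ 0.2165 * 0.2875 ≈ 0.062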

0 commit comments