
Commit 2a2cec9

Merge pull request #39 from numericalEFT/pchou
update readme and license
2 parents 43a9fe4 + 3c84972

File tree

8 files changed: +541 −86 lines changed


MCintegration/base.py

Lines changed: 5 additions & 11 deletions
```diff
@@ -9,10 +9,8 @@
 from MCintegration.utils import get_device

 # Constants for numerical stability
-# Small but safe non-zero value
-MINVAL = 10 ** (sys.float_info.min_10_exp + 50)
-MAXVAL = 10 ** (sys.float_info.max_10_exp - 50)  # Large but safe value
 EPSILON = 1e-16  # Small value to ensure numerical stability
+# EPSILON = sys.float_info.epsilon * 1e4  # Small value to ensure numerical stability


 class BaseDistribution(nn.Module):
@@ -98,10 +96,8 @@ def sample(self, batch_size=1, **kwargs):
             tuple: (uniform samples, log_det_jacobian=0)
         """
         # torch.manual_seed(0) # test seed
-        u = torch.rand((batch_size, self.dim),
-                       device=self.device, dtype=self.dtype)
-        log_detJ = torch.zeros(
-            batch_size, device=self.device, dtype=self.dtype)
+        u = torch.rand((batch_size, self.dim), device=self.device, dtype=self.dtype)
+        log_detJ = torch.zeros(batch_size, device=self.device, dtype=self.dtype)
         return u, log_detJ


@@ -133,16 +129,14 @@ def __init__(self, A, b, device=None, dtype=torch.float32):
         elif isinstance(A, torch.Tensor):
             self.A = A.to(dtype=self.dtype, device=self.device)
         else:
-            raise ValueError(
-                "'A' must be a list, numpy array, or torch tensor.")
+            raise ValueError("'A' must be a list, numpy array, or torch tensor.")

         if isinstance(b, (list, np.ndarray)):
             self.b = torch.tensor(b, dtype=self.dtype, device=self.device)
         elif isinstance(b, torch.Tensor):
             self.b = b.to(dtype=self.dtype, device=self.device)
         else:
-            raise ValueError(
-                "'b' must be a list, numpy array, or torch tensor.")
+            raise ValueError("'b' must be a list, numpy array, or torch tensor.")

         # Pre-compute determinant of Jacobian for efficiency
         self._detJ = torch.prod(self.A)
```
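For orientation, a minimal usage sketch of the `LinearMap` class touched above. It relies only on behavior confirmed elsewhere in this commit (the `forward_with_detJ` calls in `maps_test.py` below): the map is elementwise affine, x = u * A + b, with Jacobian determinant `torch.prod(A)`.

```python
import torch
from MCintegration.base import LinearMap

# Elementwise affine map of the unit hypercube: x_d = u_d * A_d + b_d.
lin = LinearMap([2, 3], [1, 1])
u = torch.tensor([[0.5, 0.5]])

# forward_with_detJ returns the actual determinant, not its log.
x, detJ = lin.forward_with_detJ(u)
print(x)            # tensor([[2.0000, 2.5000]])
print(detJ.item())  # 6.0, i.e. prod(A) = 2 * 3
```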

MCintegration/maps.py

Lines changed: 26 additions & 44 deletions
```diff
@@ -9,7 +9,8 @@
 from MCintegration.utils import get_device
 import sys

-TINY = 10 ** (sys.float_info.min_10_exp + 50)  # Small but safe non-zero value
+# TINY = 10 ** (sys.float_info.min_10_exp + 50)  # Small but safe non-zero value
+TINY = 1e-45


 class Configuration:
@@ -38,14 +39,10 @@ def __init__(self, batch_size, dim, f_dim, device=None, dtype=torch.float32):
         self.f_dim = f_dim
         self.batch_size = batch_size
         # Initialize tensors for storing samples and results
-        self.u = torch.empty(
-            (batch_size, dim), dtype=dtype, device=self.device)
-        self.x = torch.empty(
-            (batch_size, dim), dtype=dtype, device=self.device)
-        self.fx = torch.empty((batch_size, f_dim),
-                              dtype=dtype, device=self.device)
-        self.weight = torch.empty(
-            (batch_size,), dtype=dtype, device=self.device)
+        self.u = torch.empty((batch_size, dim), dtype=dtype, device=self.device)
+        self.x = torch.empty((batch_size, dim), dtype=dtype, device=self.device)
+        self.fx = torch.empty((batch_size, f_dim), dtype=dtype, device=self.device)
+        self.weight = torch.empty((batch_size,), dtype=dtype, device=self.device)
         self.detJ = torch.empty((batch_size,), dtype=dtype, device=self.device)


@@ -202,8 +199,7 @@ def __init__(self, dim, ninc=1000, device=None, dtype=torch.float32):
                 (self.dim,), ninc, dtype=torch.int32, device=self.device
             )
         elif isinstance(ninc, (list, np.ndarray)):
-            self.ninc = torch.tensor(
-                ninc, dtype=torch.int32, device=self.device)
+            self.ninc = torch.tensor(ninc, dtype=torch.int32, device=self.device)
         elif isinstance(ninc, torch.Tensor):
             self.ninc = ninc.to(dtype=torch.int32, device=self.device)
         else:
@@ -223,8 +219,9 @@
         self.sum_f = torch.zeros(
             self.dim, self.max_ninc, dtype=self.dtype, device=self.device
         )
-        self.n_f = torch.zeros(
-            self.dim, self.max_ninc, dtype=self.dtype, device=self.device
+        self.n_f = (
+            torch.zeros(self.dim, self.max_ninc, dtype=self.dtype, device=self.device)
+            + TINY
         )
         self.avg_f = torch.ones(
             (self.dim, self.max_ninc), dtype=self.dtype, device=self.device
```
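A note on why `n_f` is now seeded with `TINY` rather than zero: `n_f` holds per-increment sample counts, and the adaptation step presumably forms per-bin averages by dividing the accumulated `sum_f` by it, so any bin that never receives a sample would produce 0/0 = NaN and poison the new grid. A standalone sketch of the failure mode this guards against:

```python
import torch

TINY = 1e-45
sum_f = torch.zeros(4)       # accumulated weights per increment, no data yet
n_f = torch.zeros(4)         # per-increment sample counts

print(sum_f / n_f)           # tensor([nan, nan, nan, nan])
print(sum_f / (n_f + TINY))  # tensor([0., 0., 0., 0.]) -- stays finite
```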
```diff
@@ -308,19 +305,16 @@ def adapt(self, alpha=0.5):
         """
         # Aggregate training data across distributed processes if applicable
         if torch.distributed.is_initialized():
-            torch.distributed.all_reduce(
-                self.sum_f, op=torch.distributed.ReduceOp.SUM)
-            torch.distributed.all_reduce(
-                self.n_f, op=torch.distributed.ReduceOp.SUM)
+            torch.distributed.all_reduce(self.sum_f, op=torch.distributed.ReduceOp.SUM)
+            torch.distributed.all_reduce(self.n_f, op=torch.distributed.ReduceOp.SUM)

         # Initialize a new grid tensor
         new_grid = torch.empty(
             (self.dim, self.max_ninc + 1), dtype=self.dtype, device=self.device
         )

         if alpha > 0:
-            tmp_f = torch.empty(
-                self.max_ninc, dtype=self.dtype, device=self.device)
+            tmp_f = torch.empty(self.max_ninc, dtype=self.dtype, device=self.device)

             # avg_f = torch.ones(self.inc.shape[1], dtype=self.dtype, device=self.device)
             # print(self.ninc.shape, self.dim)
@@ -338,14 +332,12 @@

         if alpha > 0:
             # Smooth avg_f
-            tmp_f[0] = (7.0 * avg_f[0] + avg_f[1]
-                        ).abs() / 8.0  # Shape: ()
+            tmp_f[0] = (7.0 * avg_f[0] + avg_f[1]).abs() / 8.0  # Shape: ()
             tmp_f[ninc - 1] = (
                 7.0 * avg_f[ninc - 1] + avg_f[ninc - 2]
             ).abs() / 8.0  # Shape: ()
-            tmp_f[1: ninc - 1] = (
-                6.0 * avg_f[1: ninc - 1] +
-                avg_f[: ninc - 2] + avg_f[2:ninc]
+            tmp_f[1 : ninc - 1] = (
+                6.0 * avg_f[1 : ninc - 1] + avg_f[: ninc - 2] + avg_f[2:ninc]
             ).abs() / 8.0

             # Normalize tmp_f to ensure the sum is 1
@@ -393,8 +385,7 @@
             ) / avg_f_relevant

             # Calculate the new grid points using vectorized operations
-            new_grid[d, 1:ninc] = grid_left + \
-                fractional_positions * inc_relevant
+            new_grid[d, 1:ninc] = grid_left + fractional_positions * inc_relevant
         else:
             # If alpha == 0 or no training data, retain the existing grid
             new_grid[d, :] = self.grid[d, :]
@@ -407,8 +398,7 @@
         self.inc.zero_()  # Reset increments to zero
         for d in range(self.dim):
             self.inc[d, : self.ninc[d]] = (
-                self.grid[d, 1: self.ninc[d] + 1] -
-                self.grid[d, : self.ninc[d]]
+                self.grid[d, 1 : self.ninc[d] + 1] - self.grid[d, : self.ninc[d]]
             )

         # Clear accumulated training data for the next adaptation cycle
@@ -432,8 +422,7 @@ def make_uniform(self):
                 device=self.device,
             )
             self.inc[d, : self.ninc[d]] = (
-                self.grid[d, 1: self.ninc[d] + 1] -
-                self.grid[d, : self.ninc[d]]
+                self.grid[d, 1 : self.ninc[d] + 1] - self.grid[d, : self.ninc[d]]
             )
         self.clear()

@@ -459,8 +448,7 @@ def forward(self, u):

         batch_size = u.size(0)
         # Clamp iu to [0, ninc-1] to handle out-of-bounds indices
-        min_tensor = torch.zeros(
-            (1, self.dim), dtype=iu.dtype, device=self.device)
+        min_tensor = torch.zeros((1, self.dim), dtype=iu.dtype, device=self.device)
         # Shape: (1, dim)
         max_tensor = (self.ninc - 1).unsqueeze(0).to(iu.dtype)
         iu_clamped = torch.clamp(iu, min=min_tensor, max=max_tensor)
@@ -471,8 +459,7 @@
         grid_gather = torch.gather(grid_expanded, 2, iu_clamped.unsqueeze(2)).squeeze(
             2
         )  # Shape: (batch_size, dim)
-        inc_gather = torch.gather(
-            inc_expanded, 2, iu_clamped.unsqueeze(2)).squeeze(2)
+        inc_gather = torch.gather(inc_expanded, 2, iu_clamped.unsqueeze(2)).squeeze(2)

         x = grid_gather + inc_gather * du_ninc
         log_detJ = (inc_gather * self.ninc).log_().sum(dim=1)
@@ -484,17 +471,15 @@
         # For each sample and dimension, set x to grid[d, ninc[d]]
         # and log_detJ += log(inc[d, ninc[d]-1] * ninc[d])
         boundary_grid = (
-            self.grid[torch.arange(
-                self.dim, device=self.device), self.ninc]
+            self.grid[torch.arange(self.dim, device=self.device), self.ninc]
             .unsqueeze(0)
             .expand(batch_size, -1)
         )
         # x = torch.where(out_of_bounds, boundary_grid, x)
         x[out_of_bounds] = boundary_grid[out_of_bounds]

         boundary_inc = (
-            self.inc[torch.arange(
-                self.dim, device=self.device), self.ninc - 1]
+            self.inc[torch.arange(self.dim, device=self.device), self.ninc - 1]
             .unsqueeze(0)
             .expand(batch_size, -1)
         )
@@ -522,8 +507,7 @@ def inverse(self, x):

         # Initialize output tensors
         u = torch.empty_like(x)
-        log_detJ = torch.zeros(
-            batch_size, device=self.device, dtype=self.dtype)
+        log_detJ = torch.zeros(batch_size, device=self.device, dtype=self.dtype)

         # Loop over each dimension to perform inverse mapping
         for d in range(dim):
@@ -537,8 +521,7 @@
             # Perform searchsorted to find indices where x should be inserted to maintain order
             # torch.searchsorted returns indices in [0, max_ninc + 1]
             iu = (
-                torch.searchsorted(
-                    grid_d, x[:, d].contiguous(), right=True) - 1
+                torch.searchsorted(grid_d, x[:, d].contiguous(), right=True) - 1
             )  # Shape: (batch_size,)

             # Clamp indices to [0, ninc_d - 1] to ensure they are within valid range
@@ -551,8 +534,7 @@
             inc_gather = inc_d[iu_clamped]  # Shape: (batch_size,)

             # Compute du: fractional part within the increment
-            du = (x[:, d] - grid_gather) / \
-                (inc_gather + TINY)  # Shape: (batch_size,)
+            du = (x[:, d] - grid_gather) / (inc_gather + TINY)  # Shape: (batch_size,)

             # Compute u for dimension d
             u[:, d] = (du + iu_clamped) / ninc_d  # Shape: (batch_size,)
```
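To make the refactored `forward`/`inverse` paths above concrete, a hypothetical round trip through a fresh `Vegas` map. This is a sketch, not the library's documented API: it assumes `inverse` returns `(u, log_detJ)` symmetrically to `forward`, which the code above suggests but the diff does not show in full.

```python
import torch
from MCintegration.maps import Vegas

vegas = Vegas(2, ninc=50, dtype=torch.float64)  # 2D grid, 50 increments per dim
u = torch.rand(1000, 2, dtype=torch.float64)    # uniform samples in [0, 1)^2

x, log_detJ = vegas.forward(u)  # mapped points and log|det J| per sample
u_back, _ = vegas.inverse(x)    # inverse map, exact up to the TINY regularization

print(torch.allclose(u, u_back, atol=1e-8))  # expected: True on an untrained grid
```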

MCintegration/maps_test.py

Lines changed: 102 additions & 3 deletions
```diff
@@ -1,11 +1,45 @@
 import unittest
 import torch
-
-# import numpy as np
+import numpy as np
 from maps import Map, CompositeMap, Vegas, Configuration
 from base import LinearMap


+class TestConfiguration(unittest.TestCase):
+    def setUp(self):
+        self.batch_size = 5
+        self.dim = 3
+        self.f_dim = 2
+        self.device = "cpu"
+        self.dtype = torch.float64
+
+    def test_configuration_initialization(self):
+        config = Configuration(
+            batch_size=self.batch_size,
+            dim=self.dim,
+            f_dim=self.f_dim,
+            device=self.device,
+            dtype=self.dtype,
+        )
+
+        self.assertEqual(config.batch_size, self.batch_size)
+        self.assertEqual(config.dim, self.dim)
+        self.assertEqual(config.f_dim, self.f_dim)
+        self.assertEqual(config.device, self.device)
+
+        self.assertEqual(config.u.shape, (self.batch_size, self.dim))
+        self.assertEqual(config.x.shape, (self.batch_size, self.dim))
+        self.assertEqual(config.fx.shape, (self.batch_size, self.f_dim))
+        self.assertEqual(config.weight.shape, (self.batch_size,))
+        self.assertEqual(config.detJ.shape, (self.batch_size,))
+
+        self.assertEqual(config.u.dtype, self.dtype)
+        self.assertEqual(config.x.dtype, self.dtype)
+        self.assertEqual(config.fx.dtype, self.dtype)
+        self.assertEqual(config.weight.dtype, self.dtype)
+        self.assertEqual(config.detJ.dtype, self.dtype)
+
+
 class TestMap(unittest.TestCase):
     def setUp(self):
         self.device = "cpu"
@@ -24,6 +58,35 @@ def test_inverse_not_implemented(self):
         with self.assertRaises(NotImplementedError):
             self.map.inverse(torch.tensor([0.5, 0.5], dtype=self.dtype))

+    def test_forward_with_detJ(self):
+        # Create a simple linear map for testing: x = u * A + b
+        # With A=[1, 1] and b=[0, 0], we have x = u
+        linear_map = LinearMap([1, 1], [0, 0], device=self.device)
+
+        # Test forward_with_detJ method
+        u = torch.tensor([[0.5, 0.5]], dtype=torch.float64, device=self.device)
+        x, detJ = linear_map.forward_with_detJ(u)
+
+        # Since it's a linear map from [0,0] to [1,1], x should equal u
+        self.assertTrue(torch.allclose(x, u))
+
+        # Determinant of Jacobian should be 1 for linear map with slope 1
+        # forward_with_detJ returns actual determinant, not log
+        self.assertAlmostEqual(detJ.item(), 1.0)
+
+        # Test with a different linear map: x = u * [2, 3] + [1, 1]
+        # So u = [0.5, 0.5] should give x = [0.5*2+1, 0.5*3+1] = [2, 2.5]
+        linear_map2 = LinearMap([2, 3], [1, 1], device=self.device)
+        u2 = torch.tensor([[0.5, 0.5]], dtype=torch.float64, device=self.device)
+        x2, detJ2 = linear_map2.forward_with_detJ(u2)
+        expected_x2 = torch.tensor(
+            [[2.0, 2.5]], dtype=torch.float64, device=self.device
+        )
+        self.assertTrue(torch.allclose(x2, expected_x2))
+
+        # Determinant should be 2 * 3 = 6
+        self.assertAlmostEqual(detJ2.item(), 6.0)
+

 class TestCompositeMap(unittest.TestCase):
     def setUp(self):
@@ -99,6 +162,32 @@ def test_initialization(self):
         self.assertTrue(torch.equal(self.vegas.grid, self.init_grid))
         self.assertEqual(self.vegas.inc.shape, (2, self.ninc))

+    def test_ninc_initialization_types(self):
+        # Test ninc initialization with int
+        vegas_int = Vegas(self.dim, ninc=5)
+        self.assertEqual(vegas_int.ninc.tolist(), [5, 5])
+
+        # Test ninc initialization with list
+        vegas_list = Vegas(self.dim, ninc=[5, 10])
+        self.assertEqual(vegas_list.ninc.tolist(), [5, 10])
+
+        # Test ninc initialization with numpy array
+        vegas_np = Vegas(self.dim, ninc=np.array([3, 7]))
+        self.assertEqual(vegas_np.ninc.tolist(), [3, 7])
+
+        # Test ninc initialization with torch tensor
+        vegas_tensor = Vegas(self.dim, ninc=torch.tensor([4, 6]))
+        self.assertEqual(vegas_tensor.ninc.tolist(), [4, 6])
+
+        # Test ninc initialization with invalid type
+        with self.assertRaises(ValueError):
+            Vegas(self.dim, ninc="invalid")
+
+    def test_ninc_shape_validation(self):
+        # Test ninc shape validation
+        with self.assertRaises(ValueError):
+            Vegas(self.dim, ninc=[1, 2, 3])  # Wrong length
+
     def test_add_training_data(self):
         # Test adding training data
         self.vegas.add_training_data(self.sample)
@@ -137,6 +226,16 @@ def test_forward(self):
         self.assertEqual(x.shape, u.shape)
         self.assertEqual(log_jac.shape, (u.shape[0],))

+    def test_forward_with_detJ(self):
+        # Test forward_with_detJ transformation
+        u = torch.tensor([[0.1, 0.2], [0.3, 0.4]], dtype=torch.float64)
+        x, det_jac = self.vegas.forward_with_detJ(u)
+        self.assertEqual(x.shape, u.shape)
+        self.assertEqual(det_jac.shape, (u.shape[0],))
+
+        # Determinant should be positive
+        self.assertTrue(torch.all(det_jac > 0))
+
     def test_forward_out_of_bounds(self):
         # Test forward transformation with out-of-bounds u values
         u = torch.tensor(
@@ -220,4 +319,4 @@ def test_edge_cases(self):


 if __name__ == "__main__":
-    unittest.main()
\ No newline at end of file
+    unittest.main()
```
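The new tests import `maps` and `base` directly rather than through the package, so they are meant to run from inside `MCintegration/`. A minimal runner sketch under that assumption:

```python
# Run from within the MCintegration/ directory so that
# "from maps import ..." and "from base import ..." resolve.
import unittest

suite = unittest.defaultTestLoader.discover(".", pattern="maps_test.py")
unittest.TextTestRunner(verbosity=2).run(suite)
```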

MCintegration/mc_multicpu_test.py

Lines changed: 3 additions & 0 deletions
```diff
@@ -79,6 +79,9 @@ def two_integrands(x, f):
     if dist.is_initialized():
         dist.destroy_process_group()

+def test_mcmc_singlethread():
+    world_size = 1
+    init_process(rank=0, world_size=world_size, fn=run_mcmc, backend=backend)

 def test_mcmc(world_size=2):
     # Use fewer processes than CPU cores to avoid resource contention
```
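The added single-process test reuses the module's `init_process` and `run_mcmc` helpers, which this diff does not show. A hypothetical reconstruction of the pattern for readers without the full file; the real helper's signature and the callback's arguments may differ:

```python
# Hypothetical sketch of the init_process helper used above.
import os
import torch.distributed as dist

def init_process(rank, world_size, fn, backend="gloo"):
    os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
    os.environ.setdefault("MASTER_PORT", "29500")
    dist.init_process_group(backend, rank=rank, world_size=world_size)
    try:
        fn(world_size, rank)  # assumption: run_mcmc takes (world_size, rank)
    finally:
        if dist.is_initialized():
            dist.destroy_process_group()
```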
