5 changes: 5 additions & 0 deletions .github/workflows/CI.yml
@@ -39,6 +39,8 @@ jobs:
python -m pip install --upgrade pip
pip install coverage
pip install pytest pytest-cov
pip install -e .[test]
pip install pytest-xdist
pip install -r requirements.txt

- name: Run tests
@@ -49,7 +51,10 @@ jobs:
export MASTER_ADDR=localhost
export MASTER_PORT=12345
export PYTHONPATH=MCintegration
export COVERAGE_PROCESS_START=.coveragerc
pytest --cov --cov-report=xml --ignore=examples
coverage combine
coverage xml
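
The COVERAGE_PROCESS_START export and the coverage combine / coverage xml steps only produce merged results if the coverage configuration enables parallel data files. A minimal .coveragerc sketch consistent with this workflow (the repository's actual file may differ):

[run]
parallel = True
concurrency = multiprocessing
source = MCintegration

With parallel = True, each worker process writes its own .coverage.* data file, which coverage combine merges before coverage xml generates the report.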

- name: Upload coverage to Codecov
uses: codecov/codecov-action@v4
103 changes: 101 additions & 2 deletions MCintegration/maps_test.py
@@ -1,11 +1,45 @@
import unittest
import torch

# import numpy as np
import numpy as np
from maps import Map, CompositeMap, Vegas, Configuration
from base import LinearMap


class TestConfiguration(unittest.TestCase):
def setUp(self):
self.batch_size = 5
self.dim = 3
self.f_dim = 2
self.device = "cpu"
self.dtype = torch.float64

def test_configuration_initialization(self):
config = Configuration(
batch_size=self.batch_size,
dim=self.dim,
f_dim=self.f_dim,
device=self.device,
dtype=self.dtype,
)

self.assertEqual(config.batch_size, self.batch_size)
self.assertEqual(config.dim, self.dim)
self.assertEqual(config.f_dim, self.f_dim)
self.assertEqual(config.device, self.device)

self.assertEqual(config.u.shape, (self.batch_size, self.dim))
self.assertEqual(config.x.shape, (self.batch_size, self.dim))
self.assertEqual(config.fx.shape, (self.batch_size, self.f_dim))
self.assertEqual(config.weight.shape, (self.batch_size,))
self.assertEqual(config.detJ.shape, (self.batch_size,))

self.assertEqual(config.u.dtype, self.dtype)
self.assertEqual(config.x.dtype, self.dtype)
self.assertEqual(config.fx.dtype, self.dtype)
self.assertEqual(config.weight.dtype, self.dtype)
self.assertEqual(config.detJ.dtype, self.dtype)
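
The assertions above pin down Configuration's public layout. A minimal sketch consistent with them, assuming zero-initialized buffers (inferred from the test, not the actual implementation):

import torch

class Configuration:
    # Hypothetical sketch inferred from the assertions in this test
    def __init__(self, batch_size, dim, f_dim, device="cpu", dtype=torch.float64):
        self.batch_size, self.dim, self.f_dim = batch_size, dim, f_dim
        self.device, self.dtype = device, dtype
        self.u = torch.zeros(batch_size, dim, dtype=dtype, device=device)
        self.x = torch.zeros(batch_size, dim, dtype=dtype, device=device)
        self.fx = torch.zeros(batch_size, f_dim, dtype=dtype, device=device)
        self.weight = torch.zeros(batch_size, dtype=dtype, device=device)
        self.detJ = torch.ones(batch_size, dtype=dtype, device=device)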


class TestMap(unittest.TestCase):
def setUp(self):
self.device = "cpu"
@@ -24,6 +58,35 @@ def test_inverse_not_implemented(self):
with self.assertRaises(NotImplementedError):
self.map.inverse(torch.tensor([0.5, 0.5], dtype=self.dtype))

def test_forward_with_detJ(self):
# Create a simple linear map for testing: x = u * A + b
# With A=[1, 1] and b=[0, 0], we have x = u
linear_map = LinearMap([1, 1], [0, 0], device=self.device)

# Test forward_with_detJ method
u = torch.tensor([[0.5, 0.5]], dtype=torch.float64, device=self.device)
x, detJ = linear_map.forward_with_detJ(u)

# Since the map has slope [1, 1] and shift [0, 0], it is the identity map, so x should equal u
self.assertTrue(torch.allclose(x, u))

# Determinant of Jacobian should be 1 for linear map with slope 1
# forward_with_detJ returns actual determinant, not log
self.assertAlmostEqual(detJ.item(), 1.0)

# Test with a different linear map: x = u * [2, 3] + [1, 1]
# So u = [0.5, 0.5] should give x = [0.5*2+1, 0.5*3+1] = [2, 2.5]
linear_map2 = LinearMap([2, 3], [1, 1], device=self.device)
u2 = torch.tensor([[0.5, 0.5]], dtype=torch.float64, device=self.device)
x2, detJ2 = linear_map2.forward_with_detJ(u2)
expected_x2 = torch.tensor(
[[2.0, 2.5]], dtype=torch.float64, device=self.device
)
self.assertTrue(torch.allclose(x2, expected_x2))

# Determinant should be 2 * 3 = 6
self.assertAlmostEqual(detJ2.item(), 6.0)
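
The comment above ("actual determinant, not log") suggests forward_with_detJ is a thin wrapper over forward. A plausible sketch, assuming forward returns (x, log_detJ) as test_forward further down indicates:

def forward_with_detJ(self, u):
    # Hypothetical wrapper: exponentiate the log-det-Jacobian from forward
    x, log_detJ = self.forward(u)
    return x, log_detJ.exp()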


class TestCompositeMap(unittest.TestCase):
def setUp(self):
@@ -99,6 +162,32 @@ def test_initialization(self):
self.assertTrue(torch.equal(self.vegas.grid, self.init_grid))
self.assertEqual(self.vegas.inc.shape, (2, self.ninc))

def test_ninc_initialization_types(self):
# Test ninc initialization with int
vegas_int = Vegas(self.dim, ninc=5)
self.assertEqual(vegas_int.ninc.tolist(), [5, 5])

# Test ninc initialization with list
vegas_list = Vegas(self.dim, ninc=[5, 10])
self.assertEqual(vegas_list.ninc.tolist(), [5, 10])

# Test ninc initialization with numpy array
vegas_np = Vegas(self.dim, ninc=np.array([3, 7]))
self.assertEqual(vegas_np.ninc.tolist(), [3, 7])

# Test ninc initialization with torch tensor
vegas_tensor = Vegas(self.dim, ninc=torch.tensor([4, 6]))
self.assertEqual(vegas_tensor.ninc.tolist(), [4, 6])

# Test ninc initialization with invalid type
with self.assertRaises(ValueError):
Vegas(self.dim, ninc="invalid")

def test_ninc_shape_validation(self):
# Test ninc shape validation
with self.assertRaises(ValueError):
Vegas(self.dim, ninc=[1, 2, 3]) # Wrong length
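
Together these cases imply a normalization step for ninc inside Vegas. A hedged sketch of that logic (the helper name and exact messages are assumptions):

import numpy as np
import torch

def _normalize_ninc(ninc, dim):
    # Hypothetical helper mirroring the accepted types in the tests above
    if isinstance(ninc, int):
        ninc = [ninc] * dim  # broadcast a scalar count to every dimension
    if isinstance(ninc, (list, np.ndarray)):
        ninc = torch.as_tensor(ninc)
    if not isinstance(ninc, torch.Tensor):
        raise ValueError(f"unsupported ninc type: {type(ninc)}")
    if ninc.shape != (dim,):
        raise ValueError(f"ninc must have length {dim}")
    return ninc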

def test_add_training_data(self):
# Test adding training data
self.vegas.add_training_data(self.sample)
@@ -137,6 +226,16 @@ def test_forward(self):
self.assertEqual(x.shape, u.shape)
self.assertEqual(log_jac.shape, (u.shape[0],))

def test_forward_with_detJ(self):
# Test forward_with_detJ transformation
u = torch.tensor([[0.1, 0.2], [0.3, 0.4]], dtype=torch.float64)
x, det_jac = self.vegas.forward_with_detJ(u)
self.assertEqual(x.shape, u.shape)
self.assertEqual(det_jac.shape, (u.shape[0],))

# Determinant should be positive
self.assertTrue(torch.all(det_jac > 0))

def test_forward_out_of_bounds(self):
# Test forward transformation with out-of-bounds u values
u = torch.tensor(
5 changes: 4 additions & 1 deletion MCintegration/mc_multicpu_test.py
@@ -79,7 +79,10 @@ def two_integrands(x, f):
if dist.is_initialized():
dist.destroy_process_group()


def test_mcmc_singlethread():
world_size = 1
init_process(rank=0, world_size=world_size, fn=run_mcmc, backend=backend)

def test_mcmc(world_size=2):
# Use fewer processes than CPU cores to avoid resource contention
world_size = min(world_size, mp.cpu_count())
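
init_process itself is not shown in this hunk; a minimal sketch of such a helper with torch.distributed, relying on the MASTER_ADDR / MASTER_PORT values exported in CI.yml (the real helper may differ):

import os
import torch.distributed as dist

def init_process(rank, world_size, fn, backend="gloo"):
    # Hypothetical helper: joins the process group, runs fn, then cleans up
    os.environ.setdefault("MASTER_ADDR", "localhost")
    os.environ.setdefault("MASTER_PORT", "12345")
    dist.init_process_group(backend, rank=rank, world_size=world_size)
    try:
        fn(rank, world_size)
    finally:
        if dist.is_initialized():
            dist.destroy_process_group()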
79 changes: 59 additions & 20 deletions MCintegration/utils_test.py
@@ -164,17 +164,6 @@ def test_converged_criteria(self):
self.assertTrue(ravg.converged(0.1, 0.1))
self.assertFalse(ravg.converged(0.001, 0.001))

def test_multiplication_with_another_ravg(self):
ravg1 = RAvg(weighted=True)
ravg1.update(2.0, 0.1)
ravg2 = RAvg(weighted=True)
ravg2.update(3.0, 0.1)

result = ravg1 * ravg2
self.assertAlmostEqual(result.mean, 6.0)
sdev = (0.1 / 2**2 + 0.1 / 3**2) ** 0.5 * 6.0
self.assertAlmostEqual(result.sdev, sdev)
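
The expected sdev in the deleted test is standard first-order error propagation for a product of independent estimates, where update(mean, var) supplies the variance:

\sigma_{xy} = |xy| \sqrt{\frac{\sigma_x^2}{x^2} + \frac{\sigma_y^2}{y^2}}
            = 6.0 \sqrt{\frac{0.1}{2^2} + \frac{0.1}{3^2}}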

def test_multiplication(self):
ravg1 = RAvg(weighted=True)
# Test multiplication by another RAvg object
@@ -216,16 +205,19 @@ def test_multiplication(self):
np.allclose([r.sdev for r in result], [2.0 * ravg1.sdev, 3.0 * ravg1.sdev])
)

def test_division_with_another_ravg(self):
ravg1 = RAvg(weighted=True)
ravg1.update(6.0, 0.1)
ravg2 = RAvg(weighted=True)
ravg2.update(3.0, 0.1)
# Test multiplication with unweighted RAvg
ravg_unweighted = RAvg(weighted=False)
ravg_unweighted.update(2.0, 0.1)
result = ravg_unweighted * 3.0
self.assertAlmostEqual(result.mean, 6.0)
self.assertAlmostEqual(result.sdev, ravg_unweighted.sdev * 3)

result = ravg1 / ravg2
self.assertAlmostEqual(result.mean, 2.0)
sdev = (0.1 / 6.0**2 + 0.1 / 3.0**2) ** 0.5 * 2.0
self.assertAlmostEqual(result.sdev, sdev)
# Test multiplication with zero variance
ravg_zero_var = RAvg(weighted=True)
ravg_zero_var.update(2.0, 0.0)
result = ravg_zero_var * 3.0
self.assertAlmostEqual(result.mean, 6.0)
self.assertAlmostEqual(result.sdev, 0.0)

def test_division(self):
ravg1 = RAvg(weighted=True)
@@ -271,6 +263,53 @@ def test_division(self):
np.allclose([r.sdev for r in result], [ravg1.sdev / 2.0, ravg1.sdev / 3.0])
)

# Test division with unweighted RAvg
ravg_unweighted = RAvg(weighted=False)
ravg_unweighted.update(6.0, 0.1)
result = ravg_unweighted / 3.0
self.assertAlmostEqual(result.mean, 2.0)
self.assertAlmostEqual(result.sdev, ravg_unweighted.sdev / 3)

# Test division with zero variance
ravg_zero_var = RAvg(weighted=True)
ravg_zero_var.update(6.0, 0.0)
result = ravg_zero_var / 3.0
self.assertAlmostEqual(result.mean, 2.0)
self.assertAlmostEqual(result.sdev, 0.0)

# Test division of zero by RAvg
zero_ravg = RAvg(weighted=True)
zero_ravg.update(0.0, 0.1)
divisor_ravg = RAvg(weighted=True)
divisor_ravg.update(3.0, 0.1)
result = zero_ravg / divisor_ravg
self.assertAlmostEqual(result.mean, 0.0)
# Naive relative-error propagation would compute
# sdev = |0/3| * sqrt(0.1 / 0.0**2 + 0.1 / 3.0**2), which divides by the
# zero mean of the numerator and is ill-defined (0 * inf).
# In practice, gvar's error propagation handles the zero-mean case.

def test_vector_operations_not_implemented(self):
# Test that NotImplemented is returned for vector operations
ravg = RAvg(weighted=True)
ravg.update(2.0, 0.1)

# Test multiplication with list (should return NotImplemented)
result = ravg.__mul__([1, 2, 3])
self.assertEqual(result, NotImplemented)

# Test division with list (should return NotImplemented)
result = ravg.__truediv__([1, 2, 3])
self.assertEqual(result, NotImplemented)

# Test multiplication with numpy array (should return NotImplemented)
result = ravg.__mul__(np.array([1, 2, 3]))
self.assertEqual(result, NotImplemented)

# Test division with numpy array (should return NotImplemented)
result = ravg.__truediv__(np.array([1, 2, 3]))
self.assertEqual(result, NotImplemented)
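
Returning NotImplemented (rather than raising) lets Python fall back to the other operand's reflected method, e.g. numpy's __rmul__. A sketch of the guard these tests imply (assumed, not the actual source):

import numpy as np

def __mul__(self, other):
    # Hypothetical guard: defer list/array operands to the other type
    if isinstance(other, (list, np.ndarray)):
        return NotImplemented
    ...  # scalar and RAvg handling as exercised earlier in these tests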


class TestUtils(unittest.TestCase):
def setUp(self):