From c25adbdcb71a71321469c84c99abb2a162545e6e Mon Sep 17 00:00:00 2001 From: houpc Date: Sat, 2 Nov 2024 22:18:05 +0800 Subject: [PATCH 1/4] require in-place integrands f(x, fx) --- src/integrators.py | 152 ++++++++++++++++++++------------------------- src/maps.py | 26 ++++---- src/mc_test.py | 65 +++++++++---------- 3 files changed, 111 insertions(+), 132 deletions(-) diff --git a/src/integrators.py b/src/integrators.py index ecec374..66234c3 100644 --- a/src/integrators.py +++ b/src/integrators.py @@ -1,7 +1,7 @@ -from typing import Callable, Union, List, Tuple, Dict +from typing import Callable import torch from utils import RAvg -from maps import Map, Linear, CompositeMap +from maps import Linear from base import Uniform import gvar import numpy as np @@ -22,21 +22,25 @@ def __init__( neval: int = 1000, nbatch: int = None, device="cpu", - dtype=torch.float64, + x_dtype=torch.float64, + fx_dtype=torch.float64, ): - self.dtype = dtype + self.x_dtype = x_dtype + self.fx_dtype = fx_dtype if maps: - if not self.dtype == maps.dtype: - raise ValueError("Float type of maps should be same as integrator.") + if not self.x_dtype == maps.dtype: + raise ValueError( + "Data type of the variables of integrator should be same as maps." + ) self.bounds = maps.bounds else: if not isinstance(bounds, (list, np.ndarray)): raise TypeError("bounds must be a list or a NumPy array.") - self.bounds = torch.tensor(bounds, dtype=dtype, device=device) + self.bounds = torch.tensor(bounds, dtype=x_dtype, device=device) self.dim = len(self.bounds) if not q0: - q0 = Uniform(self.bounds, device=device, dtype=dtype) + q0 = Uniform(self.bounds, device=device, dtype=x_dtype) self.q0 = q0 self.maps = maps self.neval = neval @@ -70,42 +74,34 @@ def __init__( neval: int = 1000, nbatch: int = None, device="cpu", - dtype=torch.float64, + x_dtype=torch.float64, + fx_dtype=torch.float64, ): - super().__init__(maps, bounds, q0, neval, nbatch, device, dtype) + super().__init__(maps, bounds, q0, neval, nbatch, device, x_dtype, fx_dtype) - def __call__(self, f: Callable, **kwargs): - x, _ = self.sample(1) - f_values = f(x) - if isinstance(f_values, (list, tuple)) and isinstance( - f_values[0], torch.Tensor - ): - f_size = len(f_values) - type_fval = f_values[0].dtype - elif isinstance(f_values, torch.Tensor): - f_size = 1 - type_fval = f_values.dtype - else: - raise TypeError( - "f must return a torch.Tensor or a list/tuple of torch.Tensor." 
- ) + def __call__(self, f: Callable, f_dim: int = 1, **kwargs): + x, _ = self.sample(self.nbatch) + fx = torch.empty((self.nbatch, f_dim), dtype=self.fx_dtype, device=self.device) + f(x, fx) epoch = self.neval // self.nbatch - values = torch.zeros((self.nbatch, f_size), dtype=type_fval, device=self.device) + integ_values = torch.zeros( + (self.nbatch, f_dim), dtype=self.fx_dtype, device=self.device + ) - for iepoch in range(epoch): + for _ in range(epoch): x, log_detJ = self.sample(self.nbatch) - f_values = f(x) - batch_results = self._multiply_by_jacobian(f_values, torch.exp(log_detJ)) - - values += batch_results / epoch - - results = np.array([RAvg() for _ in range(f_size)]) - for i in range(f_size): - _mean = values[:, i].mean().item() - _var = values[:, i].var().item() / self.nbatch + f(x, fx) + # batch_results = self._multiply_by_jacobian(fx, torch.exp(log_detJ)) + fx.mul_(log_detJ.exp_().unsqueeze_(1)) + integ_values += fx / epoch + + results = np.array([RAvg() for _ in range(f_dim)]) + for i in range(f_dim): + _mean = integ_values[:, i].mean().item() + _var = integ_values[:, i].var().item() / self.nbatch results[i].update(_mean, _var, self.neval) - if f_size == 1: + if f_dim == 1: return results[0] else: return results @@ -145,13 +141,14 @@ def __init__( maps=None, bounds=None, q0=None, - neval=10000, - nbatch=None, - nburnin=500, + neval: int = 10000, + nbatch: int = None, + nburnin: int = 500, device="cpu", - dtype=torch.float64, + x_dtype=torch.float64, + fx_dtype=torch.float64, ): - super().__init__(maps, bounds, q0, neval, nbatch, device, dtype) + super().__init__(maps, bounds, q0, neval, nbatch, device, x_dtype, fx_dtype) self.nburnin = nburnin if maps is None: self.maps = Linear([(0, 1)] * self.dim, device=device) @@ -160,9 +157,10 @@ def __init__( def __call__( self, f: Callable, + f_dim: int = 1, proposal_dist: Callable = uniform, - thinning=1, mix_rate=0.5, + meas_freq: int = 1, **kwargs, ): epsilon = 1e-16 # Small value to ensure numerical stability @@ -170,52 +168,37 @@ def __call__( current_y, current_jac = self.q0.sample(self.nbatch) current_x, detJ = self.maps.forward(current_y) current_jac += detJ - current_jac = torch.exp(current_jac) - current_fval = f(current_x) - if isinstance(current_fval, (list, tuple)) and isinstance( - current_fval[0], torch.Tensor - ): - f_size = len(current_fval) - current_fval = sum(current_fval) - - def _integrand(x): - return sum(f(x)) - elif isinstance(current_fval, torch.Tensor): - f_size = 1 - - def _integrand(x): - return f(x) - else: - raise TypeError( - "f must return a torch.Tensor or a list/tuple of torch.Tensor." 
- ) - type_fval = current_fval.dtype + current_jac.exp_() + fx = torch.empty((self.nbatch, f_dim), dtype=self.fx_dtype, device=self.device) + fx_weight = f(current_x, fx) + fx_weight.abs_() - current_weight = mix_rate / current_jac + (1 - mix_rate) * current_fval.abs() + current_weight = mix_rate / current_jac + (1 - mix_rate) * fx_weight current_weight.masked_fill_(current_weight < epsilon, epsilon) - n_meas = epoch // thinning + n_meas = epoch // meas_freq def one_step(current_y, current_x, current_weight, current_jac): proposed_y = proposal_dist( - self.dim, self.bounds, self.device, self.dtype, current_y, **kwargs + self.dim, self.bounds, self.device, self.x_dtype, current_y, **kwargs ) proposed_x, new_jac = self.maps.forward(proposed_y) - new_jac = torch.exp(new_jac) + new_jac.exp_() - new_fval = _integrand(proposed_x) - new_weight = mix_rate / new_jac + (1 - mix_rate) * new_fval.abs() + fx_weight = f(proposed_x, fx) + fx_weight.abs_() + new_weight = mix_rate / new_jac + (1 - mix_rate) * fx_weight new_weight.masked_fill_(new_weight < epsilon, epsilon) acceptance_probs = new_weight / current_weight * new_jac / current_jac accept = ( - torch.rand(self.nbatch, dtype=self.dtype, device=self.device) + torch.rand(self.nbatch, dtype=torch.float64, device=self.device) <= acceptance_probs ) current_y = torch.where(accept.unsqueeze(1), proposed_y, current_y) - # current_fval = torch.where(accept, new_fval, current_fval) + # fx = torch.where(accept, new_fval, fx) current_x = torch.where(accept.unsqueeze(1), proposed_x, current_x) current_weight = torch.where(accept, new_weight, current_weight) current_jac = torch.where(accept, new_jac, current_jac) @@ -226,36 +209,35 @@ def one_step(current_y, current_x, current_weight, current_jac): current_y, current_x, current_weight, current_jac ) - values = torch.zeros((self.nbatch, f_size), dtype=type_fval, device=self.device) - refvalues = torch.zeros(self.nbatch, dtype=type_fval, device=self.device) + values = torch.zeros( + (self.nbatch, f_dim), dtype=self.fx_dtype, device=self.device + ) + refvalues = torch.zeros(self.nbatch, dtype=self.fx_dtype, device=self.device) - for imeas in range(n_meas): - for j in range(thinning): + for _ in range(n_meas): + for _ in range(meas_freq): current_y, current_x, current_weight, current_jac = one_step( current_y, current_x, current_weight, current_jac ) + f(current_x, fx) - batch_results = self._multiply_by_jacobian( - f(current_x), 1.0 / current_weight - ) - batch_results_ref = 1 / (current_jac * current_weight) - - values += batch_results / n_meas - refvalues += batch_results_ref / n_meas + fx.div_(current_weight.unsqueeze(1)) + values += fx / n_meas + refvalues += 1 / (current_jac * current_weight) / n_meas - results = np.array([RAvg() for _ in range(f_size)]) + results = np.array([RAvg() for _ in range(f_dim)]) results_ref = RAvg() mean_ref = refvalues.mean().item() var_ref = refvalues.var().item() / self.nbatch results_ref.update(mean_ref, var_ref, self.neval) - for i in range(f_size): + for i in range(f_dim): _mean = values[:, i].mean().item() _var = values[:, i].var().item() / self.nbatch results[i].update(_mean, _var, self.neval) - if f_size == 1: + if f_dim == 1: res = results[0] / results_ref * self._rangebounds.prod() result = RAvg(itn_results=[res], sum_neval=self.neval) return result diff --git a/src/maps.py b/src/maps.py index 6f6fc1d..28ff352 100644 --- a/src/maps.py +++ b/src/maps.py @@ -77,35 +77,31 @@ def __init__(self, bounds, ninc=1000, alpha=0.5, device="cpu", dtype=torch.float elif 
isinstance(ninc, torch.Tensor): self.ninc = ninc.to(dtype=torch.int32, device=device) else: - raise ValueError("'ninc' must be an int, list, numpy array, or torch tensor.") - + raise ValueError( + "'ninc' must be an int, list, numpy array, or torch tensor." + ) + # Ensure ninc has the correct shape if self.ninc.shape != (self.dim,): - raise ValueError(f"'ninc' must be a scalar or a 1D array of length {self.dim}.") + raise ValueError( + f"'ninc' must be a scalar or a 1D array of length {self.dim}." + ) self.make_uniform() self.alpha = alpha self._A = self.bounds[:, 1] - self.bounds[:, 0] self._jaclinear = torch.prod(self._A) - def train(self, nsamples, f, epoch=5, alpha=0.5): + def train(self, nsamples, f, f_dim=1, fx_dtype=torch.float64, epoch=5, alpha=0.5): q0 = Uniform(self.bounds, device=self.device, dtype=self.dtype) u, log_detJ0 = q0.sample(nsamples) - fval = f(u) - f_size = len(fval) if isinstance(fval, (list, tuple)) else 1 - if f_size > 1: - - def _integrand(x): - return sum(f(x)) - else: - - def _integrand(x): - return f(x) + fx = torch.empty(nsamples, f_dim, device=self.device, dtype=fx_dtype) for _ in range(epoch): x, log_detJ = self.forward(u) - f2 = torch.exp(2 * (log_detJ + log_detJ0)) * _integrand(x) ** 2 + fx_weight = f(x, fx) + f2 = torch.exp(2 * (log_detJ + log_detJ0)) * fx_weight**2 self.add_training_data(u, f2) self.adapt(alpha) diff --git a/src/mc_test.py b/src/mc_test.py index 65da182..06152d6 100644 --- a/src/mc_test.py +++ b/src/mc_test.py @@ -5,40 +5,41 @@ from utils import set_seed, get_device set_seed(42) -# device = get_device() -device = torch.device("cpu") +device = get_device() +# device = torch.device("cpu") def test_nothing(): pass -def unit_circle_integrand(x): - inside_circle = (x[:, 0] ** 2 + x[:, 1] ** 2 < 1).double() - return inside_circle +def unit_circle_integrand(x, f): + f[:, 0] = (x[:, 0] ** 2 + x[:, 1] ** 2 < 1).double() + return f[:, 0] -def half_sphere_integrand(x): - return torch.clamp(1 - (x[:, 0] ** 2 + x[:, 1] ** 2), min=0) * 2 +def half_sphere_integrand(x, f): + f[:, 0] = torch.clamp(1 - (x[:, 0] ** 2 + x[:, 1] ** 2), min=0) * 2 + return f[:, 0] -def integrand_list(x): - return [unit_circle_integrand(x), half_sphere_integrand(x) / 2] +def two_integrands(x, f): + f[:, 0] = (x[:, 0] ** 2 + x[:, 1] ** 2 < 1).double() + f[:, 1] = torch.clamp(1 - (x[:, 0] ** 2 + x[:, 1] ** 2), min=0) * 2 + return f.mean(dim=-1) -def integrand_list1(x): - dx2 = torch.zeros(x.shape[0], dtype=x.dtype, device=x.device) - for d in range(4): - dx2 += (x[:, d] - 0.5) ** 2 - f = torch.exp(-200 * dx2) - return [f, f * x[:, 0], f * x[:, 0] ** 2] - - -def sharp_peak(x): - dx2 = torch.zeros(x.shape[0], dtype=x.dtype, device=x.device) - for d in range(4): - dx2 += (x[:, d] - 0.5) ** 2 - return torch.exp(-200 * dx2) +def sharp_integrands(x, f, dim=4): + # dx2 = torch.zeros(x.shape[0], dtype=x.dtype, device=x.device) + for d in range(dim): + f[:, 0] += (x[:, d] - 0.5) ** 2 + # dx2 += (x[:, d] - 0.5) ** 2 + # f[:, 0] = torch.exp(-200 * dx2) + f[:, 0] *= -200 + f[:, 0].exp_() + f[:, 1] = f[:, 0] * x[:, 0] + f[:, 2] = f[:, 0] * x[:, 0] ** 2 + return f.mean(dim=-1) dim = 2 @@ -116,25 +117,25 @@ def sharp_peak(x): print("\nCalculate the integral [f(x1, x2), g(x1, x2)/2] in the bounds [-1, 1]^2") # Two integrands -res = mc_integrator(integrand_list) +res = mc_integrator(two_integrands, f_dim=2) print("Plain MC Integral results:") print(" Integral 1: ", res[0]) print(" Integral 2: ", res[1]) -res = mcmc_integrator(integrand_list, mix_rate=0.5) +res = mcmc_integrator(two_integrands, 
f_dim=2, mix_rate=0.5) print("MCMC Integral results:") print(f" Integral 1: ", res[0]) print(f" Integral 2: ", res[1]) # print("VEAGS map is trained for g(x1, x2)") vegas_map.make_uniform() -vegas_map.train(20000, integrand_list, epoch=10, alpha=0.5) -res = vegas_integrator(integrand_list) +vegas_map.train(20000, two_integrands, f_dim=2, epoch=10, alpha=0.5) +res = vegas_integrator(two_integrands, f_dim=2) print("VEGAS Integral results:") print(" Integral 1: ", res[0]) print(" Integral 2: ", res[1]) -res = vegasmcmc_integrator(integrand_list, mix_rate=0.5) +res = vegasmcmc_integrator(two_integrands, f_dim=2, mix_rate=0.5) print("VEGAS-MCMC Integral results:") print(" Integral 1: ", res[0]) print(" Integral 2: ", res[1]) @@ -154,7 +155,7 @@ def sharp_peak(x): bounds=bounds, neval=n_eval, nbatch=n_batch, nburnin=n_therm, device=device ) print("Plain MC Integral results:") -res = mc_integrator(integrand_list1) +res = mc_integrator(sharp_integrands, f_dim=3) print( " I[0] =", res[0], @@ -166,7 +167,7 @@ def sharp_peak(x): res[1] / res[0], ) print("MCMC Integral results:") -res = mcmc_integrator(integrand_list1, mix_rate=0.5) +res = mcmc_integrator(sharp_integrands, f_dim=3, mix_rate=0.5) print( " I[0] =", res[0], @@ -181,7 +182,7 @@ def sharp_peak(x): vegas_map = Vegas(bounds, device=device) print("train VEGAS map for h(X)...") # vegas_map.train(20000, sharp_peak, epoch=10, alpha=0.5) -vegas_map.train(20000, integrand_list1, epoch=10, alpha=0.5) +vegas_map.train(20000, sharp_integrands, f_dim=3, epoch=10, alpha=0.5) print("VEGAS Integral results:") vegas_integrator = MonteCarlo( @@ -191,7 +192,7 @@ def sharp_peak(x): # nbatch=n_eval, device=device, ) -res = vegas_integrator(integrand_list1) +res = vegas_integrator(sharp_integrands, f_dim=3) print( " I[0] =", res[0], @@ -211,7 +212,7 @@ def sharp_peak(x): nburnin=n_therm, device=device, ) -res = vegasmcmc_integrator(integrand_list1, mix_rate=0.5) +res = vegasmcmc_integrator(sharp_integrands, f_dim=3, mix_rate=0.5) print( " I[0] =", res[0], From 35a707077feecc77ab9bb4489bf25ea851d3ce73 Mon Sep 17 00:00:00 2001 From: houpc Date: Sat, 2 Nov 2024 23:33:21 +0800 Subject: [PATCH 2/4] bugfix and udpate unittests --- src/integrators.py | 18 ++---- src/integrators_test.py | 122 ++++++++++++++-------------------------- src/maps_test.py | 5 +- src/mc_test.py | 7 +-- src/vegas_test.py | 42 ++++++++------ 5 files changed, 76 insertions(+), 118 deletions(-) diff --git a/src/integrators.py b/src/integrators.py index 66234c3..967f3ef 100644 --- a/src/integrators.py +++ b/src/integrators.py @@ -82,7 +82,6 @@ def __init__( def __call__(self, f: Callable, f_dim: int = 1, **kwargs): x, _ = self.sample(self.nbatch) fx = torch.empty((self.nbatch, f_dim), dtype=self.fx_dtype, device=self.device) - f(x, fx) epoch = self.neval // self.nbatch integ_values = torch.zeros( @@ -92,7 +91,6 @@ def __call__(self, f: Callable, f_dim: int = 1, **kwargs): for _ in range(epoch): x, log_detJ = self.sample(self.nbatch) f(x, fx) - # batch_results = self._multiply_by_jacobian(fx, torch.exp(log_detJ)) fx.mul_(log_detJ.exp_().unsqueeze_(1)) integ_values += fx / epoch @@ -106,14 +104,6 @@ def __call__(self, f: Callable, f_dim: int = 1, **kwargs): else: return results - def _multiply_by_jacobian(self, values, jac): - # if isinstance(values, dict): - # return {k: v * torch.exp(log_det_J) for k, v in values.items()} - if isinstance(values, (list, tuple)): - return torch.stack([v * jac for v in values], dim=-1) - else: - return torch.stack([values * jac], dim=-1) - def random_walk(dim, 
bounds, device, dtype, u, **kwargs): rangebounds = bounds[:, 1] - bounds[:, 0] @@ -170,7 +160,8 @@ def __call__( current_jac += detJ current_jac.exp_() fx = torch.empty((self.nbatch, f_dim), dtype=self.fx_dtype, device=self.device) - fx_weight = f(current_x, fx) + fx_weight = torch.empty(self.nbatch, dtype=self.fx_dtype, device=self.device) + fx_weight[:] = f(current_x, fx) fx_weight.abs_() current_weight = mix_rate / current_jac + (1 - mix_rate) * fx_weight @@ -185,7 +176,7 @@ def one_step(current_y, current_x, current_weight, current_jac): proposed_x, new_jac = self.maps.forward(proposed_y) new_jac.exp_() - fx_weight = f(proposed_x, fx) + fx_weight[:] = f(proposed_x, fx) fx_weight.abs_() new_weight = mix_rate / new_jac + (1 - mix_rate) * fx_weight new_weight.masked_fill_(new_weight < epsilon, epsilon) @@ -198,13 +189,12 @@ def one_step(current_y, current_x, current_weight, current_jac): ) current_y = torch.where(accept.unsqueeze(1), proposed_y, current_y) - # fx = torch.where(accept, new_fval, fx) current_x = torch.where(accept.unsqueeze(1), proposed_x, current_x) current_weight = torch.where(accept, new_weight, current_weight) current_jac = torch.where(accept, new_jac, current_jac) return current_y, current_x, current_weight, current_jac - for i in range(self.nburnin): + for _ in range(self.nburnin): current_y, current_x, current_weight, current_jac = one_step( current_y, current_x, current_weight, current_jac ) diff --git a/src/integrators_test.py b/src/integrators_test.py index fb29d94..33d41f4 100644 --- a/src/integrators_test.py +++ b/src/integrators_test.py @@ -16,14 +16,14 @@ def setUp(self): def test_init_with_bounds(self): integrator = Integrator( - bounds=self.bounds, device=self.device, dtype=self.dtype + bounds=self.bounds, device=self.device, x_dtype=self.dtype ) self.assertEqual(integrator.bounds.tolist(), self.bounds) self.assertEqual(integrator.dim, 2) self.assertEqual(integrator.neval, self.neval) self.assertEqual(integrator.nbatch, self.neval) self.assertEqual(integrator.device, self.device) - self.assertEqual(integrator.dtype, self.dtype) + self.assertEqual(integrator.x_dtype, self.dtype) def test_init_with_maps(self): class MockMaps: @@ -32,13 +32,13 @@ def __init__(self, bounds, dtype): self.dtype = dtype maps = MockMaps(bounds=self.bounds, dtype=self.dtype) - integrator = Integrator(maps=maps, device=self.device, dtype=self.dtype) + integrator = Integrator(maps=maps, device=self.device, x_dtype=self.dtype) self.assertEqual(integrator.bounds.tolist(), self.bounds) self.assertEqual(integrator.dim, 2) self.assertEqual(integrator.neval, self.neval) self.assertEqual(integrator.nbatch, self.neval) self.assertEqual(integrator.device, self.device) - self.assertEqual(integrator.dtype, self.dtype) + self.assertEqual(integrator.x_dtype, self.dtype) def test_init_with_mismatched_dtype(self): class MockMaps: @@ -48,15 +48,15 @@ def __init__(self, bounds, dtype): maps = MockMaps(bounds=self.bounds, dtype=torch.float32) with self.assertRaises(ValueError): - Integrator(maps=maps, device=self.device, dtype=self.dtype) + Integrator(maps=maps, device=self.device, x_dtype=self.dtype) def test_init_with_invalid_bounds(self): with self.assertRaises(TypeError): - Integrator(bounds=123, device=self.device, dtype=self.dtype) + Integrator(bounds=123, device=self.device, x_dtype=self.dtype) def test_sample_without_maps(self): integrator = Integrator( - bounds=self.bounds, device=self.device, dtype=self.dtype + bounds=self.bounds, device=self.device, x_dtype=self.dtype ) u, log_detJ = 
integrator.sample(100) self.assertEqual(u.shape, (100, 2)) @@ -72,7 +72,7 @@ def forward(self, u): return u, torch.zeros(u.shape[0], dtype=self.dtype) maps = MockMaps(bounds=self.bounds, dtype=self.dtype) - integrator = Integrator(maps=maps, device=self.device, dtype=self.dtype) + integrator = Integrator(maps=maps, device=self.device, x_dtype=self.dtype) u, log_detJ = integrator.sample(100) self.assertEqual(u.shape, (100, 2)) self.assertEqual(log_detJ.shape, (100,)) @@ -98,6 +98,15 @@ def setUp(self): self.dtype, ) + def f(self, x, fx): + fx[:, 0] = x.sum(dim=1) + return fx[:, 0] + + def f2(self, x, fx): + fx[:, 0] = x.sum(dim=1) + fx[:, 1] = x.prod(dim=1) + return fx.mean(dim=1) + def tearDown(self): # Teardown after tests pass @@ -108,7 +117,7 @@ def test_initialization(self): self.assertEqual(self.monte_carlo.neval, self.neval) self.assertEqual(self.monte_carlo.nbatch, self.nbatch) self.assertEqual(self.monte_carlo.device, self.device) - self.assertEqual(self.monte_carlo.dtype, self.dtype) + self.assertEqual(self.monte_carlo.x_dtype, self.dtype) self.assertTrue( torch.equal( self.monte_carlo.bounds, @@ -119,10 +128,7 @@ def test_initialization(self): def test_call_single_tensor_output(self): # Test the __call__ method with a function that returns a single tensor - def f(x): - return x.sum(dim=1) - - result = self.monte_carlo(f) + result = self.monte_carlo(self.f) self.assertIsInstance(result, RAvg) self.assertEqual(result.shape, ()) self.assertIsInstance(result.mean, float) @@ -130,10 +136,7 @@ def f(x): def test_call_multiple_tensor_output(self): # Test the __call__ method with a function that returns a list of tensors - def f(x): - return [x.sum(dim=1), x.prod(dim=1)] - - result = self.monte_carlo(f) + result = self.monte_carlo(self.f2, f_dim=2) self.assertIsInstance(result, np.ndarray) self.assertEqual(result.shape, (2,)) self.assertEqual(result.dtype, RAvg) @@ -146,25 +149,6 @@ def f(x): with self.assertRaises(TypeError): self.monte_carlo(f) - def test_multiply_by_jacobian_single_tensor(self): - # Test the _multiply_by_jacobian method with a single tensor - values = torch.tensor([1.0, 2.0, 3.0], dtype=self.dtype) - jac = torch.tensor([0.5, 0.5, 0.5], dtype=self.dtype) - result = self.monte_carlo._multiply_by_jacobian(values, jac) - expected = torch.tensor([[0.5], [1.0], [1.5]], dtype=self.dtype) - self.assertTrue(torch.allclose(result, expected)) - - def test_multiply_by_jacobian_multiple_tensors(self): - # Test the _multiply_by_jacobian method with a list of tensors - values = [ - torch.tensor([1.0, 2.0, 3.0], dtype=self.dtype), - torch.tensor([4.0, 5.0, 6.0], dtype=self.dtype), - ] - jac = torch.tensor([0.5, 0.5, 0.5], dtype=self.dtype) - result = self.monte_carlo._multiply_by_jacobian(values, jac) - expected = torch.tensor([[0.5, 2.0], [1.0, 2.5], [1.5, 3.0]], dtype=self.dtype) - self.assertTrue(torch.allclose(result, expected)) - def test_sample_method(self): # Test the sample method to ensure it returns the correct shape x, log_detJ = self.monte_carlo.sample(self.nbatch) @@ -172,11 +156,7 @@ def test_sample_method(self): self.assertEqual(log_detJ.shape, (self.nbatch,)) def test_call_with_cuda(self): - # Test the __call__ method with different device values - def f(x): - return x.sum(dim=1) - - # Test with device = "cuda" if available + # Test the __call__ method with device = "cuda" if available if torch.cuda.is_available(): monte_carlo_cuda = MonteCarlo( self.maps, @@ -187,7 +167,7 @@ def f(x): "cuda", self.dtype, ) - result_cuda = monte_carlo_cuda(f) + result_cuda = 
monte_carlo_cuda(self.f) self.assertIsInstance(result_cuda, RAvg) self.assertIsInstance(result_cuda.mean, float) self.assertIsInstance(result_cuda.sdev, float) @@ -195,9 +175,6 @@ def f(x): def test_call_with_different_dtype(self): # Test the __call__ method with different dtype values - def f(x): - return x.sum(dim=1) - # Test with dtype = torch.float32 monte_carlo_float32 = MonteCarlo( self.maps, @@ -208,7 +185,7 @@ def f(x): self.device, torch.float32, ) - result_float32 = monte_carlo_float32(f) + result_float32 = monte_carlo_float32(self.f) self.assertIsInstance(result_float32, RAvg) self.assertIsInstance(result_float32.mean, float) self.assertIsInstance(result_float32.sdev, float) @@ -216,9 +193,6 @@ def f(x): def test_call_with_different_bounds(self): # Test the __call__ method with different bounds values - def f(x): - return x.sum(dim=1) - # Test with bounds = [0, 2] monte_carlo_bounds_0_2 = MonteCarlo( self.maps, @@ -229,7 +203,7 @@ def f(x): self.device, self.dtype, ) - result_bounds_0_2 = monte_carlo_bounds_0_2(f) + result_bounds_0_2 = monte_carlo_bounds_0_2(self.f) self.assertIsInstance(result_bounds_0_2, RAvg) self.assertEqual(result_bounds_0_2.shape, ()) @@ -243,7 +217,7 @@ def f(x): self.device, self.dtype, ) - result_bounds_minus1_1 = monte_carlo_bounds_minus1_1(f) + result_bounds_minus1_1 = monte_carlo_bounds_minus1_1(self.f) self.assertIsInstance(result_bounds_minus1_1, RAvg) self.assertEqual(result_bounds_minus1_1.shape, ()) @@ -270,6 +244,15 @@ def setUp(self): self.dtype, ) + def f(self, x, fx): + fx[:, 0] = x.sum(dim=1) + return fx[:, 0] + + def f2(self, x, fx): + fx[:, 0] = x.sum(dim=1) + fx[:, 1] = x.prod(dim=1) + return fx.mean(dim=1) + def tearDown(self): # Teardown after tests pass @@ -281,7 +264,7 @@ def test_initialization(self): self.assertEqual(self.mcmc.nbatch, self.nbatch) self.assertEqual(self.mcmc.nburnin, self.nburnin) self.assertEqual(self.mcmc.device, self.device) - self.assertEqual(self.mcmc.dtype, self.dtype) + self.assertEqual(self.mcmc.x_dtype, self.dtype) self.assertTrue( torch.equal( self.mcmc.bounds, @@ -292,10 +275,7 @@ def test_initialization(self): def test_call_single_tensor_output(self): # Test the __call__ method with a function that returns a single tensor - def f(x): - return x.sum(dim=1) - - result = self.mcmc(f) + result = self.mcmc(self.f) self.assertIsInstance(result, RAvg) self.assertEqual(result.shape, ()) self.assertIsInstance(result.mean, float) @@ -303,10 +283,7 @@ def f(x): def test_call_multiple_tensor_output(self): # Test the __call__ method with a function that returns a list of tensors - def f(x): - return [x.sum(dim=1), x.prod(dim=1)] - - result = self.mcmc(f) + result = self.mcmc(self.f2, f_dim=2) self.assertIsInstance(result, np.ndarray) self.assertEqual(result.shape, (2,)) self.assertEqual(result.dtype, RAvg) @@ -320,11 +297,7 @@ def f(x): self.mcmc(f) def test_call_with_different_device(self): - # Test the __call__ method with different device values - def f(x): - return x.sum(dim=1) - - # Test with device = "cuda" if available + # Test the __call__ method with device = "cuda" if available if torch.cuda.is_available(): mcmc_cuda = MCMC( self.maps, @@ -336,15 +309,12 @@ def f(x): "cuda", self.dtype, ) - result_cuda = mcmc_cuda(f) + result_cuda = mcmc_cuda(self.f) self.assertIsInstance(result_cuda, RAvg) self.assertEqual(result_cuda.shape, ()) def test_call_with_different_dtype(self): # Test the __call__ method with different dtype values - def f(x): - return x.sum(dim=1) - # Test with dtype = torch.float32 mcmc_float32 
= MCMC( self.maps, @@ -356,7 +326,7 @@ def f(x): self.device, torch.float32, ) - result_float32 = mcmc_float32(f) + result_float32 = mcmc_float32(self.f) self.assertIsInstance(result_float32, RAvg) self.assertEqual(result_float32.shape, ()) self.assertIsInstance(result_float32.mean, float) @@ -364,9 +334,6 @@ def f(x): def test_call_with_different_bounds(self): # Test the __call__ method with different bounds values - def f(x): - return x.sum(dim=1) - # Test with bounds = [0, 2] mcmc_bounds = MCMC( self.maps, @@ -378,24 +345,21 @@ def f(x): self.device, self.dtype, ) - result_bounds = mcmc_bounds(f) + result_bounds = mcmc_bounds(self.f) self.assertIsInstance(result_bounds, RAvg) self.assertEqual(result_bounds.shape, ()) def test_call_with_different_proposal_dist(self): # Test the __call__ method with different proposal distributions - def f(x): - return x.sum(dim=1) - from integrators import random_walk, gaussian # Test with uniform proposal distribution - result_rw = self.mcmc(f, proposal_dist=random_walk) + result_rw = self.mcmc(self.f, proposal_dist=random_walk) self.assertIsInstance(result_rw, RAvg) self.assertEqual(result_rw.shape, ()) # Test with normal proposal distribution - result_normal = self.mcmc(f, proposal_dist=gaussian) + result_normal = self.mcmc(self.f, proposal_dist=gaussian) self.assertIsInstance(result_normal, RAvg) self.assertEqual(result_normal.shape, ()) diff --git a/src/maps_test.py b/src/maps_test.py index 97f12ff..7f153fe 100644 --- a/src/maps_test.py +++ b/src/maps_test.py @@ -170,8 +170,9 @@ def test_inverse(self): def test_train(self): # Test training the Vegas class - def f(x): - return torch.sum(x, dim=1) + def f(x, fx): + fx[:, 0] = torch.sum(x, dim=1) + return fx[:, 0] nsamples = 100 epoch = 5 diff --git a/src/mc_test.py b/src/mc_test.py index 06152d6..e038226 100644 --- a/src/mc_test.py +++ b/src/mc_test.py @@ -25,16 +25,14 @@ def half_sphere_integrand(x, f): def two_integrands(x, f): f[:, 0] = (x[:, 0] ** 2 + x[:, 1] ** 2 < 1).double() - f[:, 1] = torch.clamp(1 - (x[:, 0] ** 2 + x[:, 1] ** 2), min=0) * 2 + f[:, 1] = -torch.clamp(1 - (x[:, 0] ** 2 + x[:, 1] ** 2), min=0) return f.mean(dim=-1) def sharp_integrands(x, f, dim=4): - # dx2 = torch.zeros(x.shape[0], dtype=x.dtype, device=x.device) + f.zero_() for d in range(dim): f[:, 0] += (x[:, d] - 0.5) ** 2 - # dx2 += (x[:, d] - 0.5) ** 2 - # f[:, 0] = torch.exp(-200 * dx2) f[:, 0] *= -200 f[:, 0].exp_() f[:, 1] = f[:, 0] * x[:, 0] @@ -181,7 +179,6 @@ def sharp_integrands(x, f, dim=4): vegas_map = Vegas(bounds, device=device) print("train VEGAS map for h(X)...") -# vegas_map.train(20000, sharp_peak, epoch=10, alpha=0.5) vegas_map.train(20000, sharp_integrands, f_dim=3, epoch=10, alpha=0.5) print("VEGAS Integral results:") diff --git a/src/vegas_test.py b/src/vegas_test.py index addc8fc..a1e847e 100644 --- a/src/vegas_test.py +++ b/src/vegas_test.py @@ -4,28 +4,34 @@ from maps import Vegas, Linear from utils import set_seed, get_device -# set_seed(42) -# device = get_device() -device = torch.device("cpu") +set_seed(42) +device = get_device() +# device = torch.device("cpu") -def integrand_list1(x): - dx2 = torch.zeros(x.shape[0], dtype=x.dtype, device=x.device) - for d in range(4): - dx2 += (x[:, d] - 0.5) ** 2 - f = torch.exp(-200 * dx2) - return [f, f * x[:, 0], f * x[:, 0] ** 2] +def sharp_peak(x, f, dim=4): + f.zero_() + for d in range(dim): + f[:, 0] += (x[:, d] - 0.5) ** 2 + f[:, 0] *= -200 + f[:, 0].exp_() + return f[:, 0] -def sharp_peak(x): - dx2 = torch.zeros(x.shape[0], dtype=x.dtype, 
device=x.device) - for d in range(4): - dx2 += (x[:, d] - 0.5) ** 2 - return torch.exp(-200 * dx2) +def sharp_integrands(x, f, dim=4): + f.zero_() + for d in range(dim): + f[:, 0] += (x[:, d] - 0.5) ** 2 + f[:, 0] *= -200 + f[:, 0].exp_() + f[:, 1] = f[:, 0] * x[:, 0] + f[:, 2] = f[:, 0] * x[:, 0] ** 2 + return f.mean(dim=-1) -def func(x): - return torch.log(x[:, 0]) / torch.sqrt(x[:, 0]) +def func(x, f): + f[:, 0] = torch.log(x[:, 0]) / torch.sqrt(x[:, 0]) + return f[:, 0] ninc = 1000 @@ -76,7 +82,7 @@ def func(x): nbatch=n_batch, device=device, ) -res = vegas_integrator(integrand_list1) +res = vegas_integrator(sharp_integrands, f_dim=3) print( " I[0] =", res[0], @@ -102,7 +108,7 @@ def func(x): nburnin=n_therm, device=device, ) -res = vegasmcmc_integrator(integrand_list1, mix_rate=0.5) +res = vegasmcmc_integrator(sharp_integrands, f_dim=3, mix_rate=0.5) print( " I[0] =", res[0], From fc57c53acd4d7496c1249ccb4084299f5c6ba0ef Mon Sep 17 00:00:00 2001 From: houpc Date: Sun, 3 Nov 2024 11:26:56 +0800 Subject: [PATCH 3/4] set dtype argument for both x and fx --- src/integrators.py | 39 +++++++++++++++++---------------------- src/integrators_test.py | 20 ++++++++++---------- src/maps.py | 4 ++-- 3 files changed, 29 insertions(+), 34 deletions(-) diff --git a/src/integrators.py b/src/integrators.py index 967f3ef..0fe1f10 100644 --- a/src/integrators.py +++ b/src/integrators.py @@ -22,13 +22,12 @@ def __init__( neval: int = 1000, nbatch: int = None, device="cpu", - x_dtype=torch.float64, - fx_dtype=torch.float64, + dtype=torch.float64, ): - self.x_dtype = x_dtype - self.fx_dtype = fx_dtype + self.dtype = dtype + self.dtype = dtype if maps: - if not self.x_dtype == maps.dtype: + if not self.dtype == maps.dtype: raise ValueError( "Data type of the variables of integrator should be same as maps." 
) @@ -36,11 +35,11 @@ def __init__( else: if not isinstance(bounds, (list, np.ndarray)): raise TypeError("bounds must be a list or a NumPy array.") - self.bounds = torch.tensor(bounds, dtype=x_dtype, device=device) + self.bounds = torch.tensor(bounds, dtype=dtype, device=device) self.dim = len(self.bounds) if not q0: - q0 = Uniform(self.bounds, device=device, dtype=x_dtype) + q0 = Uniform(self.bounds, device=device, dtype=dtype) self.q0 = q0 self.maps = maps self.neval = neval @@ -74,18 +73,17 @@ def __init__( neval: int = 1000, nbatch: int = None, device="cpu", - x_dtype=torch.float64, - fx_dtype=torch.float64, + dtype=torch.float64, ): - super().__init__(maps, bounds, q0, neval, nbatch, device, x_dtype, fx_dtype) + super().__init__(maps, bounds, q0, neval, nbatch, device, dtype) def __call__(self, f: Callable, f_dim: int = 1, **kwargs): x, _ = self.sample(self.nbatch) - fx = torch.empty((self.nbatch, f_dim), dtype=self.fx_dtype, device=self.device) + fx = torch.empty((self.nbatch, f_dim), dtype=self.dtype, device=self.device) epoch = self.neval // self.nbatch integ_values = torch.zeros( - (self.nbatch, f_dim), dtype=self.fx_dtype, device=self.device + (self.nbatch, f_dim), dtype=self.dtype, device=self.device ) for _ in range(epoch): @@ -135,10 +133,9 @@ def __init__( nbatch: int = None, nburnin: int = 500, device="cpu", - x_dtype=torch.float64, - fx_dtype=torch.float64, + dtype=torch.float64, ): - super().__init__(maps, bounds, q0, neval, nbatch, device, x_dtype, fx_dtype) + super().__init__(maps, bounds, q0, neval, nbatch, device, dtype) self.nburnin = nburnin if maps is None: self.maps = Linear([(0, 1)] * self.dim, device=device) @@ -159,8 +156,8 @@ def __call__( current_x, detJ = self.maps.forward(current_y) current_jac += detJ current_jac.exp_() - fx = torch.empty((self.nbatch, f_dim), dtype=self.fx_dtype, device=self.device) - fx_weight = torch.empty(self.nbatch, dtype=self.fx_dtype, device=self.device) + fx = torch.empty((self.nbatch, f_dim), dtype=self.dtype, device=self.device) + fx_weight = torch.empty(self.nbatch, dtype=self.dtype, device=self.device) fx_weight[:] = f(current_x, fx) fx_weight.abs_() @@ -171,7 +168,7 @@ def __call__( def one_step(current_y, current_x, current_weight, current_jac): proposed_y = proposal_dist( - self.dim, self.bounds, self.device, self.x_dtype, current_y, **kwargs + self.dim, self.bounds, self.device, self.dtype, current_y, **kwargs ) proposed_x, new_jac = self.maps.forward(proposed_y) new_jac.exp_() @@ -199,10 +196,8 @@ def one_step(current_y, current_x, current_weight, current_jac): current_y, current_x, current_weight, current_jac ) - values = torch.zeros( - (self.nbatch, f_dim), dtype=self.fx_dtype, device=self.device - ) - refvalues = torch.zeros(self.nbatch, dtype=self.fx_dtype, device=self.device) + values = torch.zeros((self.nbatch, f_dim), dtype=self.dtype, device=self.device) + refvalues = torch.zeros(self.nbatch, dtype=self.dtype, device=self.device) for _ in range(n_meas): for _ in range(meas_freq): diff --git a/src/integrators_test.py b/src/integrators_test.py index 33d41f4..058d951 100644 --- a/src/integrators_test.py +++ b/src/integrators_test.py @@ -16,14 +16,14 @@ def setUp(self): def test_init_with_bounds(self): integrator = Integrator( - bounds=self.bounds, device=self.device, x_dtype=self.dtype + bounds=self.bounds, device=self.device, dtype=self.dtype ) self.assertEqual(integrator.bounds.tolist(), self.bounds) self.assertEqual(integrator.dim, 2) self.assertEqual(integrator.neval, self.neval) 
self.assertEqual(integrator.nbatch, self.neval) self.assertEqual(integrator.device, self.device) - self.assertEqual(integrator.x_dtype, self.dtype) + self.assertEqual(integrator.dtype, self.dtype) def test_init_with_maps(self): class MockMaps: @@ -32,13 +32,13 @@ def __init__(self, bounds, dtype): self.dtype = dtype maps = MockMaps(bounds=self.bounds, dtype=self.dtype) - integrator = Integrator(maps=maps, device=self.device, x_dtype=self.dtype) + integrator = Integrator(maps=maps, device=self.device, dtype=self.dtype) self.assertEqual(integrator.bounds.tolist(), self.bounds) self.assertEqual(integrator.dim, 2) self.assertEqual(integrator.neval, self.neval) self.assertEqual(integrator.nbatch, self.neval) self.assertEqual(integrator.device, self.device) - self.assertEqual(integrator.x_dtype, self.dtype) + self.assertEqual(integrator.dtype, self.dtype) def test_init_with_mismatched_dtype(self): class MockMaps: @@ -48,15 +48,15 @@ def __init__(self, bounds, dtype): maps = MockMaps(bounds=self.bounds, dtype=torch.float32) with self.assertRaises(ValueError): - Integrator(maps=maps, device=self.device, x_dtype=self.dtype) + Integrator(maps=maps, device=self.device, dtype=self.dtype) def test_init_with_invalid_bounds(self): with self.assertRaises(TypeError): - Integrator(bounds=123, device=self.device, x_dtype=self.dtype) + Integrator(bounds=123, device=self.device, dtype=self.dtype) def test_sample_without_maps(self): integrator = Integrator( - bounds=self.bounds, device=self.device, x_dtype=self.dtype + bounds=self.bounds, device=self.device, dtype=self.dtype ) u, log_detJ = integrator.sample(100) self.assertEqual(u.shape, (100, 2)) @@ -72,7 +72,7 @@ def forward(self, u): return u, torch.zeros(u.shape[0], dtype=self.dtype) maps = MockMaps(bounds=self.bounds, dtype=self.dtype) - integrator = Integrator(maps=maps, device=self.device, x_dtype=self.dtype) + integrator = Integrator(maps=maps, device=self.device, dtype=self.dtype) u, log_detJ = integrator.sample(100) self.assertEqual(u.shape, (100, 2)) self.assertEqual(log_detJ.shape, (100,)) @@ -117,7 +117,7 @@ def test_initialization(self): self.assertEqual(self.monte_carlo.neval, self.neval) self.assertEqual(self.monte_carlo.nbatch, self.nbatch) self.assertEqual(self.monte_carlo.device, self.device) - self.assertEqual(self.monte_carlo.x_dtype, self.dtype) + self.assertEqual(self.monte_carlo.dtype, self.dtype) self.assertTrue( torch.equal( self.monte_carlo.bounds, @@ -264,7 +264,7 @@ def test_initialization(self): self.assertEqual(self.mcmc.nbatch, self.nbatch) self.assertEqual(self.mcmc.nburnin, self.nburnin) self.assertEqual(self.mcmc.device, self.device) - self.assertEqual(self.mcmc.x_dtype, self.dtype) + self.assertEqual(self.mcmc.dtype, self.dtype) self.assertTrue( torch.equal( self.mcmc.bounds, diff --git a/src/maps.py b/src/maps.py index 28ff352..7e5b335 100644 --- a/src/maps.py +++ b/src/maps.py @@ -92,11 +92,11 @@ def __init__(self, bounds, ninc=1000, alpha=0.5, device="cpu", dtype=torch.float self._A = self.bounds[:, 1] - self.bounds[:, 0] self._jaclinear = torch.prod(self._A) - def train(self, nsamples, f, f_dim=1, fx_dtype=torch.float64, epoch=5, alpha=0.5): + def train(self, nsamples, f, f_dim=1, dtype=torch.float64, epoch=5, alpha=0.5): q0 = Uniform(self.bounds, device=self.device, dtype=self.dtype) u, log_detJ0 = q0.sample(nsamples) - fx = torch.empty(nsamples, f_dim, device=self.device, dtype=fx_dtype) + fx = torch.empty(nsamples, f_dim, device=self.device, dtype=dtype) for _ in range(epoch): x, log_detJ = self.forward(u) From 
e0d13eeb7e40b9a6c539cc02f20320731c656dec Mon Sep 17 00:00:00 2001
From: houpc
Date: Sun, 3 Nov 2024 16:36:22 +0800
Subject: [PATCH 4/4] clean up duplicated code

---
 src/integrators.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/integrators.py b/src/integrators.py
index 0fe1f10..0000126 100644
--- a/src/integrators.py
+++ b/src/integrators.py
@@ -24,7 +24,6 @@ def __init__(
         device="cpu",
         dtype=torch.float64,
     ):
-        self.dtype = dtype
         self.dtype = dtype
         if maps:
             if not self.dtype == maps.dtype:
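
Usage sketch: a minimal example of the in-place integrand convention introduced by
this series, mirroring the updated src/mc_test.py. The bounds, neval, and nbatch
values below are illustrative only, and the imports assume src/ is on the Python
path; the integrand writes its f_dim components into a preallocated (nbatch, f_dim)
buffer and returns a per-sample scalar that the MCMC/VEGAS samplers use as a weight.

    import torch
    from integrators import MonteCarlo, MCMC
    from maps import Vegas

    def two_integrands(x, f):
        # Fill the preallocated (nbatch, f_dim) buffer in place, one column per component.
        f[:, 0] = (x[:, 0] ** 2 + x[:, 1] ** 2 < 1).double()  # unit-circle indicator
        f[:, 1] = torch.clamp(1 - (x[:, 0] ** 2 + x[:, 1] ** 2), min=0) * 2
        # The returned per-sample scalar is used as a sampling weight by MCMC/VEGAS.
        return f.mean(dim=-1)

    bounds = [(-1, 1), (-1, 1)]

    # Plain Monte Carlo: both components estimate pi over [-1, 1]^2.
    mc = MonteCarlo(bounds=bounds, neval=400000, nbatch=10000)
    res = mc(two_integrands, f_dim=2)

    # MCMC, sampling from a mixture of the uniform density and |f|.
    mcmc = MCMC(bounds=bounds, neval=400000, nbatch=10000, nburnin=500)
    res = mcmc(two_integrands, f_dim=2, mix_rate=0.5)

    # Train a VEGAS map on the same integrand and reuse it for importance sampling.
    vegas_map = Vegas(bounds)
    vegas_map.train(20000, two_integrands, f_dim=2, epoch=10, alpha=0.5)
    vegas_mc = MonteCarlo(vegas_map, neval=400000, nbatch=10000)
    res = vegas_mc(two_integrands, f_dim=2)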