diff --git a/src/integrators.py b/src/integrators.py
index ecec374..0000126 100644
--- a/src/integrators.py
+++ b/src/integrators.py
@@ -1,7 +1,7 @@
-from typing import Callable, Union, List, Tuple, Dict
+from typing import Callable
 import torch
 from utils import RAvg
-from maps import Map, Linear, CompositeMap
+from maps import Linear
 from base import Uniform
 import gvar
 import numpy as np
@@ -27,7 +27,9 @@ def __init__(
         self.dtype = dtype
         if maps:
             if not self.dtype == maps.dtype:
-                raise ValueError("Float type of maps should be same as integrator.")
+                raise ValueError(
+                    "The data type of the integrator must match that of the maps."
+                )
             self.bounds = maps.bounds
         else:
             if not isinstance(bounds, (list, np.ndarray)):
@@ -74,50 +76,31 @@ def __init__(
     ):
         super().__init__(maps, bounds, q0, neval, nbatch, device, dtype)
 
-    def __call__(self, f: Callable, **kwargs):
-        x, _ = self.sample(1)
-        f_values = f(x)
-        if isinstance(f_values, (list, tuple)) and isinstance(
-            f_values[0], torch.Tensor
-        ):
-            f_size = len(f_values)
-            type_fval = f_values[0].dtype
-        elif isinstance(f_values, torch.Tensor):
-            f_size = 1
-            type_fval = f_values.dtype
-        else:
-            raise TypeError(
-                "f must return a torch.Tensor or a list/tuple of torch.Tensor."
-            )
+    def __call__(self, f: Callable, f_dim: int = 1, **kwargs):
+        x, _ = self.sample(self.nbatch)
+        fx = torch.empty((self.nbatch, f_dim), dtype=self.dtype, device=self.device)
         epoch = self.neval // self.nbatch
-        values = torch.zeros((self.nbatch, f_size), dtype=type_fval, device=self.device)
+        integ_values = torch.zeros(
+            (self.nbatch, f_dim), dtype=self.dtype, device=self.device
+        )
 
-        for iepoch in range(epoch):
+        for _ in range(epoch):
             x, log_detJ = self.sample(self.nbatch)
-            f_values = f(x)
-            batch_results = self._multiply_by_jacobian(f_values, torch.exp(log_detJ))
-
-            values += batch_results / epoch
-
-        results = np.array([RAvg() for _ in range(f_size)])
-        for i in range(f_size):
-            _mean = values[:, i].mean().item()
-            _var = values[:, i].var().item() / self.nbatch
+            f(x, fx)
+            fx.mul_(log_detJ.exp_().unsqueeze_(1))
+            integ_values += fx / epoch
+
+        results = np.array([RAvg() for _ in range(f_dim)])
+        for i in range(f_dim):
+            _mean = integ_values[:, i].mean().item()
+            _var = integ_values[:, i].var().item() / self.nbatch
             results[i].update(_mean, _var, self.neval)
 
-        if f_size == 1:
+        if f_dim == 1:
             return results[0]
         else:
             return results
 
-    def _multiply_by_jacobian(self, values, jac):
-        # if isinstance(values, dict):
-        #     return {k: v * torch.exp(log_det_J) for k, v in values.items()}
-        if isinstance(values, (list, tuple)):
-            return torch.stack([v * jac for v in values], dim=-1)
-        else:
-            return torch.stack([values * jac], dim=-1)
-
 
 def random_walk(dim, bounds, device, dtype, u, **kwargs):
     rangebounds = bounds[:, 1] - bounds[:, 0]
@@ -145,9 +128,9 @@ def __init__(
         maps=None,
         bounds=None,
         q0=None,
-        neval=10000,
-        nbatch=None,
-        nburnin=500,
+        neval: int = 10000,
+        nbatch: int = None,
+        nburnin: int = 500,
         device="cpu",
         dtype=torch.float64,
     ):
@@ -160,9 +143,10 @@ def __init__(
     def __call__(
         self,
         f: Callable,
+        f_dim: int = 1,
         proposal_dist: Callable = uniform,
-        thinning=1,
         mix_rate=0.5,
+        meas_freq: int = 1,
         **kwargs,
     ):
         epsilon = 1e-16  # Small value to ensure numerical stability
@@ -170,92 +154,74 @@ def __call__(
         current_y, current_jac = self.q0.sample(self.nbatch)
         current_x, detJ = self.maps.forward(current_y)
         current_jac += detJ
-        current_jac = torch.exp(current_jac)
-        current_fval = f(current_x)
-        if isinstance(current_fval, (list, tuple)) and isinstance(
-            current_fval[0], torch.Tensor
-        ):
-            f_size = len(current_fval)
-            current_fval = sum(current_fval)
-
-            def _integrand(x):
-                return sum(f(x))
-        elif isinstance(current_fval, torch.Tensor):
-            f_size = 1
-
-            def _integrand(x):
-                return f(x)
-        else:
-            raise TypeError(
-                "f must return a torch.Tensor or a list/tuple of torch.Tensor."
-            )
-        type_fval = current_fval.dtype
+        current_jac.exp_()
+        fx = torch.empty((self.nbatch, f_dim), dtype=self.dtype, device=self.device)
+        fx_weight = torch.empty(self.nbatch, dtype=self.dtype, device=self.device)
+        fx_weight[:] = f(current_x, fx)
+        fx_weight.abs_()
 
-        current_weight = mix_rate / current_jac + (1 - mix_rate) * current_fval.abs()
+        current_weight = mix_rate / current_jac + (1 - mix_rate) * fx_weight
         current_weight.masked_fill_(current_weight < epsilon, epsilon)
 
-        n_meas = epoch // thinning
+        n_meas = epoch // meas_freq
 
         def one_step(current_y, current_x, current_weight, current_jac):
             proposed_y = proposal_dist(
                 self.dim, self.bounds, self.device, self.dtype, current_y, **kwargs
             )
             proposed_x, new_jac = self.maps.forward(proposed_y)
-            new_jac = torch.exp(new_jac)
+            new_jac.exp_()
 
-            new_fval = _integrand(proposed_x)
-            new_weight = mix_rate / new_jac + (1 - mix_rate) * new_fval.abs()
+            fx_weight[:] = f(proposed_x, fx)
+            fx_weight.abs_()
+            new_weight = mix_rate / new_jac + (1 - mix_rate) * fx_weight
             new_weight.masked_fill_(new_weight < epsilon, epsilon)
 
             acceptance_probs = new_weight / current_weight * new_jac / current_jac
 
             accept = (
-                torch.rand(self.nbatch, dtype=self.dtype, device=self.device)
+                torch.rand(self.nbatch, dtype=torch.float64, device=self.device)
                 <= acceptance_probs
             )
 
             current_y = torch.where(accept.unsqueeze(1), proposed_y, current_y)
-            # current_fval = torch.where(accept, new_fval, current_fval)
             current_x = torch.where(accept.unsqueeze(1), proposed_x, current_x)
             current_weight = torch.where(accept, new_weight, current_weight)
             current_jac = torch.where(accept, new_jac, current_jac)
 
             return current_y, current_x, current_weight, current_jac
 
-        for i in range(self.nburnin):
+        for _ in range(self.nburnin):
            current_y, current_x, current_weight, current_jac = one_step(
                current_y, current_x, current_weight, current_jac
            )
 
-        values = torch.zeros((self.nbatch, f_size), dtype=type_fval, device=self.device)
-        refvalues = torch.zeros(self.nbatch, dtype=type_fval, device=self.device)
+        values = torch.zeros((self.nbatch, f_dim), dtype=self.dtype, device=self.device)
+        refvalues = torch.zeros(self.nbatch, dtype=self.dtype, device=self.device)
 
-        for imeas in range(n_meas):
-            for j in range(thinning):
+        for _ in range(n_meas):
+            for _ in range(meas_freq):
                 current_y, current_x, current_weight, current_jac = one_step(
                     current_y, current_x, current_weight, current_jac
                 )
+            f(current_x, fx)
 
-            batch_results = self._multiply_by_jacobian(
-                f(current_x), 1.0 / current_weight
-            )
-            batch_results_ref = 1 / (current_jac * current_weight)
-
-            values += batch_results / n_meas
-            refvalues += batch_results_ref / n_meas
+            fx.div_(current_weight.unsqueeze(1))
+            values += fx / n_meas
+            refvalues += 1 / (current_jac * current_weight) / n_meas
 
-        results = np.array([RAvg() for _ in range(f_size)])
+        results = np.array([RAvg() for _ in range(f_dim)])
         results_ref = RAvg()
 
         mean_ref = refvalues.mean().item()
         var_ref = refvalues.var().item() / self.nbatch
         results_ref.update(mean_ref, var_ref, self.neval)
-        for i in range(f_size):
+        for i in range(f_dim):
            _mean = values[:, i].mean().item()
            _var = values[:, i].var().item() / self.nbatch
            results[i].update(_mean, _var, self.neval)
 
-        if f_size == 1:
+        if f_dim == 1:
            res = results[0] / results_ref * self._rangebounds.prod()
            result = RAvg(itn_results=[res], sum_neval=self.neval)
            return result
diff --git a/src/integrators_test.py b/src/integrators_test.py
index fb29d94..058d951 100644
--- a/src/integrators_test.py
+++ b/src/integrators_test.py
@@ -98,6 +98,15 @@ def setUp(self):
             self.dtype,
         )
 
+    def f(self, x, fx):
+        fx[:, 0] = x.sum(dim=1)
+        return fx[:, 0]
+
+    def f2(self, x, fx):
+        fx[:, 0] = x.sum(dim=1)
+        fx[:, 1] = x.prod(dim=1)
+        return fx.mean(dim=1)
+
     def tearDown(self):
         # Teardown after tests
         pass
@@ -119,10 +128,7 @@ def test_initialization(self):
 
     def test_call_single_tensor_output(self):
         # Test the __call__ method with a function that returns a single tensor
-        def f(x):
-            return x.sum(dim=1)
-
-        result = self.monte_carlo(f)
+        result = self.monte_carlo(self.f)
         self.assertIsInstance(result, RAvg)
         self.assertEqual(result.shape, ())
         self.assertIsInstance(result.mean, float)
@@ -130,10 +136,7 @@ def f(x):
 
     def test_call_multiple_tensor_output(self):
         # Test the __call__ method with a function that returns a list of tensors
-        def f(x):
-            return [x.sum(dim=1), x.prod(dim=1)]
-
-        result = self.monte_carlo(f)
+        result = self.monte_carlo(self.f2, f_dim=2)
         self.assertIsInstance(result, np.ndarray)
         self.assertEqual(result.shape, (2,))
         self.assertEqual(result.dtype, RAvg)
@@ -146,25 +149,6 @@ def f(x):
         with self.assertRaises(TypeError):
             self.monte_carlo(f)
 
-    def test_multiply_by_jacobian_single_tensor(self):
-        # Test the _multiply_by_jacobian method with a single tensor
-        values = torch.tensor([1.0, 2.0, 3.0], dtype=self.dtype)
-        jac = torch.tensor([0.5, 0.5, 0.5], dtype=self.dtype)
-        result = self.monte_carlo._multiply_by_jacobian(values, jac)
-        expected = torch.tensor([[0.5], [1.0], [1.5]], dtype=self.dtype)
-        self.assertTrue(torch.allclose(result, expected))
-
-    def test_multiply_by_jacobian_multiple_tensors(self):
-        # Test the _multiply_by_jacobian method with a list of tensors
-        values = [
-            torch.tensor([1.0, 2.0, 3.0], dtype=self.dtype),
-            torch.tensor([4.0, 5.0, 6.0], dtype=self.dtype),
-        ]
-        jac = torch.tensor([0.5, 0.5, 0.5], dtype=self.dtype)
-        result = self.monte_carlo._multiply_by_jacobian(values, jac)
-        expected = torch.tensor([[0.5, 2.0], [1.0, 2.5], [1.5, 3.0]], dtype=self.dtype)
-        self.assertTrue(torch.allclose(result, expected))
-
     def test_sample_method(self):
         # Test the sample method to ensure it returns the correct shape
         x, log_detJ = self.monte_carlo.sample(self.nbatch)
@@ -172,11 +156,7 @@ def test_sample_method(self):
         self.assertEqual(log_detJ.shape, (self.nbatch,))
 
     def test_call_with_cuda(self):
-        # Test the __call__ method with different device values
-        def f(x):
-            return x.sum(dim=1)
-
-        # Test with device = "cuda" if available
+        # Test the __call__ method with device = "cuda" if available
         if torch.cuda.is_available():
             monte_carlo_cuda = MonteCarlo(
                 self.maps,
@@ -187,7 +167,7 @@ def f(x):
                 "cuda",
                 self.dtype,
             )
-            result_cuda = monte_carlo_cuda(f)
+            result_cuda = monte_carlo_cuda(self.f)
             self.assertIsInstance(result_cuda, RAvg)
             self.assertIsInstance(result_cuda.mean, float)
             self.assertIsInstance(result_cuda.sdev, float)
@@ -195,9 +175,6 @@ def f(x):
 
     def test_call_with_different_dtype(self):
         # Test the __call__ method with different dtype values
-        def f(x):
-            return x.sum(dim=1)
-
         # Test with dtype = torch.float32
         monte_carlo_float32 = MonteCarlo(
             self.maps,
@@ -208,7 +185,7 @@ def f(x):
             self.device,
             torch.float32,
         )
-        result_float32 = monte_carlo_float32(f)
+        result_float32 = monte_carlo_float32(self.f)
         self.assertIsInstance(result_float32, RAvg)
         self.assertIsInstance(result_float32.mean, float)
         self.assertIsInstance(result_float32.sdev, float)
@@ -216,9 +193,6 @@ def f(x):
 
     def test_call_with_different_bounds(self):
         # Test the __call__ method with different bounds values
-        def f(x):
-            return x.sum(dim=1)
-
         # Test with bounds = [0, 2]
         monte_carlo_bounds_0_2 = MonteCarlo(
             self.maps,
@@ -229,7 +203,7 @@ def f(x):
             self.device,
             self.dtype,
         )
-        result_bounds_0_2 = monte_carlo_bounds_0_2(f)
+        result_bounds_0_2 = monte_carlo_bounds_0_2(self.f)
         self.assertIsInstance(result_bounds_0_2, RAvg)
         self.assertEqual(result_bounds_0_2.shape, ())
 
@@ -243,7 +217,7 @@ def f(x):
             self.device,
             self.dtype,
         )
-        result_bounds_minus1_1 = monte_carlo_bounds_minus1_1(f)
+        result_bounds_minus1_1 = monte_carlo_bounds_minus1_1(self.f)
         self.assertIsInstance(result_bounds_minus1_1, RAvg)
         self.assertEqual(result_bounds_minus1_1.shape, ())
 
@@ -270,6 +244,15 @@ def setUp(self):
             self.dtype,
         )
 
+    def f(self, x, fx):
+        fx[:, 0] = x.sum(dim=1)
+        return fx[:, 0]
+
+    def f2(self, x, fx):
+        fx[:, 0] = x.sum(dim=1)
+        fx[:, 1] = x.prod(dim=1)
+        return fx.mean(dim=1)
+
     def tearDown(self):
         # Teardown after tests
         pass
@@ -292,10 +275,7 @@ def test_initialization(self):
 
     def test_call_single_tensor_output(self):
         # Test the __call__ method with a function that returns a single tensor
-        def f(x):
-            return x.sum(dim=1)
-
-        result = self.mcmc(f)
+        result = self.mcmc(self.f)
         self.assertIsInstance(result, RAvg)
         self.assertEqual(result.shape, ())
         self.assertIsInstance(result.mean, float)
@@ -303,10 +283,7 @@ def f(x):
 
     def test_call_multiple_tensor_output(self):
         # Test the __call__ method with a function that returns a list of tensors
-        def f(x):
-            return [x.sum(dim=1), x.prod(dim=1)]
-
-        result = self.mcmc(f)
+        result = self.mcmc(self.f2, f_dim=2)
         self.assertIsInstance(result, np.ndarray)
         self.assertEqual(result.shape, (2,))
         self.assertEqual(result.dtype, RAvg)
@@ -320,11 +297,7 @@ def f(x):
             self.mcmc(f)
 
     def test_call_with_different_device(self):
-        # Test the __call__ method with different device values
-        def f(x):
-            return x.sum(dim=1)
-
-        # Test with device = "cuda" if available
+        # Test the __call__ method with device = "cuda" if available
         if torch.cuda.is_available():
             mcmc_cuda = MCMC(
                 self.maps,
@@ -336,15 +309,12 @@ def f(x):
                 "cuda",
                 self.dtype,
             )
-            result_cuda = mcmc_cuda(f)
+            result_cuda = mcmc_cuda(self.f)
             self.assertIsInstance(result_cuda, RAvg)
             self.assertEqual(result_cuda.shape, ())
 
     def test_call_with_different_dtype(self):
         # Test the __call__ method with different dtype values
-        def f(x):
-            return x.sum(dim=1)
-
         # Test with dtype = torch.float32
         mcmc_float32 = MCMC(
             self.maps,
@@ -356,7 +326,7 @@ def f(x):
             self.device,
             torch.float32,
         )
-        result_float32 = mcmc_float32(f)
+        result_float32 = mcmc_float32(self.f)
         self.assertIsInstance(result_float32, RAvg)
         self.assertEqual(result_float32.shape, ())
         self.assertIsInstance(result_float32.mean, float)
@@ -364,9 +334,6 @@ def f(x):
 
     def test_call_with_different_bounds(self):
         # Test the __call__ method with different bounds values
-        def f(x):
-            return x.sum(dim=1)
-
         # Test with bounds = [0, 2]
         mcmc_bounds = MCMC(
             self.maps,
@@ -378,24 +345,21 @@ def f(x):
             self.device,
             self.dtype,
         )
-        result_bounds = mcmc_bounds(f)
+        result_bounds = mcmc_bounds(self.f)
         self.assertIsInstance(result_bounds, RAvg)
         self.assertEqual(result_bounds.shape, ())
 
     def test_call_with_different_proposal_dist(self):
         # Test the __call__ method with different proposal distributions
-        def f(x):
-            return x.sum(dim=1)
-
         from integrators import random_walk, gaussian
 
         # Test with uniform proposal distribution
-        result_rw = self.mcmc(f, proposal_dist=random_walk)
+        result_rw = self.mcmc(self.f, proposal_dist=random_walk)
         self.assertIsInstance(result_rw, RAvg)
         self.assertEqual(result_rw.shape, ())
 
         # Test with normal proposal distribution
-        result_normal = self.mcmc(f, proposal_dist=gaussian)
+        result_normal = self.mcmc(self.f, proposal_dist=gaussian)
         self.assertIsInstance(result_normal, RAvg)
         self.assertEqual(result_normal.shape, ())
 
diff --git a/src/maps.py b/src/maps.py
index 6f6fc1d..7e5b335 100644
--- a/src/maps.py
+++ b/src/maps.py
@@ -77,35 +77,31 @@ def __init__(self, bounds, ninc=1000, alpha=0.5, device="cpu", dtype=torch.float
         elif isinstance(ninc, torch.Tensor):
             self.ninc = ninc.to(dtype=torch.int32, device=device)
         else:
-            raise ValueError("'ninc' must be an int, list, numpy array, or torch tensor.")
-
+            raise ValueError(
+                "'ninc' must be an int, list, numpy array, or torch tensor."
+            )
+
         # Ensure ninc has the correct shape
         if self.ninc.shape != (self.dim,):
-            raise ValueError(f"'ninc' must be a scalar or a 1D array of length {self.dim}.")
+            raise ValueError(
+                f"'ninc' must be a scalar or a 1D array of length {self.dim}."
+            )
 
         self.make_uniform()
 
         self.alpha = alpha
         self._A = self.bounds[:, 1] - self.bounds[:, 0]
         self._jaclinear = torch.prod(self._A)
 
-    def train(self, nsamples, f, epoch=5, alpha=0.5):
+    def train(self, nsamples, f, f_dim=1, dtype=torch.float64, epoch=5, alpha=0.5):
         q0 = Uniform(self.bounds, device=self.device, dtype=self.dtype)
         u, log_detJ0 = q0.sample(nsamples)
-        fval = f(u)
-        f_size = len(fval) if isinstance(fval, (list, tuple)) else 1
-        if f_size > 1:
-
-            def _integrand(x):
-                return sum(f(x))
-        else:
-
-            def _integrand(x):
-                return f(x)
+        fx = torch.empty(nsamples, f_dim, device=self.device, dtype=dtype)
 
         for _ in range(epoch):
             x, log_detJ = self.forward(u)
-            f2 = torch.exp(2 * (log_detJ + log_detJ0)) * _integrand(x) ** 2
+            fx_weight = f(x, fx)
+            f2 = torch.exp(2 * (log_detJ + log_detJ0)) * fx_weight**2
             self.add_training_data(u, f2)
             self.adapt(alpha)
diff --git a/src/maps_test.py b/src/maps_test.py
index 97f12ff..7f153fe 100644
--- a/src/maps_test.py
+++ b/src/maps_test.py
@@ -170,8 +170,9 @@ def test_inverse(self):
 
     def test_train(self):
         # Test training the Vegas class
-        def f(x):
-            return torch.sum(x, dim=1)
+        def f(x, fx):
+            fx[:, 0] = torch.sum(x, dim=1)
+            return fx[:, 0]
 
         nsamples = 100
         epoch = 5
diff --git a/src/mc_test.py b/src/mc_test.py
index 65da182..e038226 100644
--- a/src/mc_test.py
+++ b/src/mc_test.py
@@ -5,40 +5,39 @@ from utils import set_seed, get_device
 
 
 set_seed(42)
-# device = get_device()
-device = torch.device("cpu")
+device = get_device()
+# device = torch.device("cpu")
 
 
 def test_nothing():
     pass
 
 
-def unit_circle_integrand(x):
-    inside_circle = (x[:, 0] ** 2 + x[:, 1] ** 2 < 1).double()
-    return inside_circle
+def unit_circle_integrand(x, f):
+    f[:, 0] = (x[:, 0] ** 2 + x[:, 1] ** 2 < 1).double()
+    return f[:, 0]
 
 
-def half_sphere_integrand(x):
-    return torch.clamp(1 - (x[:, 0] ** 2 + x[:, 1] ** 2), min=0) * 2
+def half_sphere_integrand(x, f):
+    f[:, 0] = torch.clamp(1 - (x[:, 0] ** 2 + x[:, 1] ** 2), min=0) * 2
+    return f[:, 0]
 
 
-def integrand_list(x):
-    return [unit_circle_integrand(x), half_sphere_integrand(x) / 2]
+def two_integrands(x, f):
+    f[:, 0] = (x[:, 0] ** 2 + x[:, 1] ** 2 < 1).double()
+    f[:, 1] = -torch.clamp(1 - (x[:, 0] ** 2 + x[:, 1] ** 2), min=0)
+    return f.mean(dim=-1)
 
 
-def integrand_list1(x):
-    dx2 = torch.zeros(x.shape[0], dtype=x.dtype, device=x.device)
-    for d in range(4):
-        dx2 += (x[:, d] - 0.5) ** 2
-    f = torch.exp(-200 * dx2)
-    return [f, f * x[:, 0], f * x[:, 0] ** 2]
-
-
-def sharp_peak(x):
-    dx2 = torch.zeros(x.shape[0], dtype=x.dtype, device=x.device)
-    for d in range(4):
-        dx2 += (x[:, d] - 0.5) ** 2
-    return torch.exp(-200 * dx2)
+def sharp_integrands(x, f, dim=4):
+    f.zero_()
+    for d in range(dim):
+        f[:, 0] += (x[:, d] - 0.5) ** 2
+    f[:, 0] *= -200
+    f[:, 0].exp_()
+    f[:, 1] = f[:, 0] * x[:, 0]
+    f[:, 2] = f[:, 0] * x[:, 0] ** 2
+    return f.mean(dim=-1)
 
 
 dim = 2
@@ -116,25 +115,25 @@ def sharp_peak(x):
 
 print("\nCalculate the integral [f(x1, x2), g(x1, x2)/2] in the bounds [-1, 1]^2")
 # Two integrands
-res = mc_integrator(integrand_list)
+res = mc_integrator(two_integrands, f_dim=2)
 print("Plain MC Integral results:")
 print(" Integral 1: ", res[0])
 print(" Integral 2: ", res[1])
 
-res = mcmc_integrator(integrand_list, mix_rate=0.5)
+res = mcmc_integrator(two_integrands, f_dim=2, mix_rate=0.5)
 print("MCMC Integral results:")
 print(f" Integral 1: ", res[0])
 print(f" Integral 2: ", res[1])
 
 # print("VEAGS map is trained for g(x1, x2)")
 vegas_map.make_uniform()
-vegas_map.train(20000, integrand_list, epoch=10, alpha=0.5)
-res = vegas_integrator(integrand_list)
+vegas_map.train(20000, two_integrands, f_dim=2, epoch=10, alpha=0.5)
+res = vegas_integrator(two_integrands, f_dim=2)
 print("VEGAS Integral results:")
 print(" Integral 1: ", res[0])
 print(" Integral 2: ", res[1])
 
-res = vegasmcmc_integrator(integrand_list, mix_rate=0.5)
+res = vegasmcmc_integrator(two_integrands, f_dim=2, mix_rate=0.5)
 print("VEGAS-MCMC Integral results:")
 print(" Integral 1: ", res[0])
 print(" Integral 2: ", res[1])
@@ -154,7 +153,7 @@ def sharp_peak(x):
     bounds=bounds, neval=n_eval, nbatch=n_batch, nburnin=n_therm, device=device
 )
 print("Plain MC Integral results:")
-res = mc_integrator(integrand_list1)
+res = mc_integrator(sharp_integrands, f_dim=3)
 print(
     " I[0] =",
     res[0],
@@ -166,7 +165,7 @@ def sharp_peak(x):
     res[1] / res[0],
 )
 print("MCMC Integral results:")
-res = mcmc_integrator(integrand_list1, mix_rate=0.5)
+res = mcmc_integrator(sharp_integrands, f_dim=3, mix_rate=0.5)
 print(
     " I[0] =",
     res[0],
@@ -180,8 +179,7 @@ def sharp_peak(x):
 
 vegas_map = Vegas(bounds, device=device)
 print("train VEGAS map for h(X)...")
-# vegas_map.train(20000, sharp_peak, epoch=10, alpha=0.5)
-vegas_map.train(20000, integrand_list1, epoch=10, alpha=0.5)
+vegas_map.train(20000, sharp_integrands, f_dim=3, epoch=10, alpha=0.5)
 
 print("VEGAS Integral results:")
 vegas_integrator = MonteCarlo(
@@ -191,7 +189,7 @@ def sharp_peak(x):
     # nbatch=n_eval,
     device=device,
 )
-res = vegas_integrator(integrand_list1)
+res = vegas_integrator(sharp_integrands, f_dim=3)
 print(
     " I[0] =",
     res[0],
@@ -211,7 +209,7 @@ def sharp_peak(x):
     nburnin=n_therm,
     device=device,
 )
-res = vegasmcmc_integrator(integrand_list1, mix_rate=0.5)
+res = vegasmcmc_integrator(sharp_integrands, f_dim=3, mix_rate=0.5)
 print(
     " I[0] =",
     res[0],
diff --git a/src/vegas_test.py b/src/vegas_test.py
index addc8fc..a1e847e 100644
--- a/src/vegas_test.py
+++ b/src/vegas_test.py
@@ -4,28 +4,34 @@ from maps import Vegas, Linear
 from utils import set_seed, get_device
 
 
-# set_seed(42)
-# device = get_device()
-device = torch.device("cpu")
+set_seed(42)
+device = get_device()
+# device = torch.device("cpu")
 
 
-def integrand_list1(x):
-    dx2 = torch.zeros(x.shape[0], dtype=x.dtype, device=x.device)
-    for d in range(4):
-        dx2 += (x[:, d] - 0.5) ** 2
-    f = torch.exp(-200 * dx2)
-    return [f, f * x[:, 0], f * x[:, 0] ** 2]
+def sharp_peak(x, f, dim=4):
+    f.zero_()
+    for d in range(dim):
+        f[:, 0] += (x[:, d] - 0.5) ** 2
+    f[:, 0] *= -200
+    f[:, 0].exp_()
+    return f[:, 0]
 
 
-def sharp_peak(x):
-    dx2 = torch.zeros(x.shape[0], dtype=x.dtype, device=x.device)
-    for d in range(4):
-        dx2 += (x[:, d] - 0.5) ** 2
-    return torch.exp(-200 * dx2)
+def sharp_integrands(x, f, dim=4):
+    f.zero_()
+    for d in range(dim):
+        f[:, 0] += (x[:, d] - 0.5) ** 2
+    f[:, 0] *= -200
+    f[:, 0].exp_()
+    f[:, 1] = f[:, 0] * x[:, 0]
+    f[:, 2] = f[:, 0] * x[:, 0] ** 2
+    return f.mean(dim=-1)
 
 
-def func(x):
-    return torch.log(x[:, 0]) / torch.sqrt(x[:, 0])
+def func(x, f):
+    f[:, 0] = torch.log(x[:, 0]) / torch.sqrt(x[:, 0])
+    return f[:, 0]
 
 
 ninc = 1000
@@ -76,7 +82,7 @@ def func(x):
     nbatch=n_batch,
     device=device,
 )
-res = vegas_integrator(integrand_list1)
+res = vegas_integrator(sharp_integrands, f_dim=3)
 print(
     " I[0] =",
     res[0],
@@ -102,7 +108,7 @@ def func(x):
     nburnin=n_therm,
     device=device,
 )
-res = vegasmcmc_integrator(integrand_list1, mix_rate=0.5)
+res = vegasmcmc_integrator(sharp_integrands, f_dim=3, mix_rate=0.5)
 print(
     " I[0] =",
     res[0],
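
Usage sketch of the new integrand interface (illustrative only, not part of the patch): an integrand now fills a caller-allocated (nbatch, f_dim) buffer in place and returns a 1D tensor that MCMC and Vegas.train use as a sampling weight, while plain MonteCarlo ignores the return value. The constructor keywords below (bounds, neval, nbatch) follow the test scripts above; anything beyond that is an assumption.

    import torch
    from integrators import MonteCarlo

    def two_integrands(x, fx):
        # write each integrand component into the pre-allocated (nbatch, f_dim) buffer
        fx[:, 0] = x.sum(dim=1)
        fx[:, 1] = x.prod(dim=1)
        # per-sample scalar, used only as a weight by MCMC and Vegas.train
        return fx.mean(dim=1)

    bounds = [[0.0, 1.0], [0.0, 1.0]]
    mc = MonteCarlo(bounds=bounds, neval=100000, nbatch=10000)
    res = mc(two_integrands, f_dim=2)  # numpy array of two RAvg results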