From aa4e0e3e53eb024740c10b2656797c5b00f9b27a Mon Sep 17 00:00:00 2001
From: houpc
Date: Thu, 30 Oct 2025 18:23:06 +0800
Subject: [PATCH 1/6] update readme and license

---
 README.md  | 141 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 license.md |   7 +++
 2 files changed, 148 insertions(+)
 create mode 100644 license.md

diff --git a/README.md b/README.md
index bccdd05..2e7618c 100644
--- a/README.md
+++ b/README.md
@@ -2,3 +2,144 @@
 [![alpha](https://img.shields.io/badge/docs-alpha-blue.svg)](https://numericaleft.github.io/MCintegration.py/)
 [![Build Status](https://github.com/numericalEFT/MCIntegration.py/workflows/CI/badge.svg)](https://github.com/numericalEFT/MCIntegration.py/actions)
 [![codecov](https://codecov.io/gh/numericalEFT/MCintegration.py/graph/badge.svg?token=851N2CNOTN)](https://codecov.io/gh/numericalEFT/MCintegration.py)
+A Python library for Monte Carlo integration with support for multi-CPU and GPU computations.
+
+## Overview
+
+MCintegration is a specialized library designed for numerical integration using Monte Carlo methods. It provides efficient implementations of various integration algorithms with a focus on applications in computational physics and effective field theories (EFT).
+
+The library offers:
+- Multiple Monte Carlo integration algorithms
+- Support for multi-CPU parallelization
+- GPU acceleration capabilities
+- Integration with PyTorch for tensor-based computations
+
+## Installation
+
+```bash
+pip install mcintegration
+```
+
+Or install from source:
+
+```bash
+python setup.py install
+```
+
+## Usage
+
+### Example 1: Unit Circle Integration
+
+This example demonstrates different Monte Carlo methods for integrating functions over [-1,1]×[-1,1]:
+
+```python
+from MCintegration import MonteCarlo, MarkovChainMonteCarlo, Vegas
+import torch
+
+# Define integrand function
+def unit_circle(x, f):
+    r2 = x[:, 0]**2 + x[:, 1]**2
+    f[:, 0] = (r2 <= 1).float()
+    return f.mean(dim=-1)
+
+# Set up integration parameters
+dim = 2
+bounds = [(-1, 1)] * dim
+n_eval = 6400000
+batch_size = 10000
+n_therm = 100
+
+# Create integrator instances
+mc = MonteCarlo(f=unit_circle, bounds=bounds, batch_size=batch_size)
+mcmc = MarkovChainMonteCarlo(f=unit_circle, bounds=bounds, batch_size=batch_size, nburnin=n_therm)
+
+# Perform integration
+result_mc = mc(n_eval)
+result_mcmc = mcmc(n_eval)
+```
+
+### Example 2: Singular Function Integration
+
+This example shows integration of a function with a singularity at x=0:
+
+```python
+# Integrate log(x)/sqrt(x) which has a singularity at x=0
+def singular_func(x, f):
+    f[:, 0] = torch.log(x[:, 0]) / torch.sqrt(x[:, 0])
+    return f[:, 0]
+
+# Set up integration parameters
+dim = 1
+bounds = [(0, 1)]
+n_eval = 6400000
+batch_size = 10000
+n_therm = 100
+
+# Use VEGAS algorithm which adapts to the singular structure
+vegas_map = Vegas(dim, ninc=1000)
+vegas_map.adaptive_training(batch_size, singular_func)
+
+# Create integrator instances using the adapted vegas map
+vegas_mc = MonteCarlo(f=singular_func, bounds=bounds, batch_size=batch_size, maps=vegas_map)
+vegas_mcmc = MarkovChainMonteCarlo(f=singular_func, bounds=bounds, batch_size=batch_size, nburnin=n_therm, maps=vegas_map)
+
+# Perform integration
+result_vegas = vegas_mc(n_eval)
+result_vegas_mcmc = vegas_mcmc(n_eval)
+```
+
+### Example 3: Multiple Sharp Peak Integrands in Higher Dimensions
+
+This example demonstrates integration of a sharp Gaussian peak and its moments in 4D space:
+
+```python
+# Define a sharp peak and its moments integrands
+# This represents a Gaussian peak centered at (0.5, 0.5, 0.5, 0.5)
+def sharp_integrands(x, f):
+    f[:, 0] = torch.sum((x - 0.5) ** 2, dim=-1)  # Distance from center
+    f[:, 0] *= -200  # Scale by width parameter
+    f[:, 0].exp_()  # Exponentiate to create Gaussian
+    f[:, 1] = f[:, 0] * x[:, 0]  # First moment
+    f[:, 2] = f[:, 0] * x[:, 0] ** 2  # Second moment
+    return f.mean(dim=-1)
+
+# Set up 4D integration with sharp peak
+dim = 4
+bounds = [(0, 1)] * dim
+n_eval = 6400000
+batch_size = 10000
+n_therm = 100
+
+# Use VEGAS algorithm which adapts to the peak structure
+vegas_map = Vegas(dim, ninc=1000)
+vegas_map.adaptive_training(batch_size, sharp_integrands, f_dim=3)
+
+# Create integrator instances using the adapted vegas map
+vegas_mc = MonteCarlo(f=sharp_integrands, f_dim=3, bounds=bounds, batch_size=batch_size, maps=vegas_map)
+vegas_mcmc = MarkovChainMonteCarlo(f=sharp_integrands, f_dim=3, bounds=bounds, batch_size=batch_size, nburnin=n_therm, maps=vegas_map)
+
+# Perform integration
+result_vegas = vegas_mc(n_eval)
+result_vegas_mcmc = vegas_mcmc(n_eval)
+```
+
+## Features
+
+- **Base integration methods**: Core Monte Carlo algorithms in `MCintegration/base.py`
+- **Integrator implementations**: Various MC integration strategies in `MCintegration/integrators.py`
+- **Variable transformations**: Coordinate mapping utilities in `MCintegration/maps.py`
+- **Utility functions**: Helper functions for numerical computations in `MCintegration/utils.py`
+- **Multi-CPU support**: Parallel processing capabilities demonstrated in `MCintegration/mc_multicpu_test.py`
+- **GPU acceleration**: CUDA-enabled functions through PyTorch in the examples directory
+
+
+## Requirements
+
+- Python 3.7+
+- NumPy
+- PyTorch
+- gvar
+
+## Acknowledgements and Related Packages
+The development of `MCIntegration.py` has been greatly inspired and influenced by the `vegas` package. We would like to express our appreciation to the following:
+- [vegas](https://github.com/gplepage/vegas) A Python package offering Monte Carlo estimations of multidimensional integrals, with notable improvements on the original Vegas algorithm. It's been a valuable reference for us. Learn more from the vegas [documentation](https://vegas.readthedocs.io/). **Reference: G. P. Lepage, J. Comput. Phys. 27, 192 (1978) and G. P. Lepage, J. Comput. Phys. 439, 110386 (2021) [arXiv:2009.05112](https://arxiv.org/abs/2009.05112)**.
\ No newline at end of file
diff --git a/license.md b/license.md
new file mode 100644
index 0000000..b964bdf
--- /dev/null
+++ b/license.md
@@ -0,0 +1,7 @@
+Copyright (c) 2025: Pengcheng Hou, Tao Wang, Caiyu Fan, and Kun Chen.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file

From c6477bbd424be801476fc2fffdb3c5be2de99352 Mon Sep 17 00:00:00 2001
From: fsxbhyy
Date: Fri, 31 Oct 2025 15:54:44 +0800
Subject: [PATCH 2/6] update README

---
 README.md | 350 ++++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 276 insertions(+), 74 deletions(-)

diff --git a/README.md b/README.md
index 2e7618c..b0bfa92 100644
--- a/README.md
+++ b/README.md
@@ -1,21 +1,20 @@
-# MCintegration
-[![alpha](https://img.shields.io/badge/docs-alpha-blue.svg)](https://numericaleft.github.io/MCintegration.py/)
-[![Build Status](https://github.com/numericalEFT/MCIntegration.py/workflows/CI/badge.svg)](https://github.com/numericalEFT/MCIntegration.py/actions)
-[![codecov](https://codecov.io/gh/numericalEFT/MCintegration.py/graph/badge.svg?token=851N2CNOTN)](https://codecov.io/gh/numericalEFT/MCintegration.py)
-A Python library for Monte Carlo integration with support for multi-CPU and GPU computations.
+# MCintegration: Monte Carlo Integration in Python
 
-## Overview
+## Why Choose MCintegration?
 
-MCintegration is a specialized library designed for numerical integration using Monte Carlo methods. It provides efficient implementations of various integration algorithms with a focus on applications in computational physics and effective field theories (EFT).
+MCintegration is a comprehensive Python package designed to handle both regular and singular high-dimensional integrals with ease. Its robust Monte Carlo integration methods make it a versatile tool across scientific domains, including high-energy physics, material science, computational chemistry, financial mathematics, and machine learning.
 
-The library offers:
-- Multiple Monte Carlo integration algorithms
-- Support for multi-CPU parallelization
-- GPU acceleration capabilities
-- Integration with PyTorch for tensor-based computations
+The package leverages **PyTorch** for efficient tensor computations, enabling:
+- **GPU acceleration** for massive performance gains on compatible hardware
+- **Batched computations** for efficient parallel processing
+- **Multi-CPU parallelization** for distributed workloads
+
+Whether you're dealing with simple low-dimensional integrals or complex high-dimensional integrals with sharp peaks and singularities, MCintegration provides the tools you need with a simple, intuitive API.
 
 ## Installation
 
+Install via pip:
+
 ```bash
 pip install mcintegration
 ```
@@ -23,123 +22,326 @@ pip install mcintegration
 Or install from source:
 
 ```bash
+git clone https://github.com/numericalEFT/MCintegration.py
+cd MCintegration.py
 python setup.py install
 ```
 
-## Usage
+## Requirements
+
+- Python 3.7+
+- NumPy
+- PyTorch
+- gvar
+
+## Quick Start
 
-### Example 1: Unit Circle Integration
+MCintegration simplifies complex integral calculations. Here is a quick example to get you started; more detailed examples follow below.
 
-This example demonstrates different Monte Carlo methods for integrating functions over [-1,1]×[-1,1]:
+### Example: Estimating π
+
+Let's estimate π by computing the area of a unit circle:
 
 ```python
-from MCintegration import MonteCarlo, MarkovChainMonteCarlo, Vegas
 import torch
+from MCintegration import MonteCarlo
 
-# Define integrand function
-def unit_circle(x, f):
+# Define integrand: indicator function for unit circle
+def circle(x, f):
     r2 = x[:, 0]**2 + x[:, 1]**2
     f[:, 0] = (r2 <= 1).float()
     return f.mean(dim=-1)
 
-# Set up integration parameters
-dim = 2
-bounds = [(-1, 1)] * dim
-n_eval = 6400000
-batch_size = 10000
-n_therm = 100
-
-# Create integrator instances
-mc = MonteCarlo(f=unit_circle, bounds=bounds, batch_size=batch_size)
-mcmc = MarkovChainMonteCarlo(f=unit_circle, bounds=bounds, batch_size=batch_size, nburnin=n_therm)
-
-# Perform integration
-result_mc = mc(n_eval)
-result_mcmc = mcmc(n_eval)
+# Integrate over [-1,1] × [-1,1]
+mc = MonteCarlo(f=circle, bounds=[(-1, 1), (-1, 1)], batch_size=10000)
+result = mc(1000000)
+pi_estimate = result * 4  # result estimates (circle area)/(square area) = π/4
+print(f"π ≈ {pi_estimate}")
 ```
 
+## Understanding the Integrand Function
+
+All integrand functions in MCintegration follow a specific signature:
+
+```python
+def integrand(x, f) -> result:
+    """
+    Args:
+        x: PyTorch tensor of shape (batch_size, dim) - input sample points
+        f: PyTorch tensor of shape (batch_size, f_dim) - output buffer
+
+    Returns:
+        Reduced result, typically f.mean(dim=-1) for a single output
+        or f for multiple outputs
+    """
+    # Your computation here
+    f[:, 0] = ...  # Store result(s)
+    return f.mean(dim=-1)  # or return f for multiple outputs
+```
+
+**Key points:**
+- **x**: Contains batches of random points sampled from the integration domain
+- **f**: Pre-allocated tensor where you store function values
+- **Return value**: Usually `f.mean(dim=-1)` for variance reduction with single outputs, or `f` directly for multiple outputs
+- **Batched computation**: All operations should be vectorized using PyTorch for efficiency
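+
+The following is a minimal, library-free sketch of this batched convention (plain PyTorch only — an illustration of the shapes involved, not part of the package API):
+
+```python
+import torch
+
+# Hypothetical batched evaluation: 10000 points in 2D, one output channel
+device = "cuda" if torch.cuda.is_available() else "cpu"
+batch_size, dim, f_dim = 10000, 2, 1
+
+x = torch.rand(batch_size, dim, device=device)     # sample points in [0,1)^2
+f = torch.empty(batch_size, f_dim, device=device)  # pre-allocated output buffer
+
+# One vectorized call evaluates the integrand on the whole batch
+f[:, 0] = (x[:, 0]**2 + x[:, 1]**2 <= 1).float()
+print(f[:, 0].mean())  # ≈ π/4, the fraction of the unit square inside the circle
+```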
+
+## Core Integration Methods
+
+MCintegration provides several Monte Carlo integration algorithms:
+
+### 1. Standard Monte Carlo (`MonteCarlo`)
+
+The classic Monte Carlo algorithm uses uniform random sampling. It's efficient for low-dimensional integrals and well-behaved functions.
+
+```python
+from MCintegration import MonteCarlo
+
+mc = MonteCarlo(
+    f=integrand_function,
+    bounds=[(min, max), ...],
+    batch_size=10000,
+    f_dim=1  # Number of output dimensions
+)
+result = mc(n_eval)  # n_eval total function evaluations
+```
+
+### 2. Markov Chain Monte Carlo (`MarkovChainMonteCarlo`)
+
+MCMC uses the Metropolis-Hastings algorithm to generate correlated samples. This can be more efficient for certain types of integrands.
+
+```python
+from MCintegration import MarkovChainMonteCarlo
+
+mcmc = MarkovChainMonteCarlo(
+    f=integrand_function,
+    bounds=[(min, max), ...],
+    batch_size=10000,
+    nburnin=100,  # Thermalization/burn-in steps
+    f_dim=1
+)
+result = mcmc(n_eval)
+```
+
+**Important parameters:**
+- **nburnin**: Number of initial steps to discard while the chain reaches equilibrium
+- Higher nburnin values may be needed for complex distributions (a minimal illustration of burn-in follows below)
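+
+The role of `nburnin` can be seen in a short, library-free sketch (plain PyTorch, not the package's implementation): a random-walk Metropolis chain targeting p(x) ∝ exp(-200·(x-0.5)²) on [0,1], started far from the peak, needs some initial steps before its samples are representative.
+
+```python
+import torch
+
+torch.manual_seed(0)
+logp = lambda z: -200 * (z - 0.5) ** 2  # log-density, up to a constant
+nburnin, nsamples, step = 100, 10000, 0.1
+
+x = torch.tensor(0.01)  # deliberately start far from the peak
+samples = []
+for i in range(nburnin + nsamples):
+    prop = x + step * (torch.rand(()) - 0.5)  # symmetric random-walk proposal
+    # Metropolis: accept with min(1, p(prop)/p(x)); out-of-bounds proposals are rejected
+    if 0.0 <= prop <= 1.0 and torch.log(torch.rand(())) < logp(prop) - logp(x):
+        x = prop
+    if i >= nburnin:  # discard the first nburnin correlated steps
+        samples.append(x.item())
+
+print(sum(samples) / len(samples))  # ≈ 0.5 once the chain has equilibrated
+```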
+
+### 3. VEGAS Algorithm with Adaptive Importance Sampling
+
+VEGAS uses **adaptive importance sampling** to concentrate samples in regions where the integrand is large, dramatically improving accuracy for difficult integrals.
+
+```python
+from MCintegration import Vegas, MonteCarlo
+
+# Create and train the VEGAS map
+vegas_map = Vegas(dim, ninc=1000)
+vegas_map.adaptive_training(batch_size, integrand_function, f_dim=1)
+
+# Use the adapted map with any integrator
+mc = MonteCarlo(
+    f=integrand_function,
+    bounds=bounds,
+    batch_size=batch_size,
+    maps=vegas_map
+)
+result = mc(n_eval)
+```
+
+**When to use VEGAS:**
+- Functions with singularities or near-singularities
+- Sharply peaked functions (e.g., Gaussians with small width)
+- Functions where most of the contribution comes from small regions
+
+**VEGAS parameters:**
+- **ninc**: Number of increments in the adaptive grid (more = finer adaptation)
+- Higher values allow better adaptation but increase memory usage
+- Typical values: 100-1000 for low dimensions, 50-100 for high dimensions
+
+## Detailed Examples
+
+### Example 1: Singular Function Integration
+
+Handle a function with a singularity at x=0 using VEGAS adaptive sampling.
+
+```python
+import torch
+from MCintegration import Vegas, MonteCarlo, MarkovChainMonteCarlo
+
-### Example 2: Singular Function Integration
-
-This example shows integration of a function with a singularity at x=0:
-
-```python
-# Integrate log(x)/sqrt(x) which has a singularity at x=0
+# Define singular function: log(x)/√x
 def singular_func(x, f):
-    f[:, 0] = torch.log(x[:, 0]) / torch.sqrt(x[:, 0])
+    """
+    Integrand with singularity at x=0.
+    The integral ∫₀¹ log(x)/√x dx = -4 (analytical result)
+    """
+    f[:, 0] = torch.where(x[:, 0] < 1e-14,
+                          0.0,
+                          torch.log(x[:, 0]) / torch.sqrt(x[:, 0]))
     return f[:, 0]
 
-# Set up integration parameters
+# Integration parameters
 dim = 1
-bounds = [(0, 1)]
+bounds = [(0, 1)]  # Note: includes singular point x=0
 n_eval = 6400000
 batch_size = 10000
 n_therm = 100
 
-# Use VEGAS algorithm which adapts to the singular structure
+# VEGAS adaptive training
 vegas_map = Vegas(dim, ninc=1000)
 vegas_map.adaptive_training(batch_size, singular_func)
 
-# Create integrator instances using the adapted vegas map
-vegas_mc = MonteCarlo(f=singular_func, bounds=bounds, batch_size=batch_size, maps=vegas_map)
-vegas_mcmc = MarkovChainMonteCarlo(f=singular_func, bounds=bounds, batch_size=batch_size, nburnin=n_therm, maps=vegas_map)
-
-# Perform integration
+# Standard MC with VEGAS map
+vegas_mc = MonteCarlo(
+    f=singular_func,
+    bounds=bounds,
+    batch_size=batch_size,
+    maps=vegas_map
+)
 result_vegas = vegas_mc(n_eval)
+print(f"VEGAS-MC: {result_vegas} (expected: -4)")
+
+# MCMC with VEGAS map
+vegas_mcmc = MarkovChainMonteCarlo(
+    f=singular_func,
+    bounds=bounds,
+    batch_size=batch_size,
+    nburnin=n_therm,
+    maps=vegas_map
+)
 result_vegas_mcmc = vegas_mcmc(n_eval)
+print(f"VEGAS-MCMC: {result_vegas_mcmc} (expected: -4)")
 ```
 
+**Why VEGAS helps:**
+- Standard MC samples uniformly, missing the singularity structure
+- VEGAS learns to concentrate samples near x=0, where |log(x)/√x| is large
+- This dramatically reduces the variance and improves accuracy
+
+**The VEGAS workflow:**
+1. **Create map**: `Vegas(dim, ninc)` initializes the adaptive grid
+2. **Train map**: `adaptive_training()` learns the integrand structure
+3. **Use map**: Pass `maps=vegas_map` to sample from the adapted distribution
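+
+The mechanism behind this workflow is a change of variables: the map x(u) is trained so that the Jacobian |dx/du| cancels the large values of the integrand. A minimal, library-free sketch for this particular integral, with the hand-picked map x = u² standing in for the trained `vegas_map`:
+
+```python
+import torch
+
+torch.manual_seed(0)
+u = 1.0 - torch.rand(1_000_000, dtype=torch.float64)  # uniform in (0, 1], avoids log(0)
+
+# ∫₀¹ log(x)/√x dx with x = u²: log(u²)/u · (2u du) = 4·log(u) du,
+# i.e. the Jacobian 2u exactly cancels the 1/√x singularity.
+mapped = 4 * torch.log(u)
+print(mapped.mean().item())  # ≈ -4, and the estimator now has finite variance
+
+# Sampling log(x)/√x uniformly instead gives an infinite-variance estimator,
+# since E[(log x / √x)²] = ∫₀¹ log(x)²/x dx diverges.
+```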
+
+---
+
+### Example 2: Sharp Gaussian Peak in High Dimensions
+
+Compute multiple related quantities (normalization and moments) for a sharp 4D Gaussian.
+
+```python
+import torch
+from MCintegration import Vegas, MonteCarlo, MarkovChainMonteCarlo
+
-### Example 3: Multiple Sharp Peak Integrands in Higher Dimensions
-
-This example demonstrates integration of a sharp Gaussian peak and its moments in 4D space:
-
-```python
-# Define a sharp peak and its moments integrands
-# This represents a Gaussian peak centered at (0.5, 0.5, 0.5, 0.5)
+# Define sharp Gaussian and its moments
 def sharp_integrands(x, f):
-    f[:, 0] = torch.sum((x - 0.5) ** 2, dim=-1)  # Distance from center
-    f[:, 0] *= -200  # Scale by width parameter
-    f[:, 0].exp_()  # Exponentiate to create Gaussian
-    f[:, 1] = f[:, 0] * x[:, 0]  # First moment
-    f[:, 2] = f[:, 0] * x[:, 0] ** 2  # Second moment
+    """
+    Computes three related integrals:
+    1. Normalization: ∫ exp(-200·|x-0.5|²) dx
+    2. First moment: ∫ x₀·exp(-200·|x-0.5|²) dx
+    3. Second moment: ∫ x₀²·exp(-200·|x-0.5|²) dx
+
+    The peak is centered at (0.5, 0.5, 0.5, 0.5) with width σ ≈ 0.05
+    """
+    # Squared distance from center
+    dist2 = torch.sum((x - 0.5) ** 2, dim=-1)
+
+    # Sharp Gaussian: exp(-200·dist²)
+    gaussian = torch.exp(-200 * dist2)
+
+    # Store three outputs
+    f[:, 0] = gaussian  # Normalization
+    f[:, 1] = gaussian * x[:, 0]  # First moment
+    f[:, 2] = gaussian * x[:, 0] ** 2  # Second moment
+
     return f.mean(dim=-1)
 
-# Set up 4D integration with sharp peak
-dim = 4
-bounds = [(0, 1)] * dim
+# Integration parameters
+dim = 4  # 4D integration
+bounds = [(0, 1)] * dim  # Unit hypercube [0,1]⁴
 n_eval = 6400000
 batch_size = 10000
 n_therm = 100
 
-# Use VEGAS algorithm which adapts to the peak structure
+# VEGAS adaptive training for all three outputs
 vegas_map = Vegas(dim, ninc=1000)
 vegas_map.adaptive_training(batch_size, sharp_integrands, f_dim=3)
 
-# Create integrator instances using the adapted vegas map
-vegas_mc = MonteCarlo(f=sharp_integrands, f_dim=3, bounds=bounds, batch_size=batch_size, maps=vegas_map)
-vegas_mcmc = MarkovChainMonteCarlo(f=sharp_integrands, f_dim=3, bounds=bounds, batch_size=batch_size, nburnin=n_therm, maps=vegas_map)
-
-# Perform integration
+# Integration with VEGAS map
+vegas_mc = MonteCarlo(
+    f=sharp_integrands,
+    f_dim=3,  # Three simultaneous outputs
+    bounds=bounds,
+    batch_size=batch_size,
+    maps=vegas_map
+)
 result_vegas = vegas_mc(n_eval)
+print(f"Results: {result_vegas}")
+print(f"  Normalization: {result_vegas[0]}")
+print(f"  First moment: {result_vegas[1]}")
+print(f"  Second moment: {result_vegas[2]}")
+
+# MCMC alternative
+vegas_mcmc = MarkovChainMonteCarlo(
+    f=sharp_integrands,
+    f_dim=3,
+    bounds=bounds,
+    batch_size=batch_size,
+    nburnin=n_therm,
+    maps=vegas_map
+)
 result_vegas_mcmc = vegas_mcmc(n_eval)
+print(f"MCMC Results: {result_vegas_mcmc}")
 ```
 
+**Multi-output integration:**
+- **f_dim=3**: Tells the integrator to expect 3 outputs
+- All three integrals use the **same sample points**, improving efficiency
+- The samples are correlated, which is beneficial for computing related quantities
+- Must specify `f_dim` in both `adaptive_training()` and integrator initialization
+
+**High-dimensional integration:**
+As dimension increases:
+- Volume grows exponentially (curse of dimensionality)
+- Uniform sampling becomes exponentially inefficient
+- Sharp features become impossible to capture without adaptation
+- VEGAS becomes essential for accurate results
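+
+The sharp Gaussian above quantifies this: its effective support occupies roughly (√(2π)·σ)⁴ ≈ (2.51 × 0.05)⁴ ≈ 2.5 × 10⁻⁴ of the unit hypercube, so only about one uniform sample in four thousand lands on the peak, while a trained VEGAS map concentrates essentially all samples there.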
+
+---
+
-## Features
-
-- **Base integration methods**: Core Monte Carlo algorithms in `MCintegration/base.py`
-- **Integrator implementations**: Various MC integration strategies in `MCintegration/integrators.py`
-- **Variable transformations**: Coordinate mapping utilities in `MCintegration/maps.py`
-- **Utility functions**: Helper functions for numerical computations in `MCintegration/utils.py`
-- **Multi-CPU support**: Parallel processing capabilities demonstrated in `MCintegration/mc_multicpu_test.py`
-- **GPU acceleration**: CUDA-enabled functions through PyTorch in the examples directory
-
-
-## Requirements
+## Contributing
 
-- Python 3.7+
-- NumPy
-- PyTorch
-- gvar
+Contributions are welcome! Please:
+1. Fork the repository
+2. Create a feature branch
+3. Add tests for new functionality
+4. Submit a pull request
+
+## License
+
+[Your license information here]
+
+## Citation
+
+If you use MCintegration in your research, please cite:
+
+```bibtex
+@software{mcintegration,
+  title = {MCintegration: Monte Carlo Integration in Python},
+  author = {[Your Name]},
+  year = {2024},
+  url = {https://github.com/your-repo/mcintegration}
+}
+```
 
 ## Acknowledgements and Related Packages
-The development of `MCIntegration.py` has been greatly inspired and influenced by the `vegas` package. We would like to express our appreciation to the following:
-- [vegas](https://github.com/gplepage/vegas) A Python package offering Monte Carlo estimations of multidimensional integrals, with notable improvements on the original Vegas algorithm. It's been a valuable reference for us. Learn more from the vegas [documentation](https://vegas.readthedocs.io/). **Reference: G. P. Lepage, J. Comput. Phys. 27, 192 (1978) and G. P. Lepage, J. Comput. Phys. 439, 110386 (2021) [arXiv:2009.05112](https://arxiv.org/abs/2009.05112)**.
\ No newline at end of file
+
+The development of `MCintegration.py` has been greatly inspired and influenced by several significant works in the field of numerical integration. We would like to express our appreciation to the following:
+
+- **[vegas](https://github.com/gplepage/vegas)**: A Python package offering Monte Carlo estimations of multidimensional integrals, with notable improvements on the original Vegas algorithm. It's been a valuable reference for us. Learn more from the vegas [documentation](https://vegas.readthedocs.io/). **Reference: G. P. Lepage, J. Comput. Phys. 27, 192 (1978) and G. P. Lepage, J. Comput. Phys. 439, 110386 (2021) [arXiv:2009.05112](https://arxiv.org/abs/2009.05112)**.
+
+- **[MCIntegration.jl](https://github.com/numericalEFT/MCIntegration.jl)**: A comprehensive Julia package for Monte Carlo integration that has influenced our API design and algorithm choices. See their [documentation](https://numericaleft.github.io/MCIntegration.jl/dev/) for the Julia implementation.
+
+- **[Cuba](https://feynarts.de/cuba/)** and **[Cuba.jl](https://github.com/giordano/Cuba.jl)**: The Cuba library offers numerous Monte Carlo algorithms for multidimensional numerical integration. **Reference: T. Hahn, Comput. Phys. Commun. 168, 78 (2005) [arXiv:hep-ph/0404043](https://arxiv.org/abs/hep-ph/0404043)**.
+
+These groundbreaking efforts have paved the way for our project. We extend our deepest thanks to their creators and maintainers.
From 7273773f4fd9f11b0ffdc6bbb77512b1dcbfb454 Mon Sep 17 00:00:00 2001
From: fsxbhyy
Date: Fri, 31 Oct 2025 16:03:57 +0800
Subject: [PATCH 3/6] readme minor change

---
 README.md | 19 +++++--------------
 1 file changed, 5 insertions(+), 14 deletions(-)

diff --git a/README.md b/README.md
index b0bfa92..0b5af55 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,7 @@
 # MCintegration: Monte Carlo Integration in Python
+[![alpha](https://img.shields.io/badge/docs-alpha-blue.svg)](https://numericaleft.github.io/MCintegration.py/)
+[![Build Status](https://github.com/numericalEFT/MCIntegration.py/workflows/CI/badge.svg)](https://github.com/numericalEFT/MCIntegration.py/actions)
+[![codecov](https://codecov.io/gh/numericalEFT/MCintegration.py/graph/badge.svg?token=851N2CNOTN)](https://codecov.io/gh/numericalEFT/MCintegration.py)
 
 ## Why Choose MCintegration?
 
@@ -309,18 +312,6 @@ As dimension increases:
 
 ---
 
-## Contributing
-
-Contributions are welcome! Please:
-1. Fork the repository
-2. Create a feature branch
-3. Add tests for new functionality
-4. Submit a pull request
-
-## License
-
-[Your license information here]
-
 ## Citation
 
 If you use MCintegration in your research, please cite:
@@ -328,9 +319,9 @@ If you use MCintegration in your research, please cite:
 ```bibtex
 @software{mcintegration,
   title = {MCintegration: Monte Carlo Integration in Python},
-  author = {[Your Name]},
+  author = {Pengcheng Hou and Tao Wang and Caiyu Fan and Kun Chen},
   year = {2024},
-  url = {https://github.com/your-repo/mcintegration}
+  url = {https://github.com/numericalEFT/MCintegration.py}
 }
 ```
 

From 4fce7316062ebf1b43485587d4e6057d558977d9 Mon Sep 17 00:00:00 2001
From: fancaiyu
Date: Fri, 31 Oct 2025 17:51:41 +0800
Subject: [PATCH 4/6] add singlethread test

---
 MCintegration/mc_multicpu_test.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/MCintegration/mc_multicpu_test.py b/MCintegration/mc_multicpu_test.py
index 1be7b9f..d01a577 100644
--- a/MCintegration/mc_multicpu_test.py
+++ b/MCintegration/mc_multicpu_test.py
@@ -79,6 +79,9 @@ def two_integrands(x, f):
     if dist.is_initialized():
         dist.destroy_process_group()
 
+def test_mcmc_singlethread():
+    world_size = 1
+    init_process(rank=0, world_size=world_size, fn=run_mcmc, backend=backend)
 
 def test_mcmc(world_size=2):
     # Use fewer processes than CPU cores to avoid resource contention

From 6ef8bce72a4b55e7a5e874f40c1731eaa8d27bff Mon Sep 17 00:00:00 2001
From: fancaiyu
Date: Fri, 31 Oct 2025 17:56:45 +0800
Subject: [PATCH 5/6] add test for maps and utils

---
 MCintegration/maps_test.py  | 105 ++++++++++++++++++++++++++++++++++--
 MCintegration/utils_test.py |  81 ++++++++++++++++++++--------
 2 files changed, 162 insertions(+), 24 deletions(-)

diff --git a/MCintegration/maps_test.py b/MCintegration/maps_test.py
index 7863828..0d7eaf4 100644
--- a/MCintegration/maps_test.py
+++ b/MCintegration/maps_test.py
@@ -1,11 +1,45 @@
 import unittest
 import torch
-
-# import numpy as np
+import numpy as np
 from maps import Map, CompositeMap, Vegas, Configuration
 from base import LinearMap
 
 
+class TestConfiguration(unittest.TestCase):
+    def setUp(self):
+        self.batch_size = 5
+        self.dim = 3
+        self.f_dim = 2
+        self.device = "cpu"
+        self.dtype = torch.float64
+
+    def test_configuration_initialization(self):
+        config = Configuration(
+            batch_size=self.batch_size,
+            dim=self.dim,
+            f_dim=self.f_dim,
+            device=self.device,
+            dtype=self.dtype,
+        )
+
+        self.assertEqual(config.batch_size, self.batch_size)
+        self.assertEqual(config.dim, self.dim)
+        self.assertEqual(config.f_dim, self.f_dim)
+        self.assertEqual(config.device, self.device)
+
+        self.assertEqual(config.u.shape, (self.batch_size, self.dim))
+        self.assertEqual(config.x.shape, (self.batch_size, self.dim))
+        self.assertEqual(config.fx.shape, (self.batch_size, self.f_dim))
+        self.assertEqual(config.weight.shape, (self.batch_size,))
+        self.assertEqual(config.detJ.shape, (self.batch_size,))
+
+        self.assertEqual(config.u.dtype, self.dtype)
+        self.assertEqual(config.x.dtype, self.dtype)
+        self.assertEqual(config.fx.dtype, self.dtype)
+        self.assertEqual(config.weight.dtype, self.dtype)
+        self.assertEqual(config.detJ.dtype, self.dtype)
+
+
 class TestMap(unittest.TestCase):
     def setUp(self):
         self.device = "cpu"
@@ -24,6 +58,35 @@ def test_inverse_not_implemented(self):
         with self.assertRaises(NotImplementedError):
             self.map.inverse(torch.tensor([0.5, 0.5], dtype=self.dtype))
 
+    def test_forward_with_detJ(self):
+        # Create a simple linear map for testing: x = u * A + b
+        # With A=[1, 1] and b=[0, 0], we have x = u
+        linear_map = LinearMap([1, 1], [0, 0], device=self.device)
+
+        # Test forward_with_detJ method
+        u = torch.tensor([[0.5, 0.5]], dtype=torch.float64, device=self.device)
+        x, detJ = linear_map.forward_with_detJ(u)
+
+        # Since it's a linear map from [0,0] to [1,1], x should equal u
+        self.assertTrue(torch.allclose(x, u))
+
+        # Determinant of Jacobian should be 1 for linear map with slope 1
+        # forward_with_detJ returns actual determinant, not log
+        self.assertAlmostEqual(detJ.item(), 1.0)
+
+        # Test with a different linear map: x = u * [2, 3] + [1, 1]
+        # So u = [0.5, 0.5] should give x = [0.5*2+1, 0.5*3+1] = [2, 2.5]
+        linear_map2 = LinearMap([2, 3], [1, 1], device=self.device)
+        u2 = torch.tensor([[0.5, 0.5]], dtype=torch.float64, device=self.device)
+        x2, detJ2 = linear_map2.forward_with_detJ(u2)
+        expected_x2 = torch.tensor(
+            [[2.0, 2.5]], dtype=torch.float64, device=self.device
+        )
+        self.assertTrue(torch.allclose(x2, expected_x2))
+
+        # Determinant should be 2 * 3 = 6
+        self.assertAlmostEqual(detJ2.item(), 6.0)
+
 
 class TestCompositeMap(unittest.TestCase):
     def setUp(self):
@@ -99,6 +162,32 @@ def test_initialization(self):
         self.assertTrue(torch.equal(self.vegas.grid, self.init_grid))
         self.assertEqual(self.vegas.inc.shape, (2, self.ninc))
 
+    def test_ninc_initialization_types(self):
+        # Test ninc initialization with int
+        vegas_int = Vegas(self.dim, ninc=5)
+        self.assertEqual(vegas_int.ninc.tolist(), [5, 5])
+
+        # Test ninc initialization with list
+        vegas_list = Vegas(self.dim, ninc=[5, 10])
+        self.assertEqual(vegas_list.ninc.tolist(), [5, 10])
+
+        # Test ninc initialization with numpy array
+        vegas_np = Vegas(self.dim, ninc=np.array([3, 7]))
+        self.assertEqual(vegas_np.ninc.tolist(), [3, 7])
+
+        # Test ninc initialization with torch tensor
+        vegas_tensor = Vegas(self.dim, ninc=torch.tensor([4, 6]))
+        self.assertEqual(vegas_tensor.ninc.tolist(), [4, 6])
+
+        # Test ninc initialization with invalid type
+        with self.assertRaises(ValueError):
+            Vegas(self.dim, ninc="invalid")
+
+    def test_ninc_shape_validation(self):
+        # Test ninc shape validation
+        with self.assertRaises(ValueError):
+            Vegas(self.dim, ninc=[1, 2, 3])  # Wrong length
+
     def test_add_training_data(self):
         # Test adding training data
         self.vegas.add_training_data(self.sample)
@@ -137,6 +226,16 @@ def test_forward(self):
         self.assertEqual(x.shape, u.shape)
         self.assertEqual(log_jac.shape, (u.shape[0],))
 
+    def test_forward_with_detJ(self):
+        # Test forward_with_detJ transformation
+        u = torch.tensor([[0.1, 0.2], [0.3, 0.4]], dtype=torch.float64)
+        x, det_jac = self.vegas.forward_with_detJ(u)
+        self.assertEqual(x.shape, u.shape)
+        self.assertEqual(det_jac.shape, (u.shape[0],))
+
+        # Determinant should be positive
+        self.assertTrue(torch.all(det_jac > 0))
+
     def test_forward_out_of_bounds(self):
         # Test forward transformation with out-of-bounds u values
         u = torch.tensor(
@@ -220,4 +319,4 @@ def test_edge_cases(self):
 
 
 if __name__ == "__main__":
-    unittest.main()
+    unittest.main()
\ No newline at end of file
diff --git a/MCintegration/utils_test.py b/MCintegration/utils_test.py
index d186774..2e1cf20 100644
--- a/MCintegration/utils_test.py
+++ b/MCintegration/utils_test.py
@@ -164,17 +164,6 @@ def test_converged_criteria(self):
         self.assertTrue(ravg.converged(0.1, 0.1))
         self.assertFalse(ravg.converged(0.001, 0.001))
 
-    def test_multiplication_with_another_ravg(self):
-        ravg1 = RAvg(weighted=True)
-        ravg1.update(2.0, 0.1)
-        ravg2 = RAvg(weighted=True)
-        ravg2.update(3.0, 0.1)
-
-        result = ravg1 * ravg2
-        self.assertAlmostEqual(result.mean, 6.0)
-        sdev = (0.1 / 2**2 + 0.1 / 3**2) ** 0.5 * 6.0
-        self.assertAlmostEqual(result.sdev, sdev)
-
     def test_multiplication(self):
         ravg1 = RAvg(weighted=True)
         # Test multiplication by another RAvg object
@@ -216,16 +205,19 @@ def test_multiplication(self):
             np.allclose([r.sdev for r in result], [2.0 * ravg1.sdev, 3.0 * ravg1.sdev])
         )
 
-    def test_division_with_another_ravg(self):
-        ravg1 = RAvg(weighted=True)
-        ravg1.update(6.0, 0.1)
-        ravg2 = RAvg(weighted=True)
-        ravg2.update(3.0, 0.1)
+        # Test multiplication with unweighted RAvg
+        ravg_unweighted = RAvg(weighted=False)
+        ravg_unweighted.update(2.0, 0.1)
+        result = ravg_unweighted * 3.0
+        self.assertAlmostEqual(result.mean, 6.0)
+        self.assertAlmostEqual(result.sdev, ravg_unweighted.sdev * 3)
 
-        result = ravg1 / ravg2
-        self.assertAlmostEqual(result.mean, 2.0)
-        sdev = (0.1 / 6.0**2 + 0.1 / 3.0**2) ** 0.5 * 2.0
-        self.assertAlmostEqual(result.sdev, sdev)
+        # Test multiplication with zero variance
+        ravg_zero_var = RAvg(weighted=True)
+        ravg_zero_var.update(2.0, 0.0)
+        result = ravg_zero_var * 3.0
+        self.assertAlmostEqual(result.mean, 6.0)
+        self.assertAlmostEqual(result.sdev, 0.0)
 
     def test_division(self):
         ravg1 = RAvg(weighted=True)
@@ -271,6 +263,53 @@ def test_division(self):
             np.allclose([r.sdev for r in result], [ravg1.sdev / 2.0, ravg1.sdev / 3.0])
         )
 
+        # Test division with unweighted RAvg
+        ravg_unweighted = RAvg(weighted=False)
+        ravg_unweighted.update(6.0, 0.1)
+        result = ravg_unweighted / 3.0
+        self.assertAlmostEqual(result.mean, 2.0)
+        self.assertAlmostEqual(result.sdev, ravg_unweighted.sdev / 3)
+
+        # Test division with zero variance
+        ravg_zero_var = RAvg(weighted=True)
+        ravg_zero_var.update(6.0, 0.0)
+        result = ravg_zero_var / 3.0
+        self.assertAlmostEqual(result.mean, 2.0)
+        self.assertAlmostEqual(result.sdev, 0.0)
+
+        # Test division of zero by RAvg
+        zero_ravg = RAvg(weighted=True)
+        zero_ravg.update(0.0, 0.1)
+        divisor_ravg = RAvg(weighted=True)
+        divisor_ravg.update(3.0, 0.1)
+        result = zero_ravg / divisor_ravg
+        self.assertAlmostEqual(result.mean, 0.0)
+        # sdev = (0.1 / 0.0**2 + 0.1 / 3.0**2) ** 0.5 * 0.0  # This would be NaN
+        # For 0/x, the error propagation gives 0 * sqrt((0.1/0.0^2) + (0.1/3.0^2))
+        # But since we're dividing by zero, we need to be careful
+        # In practice, gvar handles this appropriately
+
+    def test_vector_operations_not_implemented(self):
+        # Test that NotImplemented is returned for vector operations
+        ravg = RAvg(weighted=True)
+        ravg.update(2.0, 0.1)
+
+        # Test multiplication with list (should return NotImplemented)
+        result = ravg.__mul__([1, 2, 3])
+        self.assertEqual(result, NotImplemented)
+
+        # Test division with list (should return NotImplemented)
+        result = ravg.__truediv__([1, 2, 3])
+        self.assertEqual(result, NotImplemented)
+
+        # Test multiplication with numpy array (should return NotImplemented)
+        result = ravg.__mul__(np.array([1, 2, 3]))
+        self.assertEqual(result, NotImplemented)
+
+        # Test division with numpy array (should return NotImplemented)
+        result = ravg.__truediv__(np.array([1, 2, 3]))
+        self.assertEqual(result, NotImplemented)
+
 
 class TestUtils(unittest.TestCase):
     def setUp(self):
@@ -315,4 +354,4 @@ def test_get_device_cuda_inactive(self):
 
 
 if __name__ == "__main__":
-    unittest.main()
+    unittest.main()
\ No newline at end of file

From 3c84972d41e2942f0d9598e48aa2ea15ecefb899 Mon Sep 17 00:00:00 2001
From: houpc
Date: Fri, 31 Oct 2025 19:14:59 +0800
Subject: [PATCH 6/6] update readme

---
 MCintegration/base.py  | 16 +++------
 MCintegration/maps.py  | 70 ++++++++++++++++--------------------------
 MCintegration/utils.py | 10 +++---
 README.md              |  5 ++-
 4 files changed, 37 insertions(+), 64 deletions(-)

diff --git a/MCintegration/base.py b/MCintegration/base.py
index a5c220c..248a4fb 100644
--- a/MCintegration/base.py
+++ b/MCintegration/base.py
@@ -9,10 +9,8 @@
 from MCintegration.utils import get_device
 
 # Constants for numerical stability
-# Small but safe non-zero value
-MINVAL = 10 ** (sys.float_info.min_10_exp + 50)
-MAXVAL = 10 ** (sys.float_info.max_10_exp - 50)  # Large but safe value
 EPSILON = 1e-16  # Small value to ensure numerical stability
+# EPSILON = sys.float_info.epsilon * 1e4  # Small value to ensure numerical stability
 
 
 class BaseDistribution(nn.Module):
@@ -98,10 +96,8 @@ def sample(self, batch_size=1, **kwargs):
             tuple: (uniform samples, log_det_jacobian=0)
         """
         # torch.manual_seed(0) # test seed
-        u = torch.rand((batch_size, self.dim),
-                       device=self.device, dtype=self.dtype)
-        log_detJ = torch.zeros(
-            batch_size, device=self.device, dtype=self.dtype)
+        u = torch.rand((batch_size, self.dim), device=self.device, dtype=self.dtype)
+        log_detJ = torch.zeros(batch_size, device=self.device, dtype=self.dtype)
         return u, log_detJ
 
 
@@ -133,16 +129,14 @@ def __init__(self, A, b, device=None, dtype=torch.float32):
         elif isinstance(A, torch.Tensor):
             self.A = A.to(dtype=self.dtype, device=self.device)
         else:
-            raise ValueError(
-                "'A' must be a list, numpy array, or torch tensor.")
+            raise ValueError("'A' must be a list, numpy array, or torch tensor.")
 
         if isinstance(b, (list, np.ndarray)):
             self.b = torch.tensor(b, dtype=self.dtype, device=self.device)
         elif isinstance(b, torch.Tensor):
             self.b = b.to(dtype=self.dtype, device=self.device)
         else:
-            raise ValueError(
-                "'b' must be a list, numpy array, or torch tensor.")
+            raise ValueError("'b' must be a list, numpy array, or torch tensor.")
 
         # Pre-compute determinant of Jacobian for efficiency
         self._detJ = torch.prod(self.A)
diff --git a/MCintegration/maps.py b/MCintegration/maps.py
index 4f03853..362de2a 100644
--- a/MCintegration/maps.py
+++ b/MCintegration/maps.py
@@ -9,7 +9,8 @@
 from MCintegration.utils import get_device
 import sys
 
-TINY = 10 ** (sys.float_info.min_10_exp + 50)  # Small but safe non-zero value
+# TINY = 10 ** (sys.float_info.min_10_exp + 50)  # Small but safe non-zero value
+TINY = 1e-45
 
 
 class Configuration:
@@ -38,14 +39,10 @@ def __init__(self, batch_size, dim, f_dim, device=None, dtype=torch.float32):
         self.f_dim = f_dim
         self.batch_size = batch_size
         # Initialize tensors for storing samples and results
-        self.u = torch.empty(
-            (batch_size, dim), dtype=dtype, device=self.device)
-        self.x = torch.empty(
-            (batch_size, dim), dtype=dtype, device=self.device)
-        self.fx = torch.empty((batch_size, f_dim),
-                              dtype=dtype, device=self.device)
-        self.weight = torch.empty(
-            (batch_size,), dtype=dtype, device=self.device)
+        self.u = torch.empty((batch_size, dim), dtype=dtype, device=self.device)
+        self.x = torch.empty((batch_size, dim), dtype=dtype, device=self.device)
+        self.fx = torch.empty((batch_size, f_dim), dtype=dtype, device=self.device)
+        self.weight = torch.empty((batch_size,), dtype=dtype, device=self.device)
         self.detJ = torch.empty((batch_size,), dtype=dtype, device=self.device)
 
 
@@ -202,8 +199,7 @@ def __init__(self, dim, ninc=1000, device=None, dtype=torch.float32):
             (self.dim,), ninc, dtype=torch.int32, device=self.device
         )
         elif isinstance(ninc, (list, np.ndarray)):
-            self.ninc = torch.tensor(
-                ninc, dtype=torch.int32, device=self.device)
+            self.ninc = torch.tensor(ninc, dtype=torch.int32, device=self.device)
         elif isinstance(ninc, torch.Tensor):
             self.ninc = ninc.to(dtype=torch.int32, device=self.device)
         else:
@@ -223,8 +219,9 @@ def __init__(self, dim, ninc=1000, device=None, dtype=torch.float32):
         self.sum_f = torch.zeros(
             self.dim, self.max_ninc, dtype=self.dtype, device=self.device
         )
-        self.n_f = torch.zeros(
-            self.dim, self.max_ninc, dtype=self.dtype, device=self.device
+        self.n_f = (
+            torch.zeros(self.dim, self.max_ninc, dtype=self.dtype, device=self.device)
+            + TINY
         )
         self.avg_f = torch.ones(
             (self.dim, self.max_ninc), dtype=self.dtype, device=self.device
@@ -308,10 +305,8 @@ def adapt(self, alpha=0.5):
         """
         # Aggregate training data across distributed processes if applicable
         if torch.distributed.is_initialized():
-            torch.distributed.all_reduce(
-                self.sum_f, op=torch.distributed.ReduceOp.SUM)
-            torch.distributed.all_reduce(
-                self.n_f, op=torch.distributed.ReduceOp.SUM)
+            torch.distributed.all_reduce(self.sum_f, op=torch.distributed.ReduceOp.SUM)
+            torch.distributed.all_reduce(self.n_f, op=torch.distributed.ReduceOp.SUM)
 
         # Initialize a new grid tensor
         new_grid = torch.empty(
@@ -319,8 +314,7 @@ def adapt(self, alpha=0.5):
         )
 
         if alpha > 0:
-            tmp_f = torch.empty(
-                self.max_ninc, dtype=self.dtype, device=self.device)
+            tmp_f = torch.empty(self.max_ninc, dtype=self.dtype, device=self.device)
             # avg_f = torch.ones(self.inc.shape[1], dtype=self.dtype, device=self.device)
             # print(self.ninc.shape, self.dim)
 
@@ -338,14 +332,12 @@ def adapt(self, alpha=0.5):
 
             if alpha > 0:
                 # Smooth avg_f
-                tmp_f[0] = (7.0 * avg_f[0] + avg_f[1]
-                            ).abs() / 8.0  # Shape: ()
+                tmp_f[0] = (7.0 * avg_f[0] + avg_f[1]).abs() / 8.0  # Shape: ()
                 tmp_f[ninc - 1] = (
                     7.0 * avg_f[ninc - 1] + avg_f[ninc - 2]
                 ).abs() / 8.0  # Shape: ()
-                tmp_f[1: ninc - 1] = (
-                    6.0 * avg_f[1: ninc - 1] +
-                    avg_f[: ninc - 2] + avg_f[2:ninc]
+                tmp_f[1 : ninc - 1] = (
+                    6.0 * avg_f[1 : ninc - 1] + avg_f[: ninc - 2] + avg_f[2:ninc]
                 ).abs() / 8.0
 
                 # Normalize tmp_f to ensure the sum is 1
@@ -393,8 +385,7 @@ def adapt(self, alpha=0.5):
                 ) / avg_f_relevant
 
                 # Calculate the new grid points using vectorized operations
-                new_grid[d, 1:ninc] = grid_left + \
-                    fractional_positions * inc_relevant
+                new_grid[d, 1:ninc] = grid_left + fractional_positions * inc_relevant
             else:
                 # If alpha == 0 or no training data, retain the existing grid
                 new_grid[d, :] = self.grid[d, :]
@@ -407,8 +398,7 @@ def adapt(self, alpha=0.5):
         self.inc.zero_()  # Reset increments to zero
         for d in range(self.dim):
             self.inc[d, : self.ninc[d]] = (
-                self.grid[d, 1: self.ninc[d] + 1]
-                - self.grid[d, : self.ninc[d]]
+                self.grid[d, 1 : self.ninc[d] + 1] - self.grid[d, : self.ninc[d]]
             )
 
         # Clear accumulated training data for the next adaptation cycle
@@ -432,8 +422,7 @@ def make_uniform(self):
                 device=self.device,
             )
             self.inc[d, : self.ninc[d]] = (
-                self.grid[d, 1: self.ninc[d] + 1]
-                - self.grid[d, : self.ninc[d]]
+                self.grid[d, 1 : self.ninc[d] + 1] - self.grid[d, : self.ninc[d]]
             )
         self.clear()
 
@@ -459,8 +448,7 @@ def forward(self, u):
         batch_size = u.size(0)
 
         # Clamp iu to [0, ninc-1] to handle out-of-bounds indices
-        min_tensor = torch.zeros(
-            (1, self.dim), dtype=iu.dtype, device=self.device)
+        min_tensor = torch.zeros((1, self.dim), dtype=iu.dtype, device=self.device)
         # Shape: (1, dim)
         max_tensor = (self.ninc - 1).unsqueeze(0).to(iu.dtype)
         iu_clamped = torch.clamp(iu, min=min_tensor, max=max_tensor)
@@ -471,8 +459,7 @@ def forward(self, u):
         grid_gather = torch.gather(grid_expanded, 2, iu_clamped.unsqueeze(2)).squeeze(
             2
         )  # Shape: (batch_size, dim)
-        inc_gather = torch.gather(
-            inc_expanded, 2, iu_clamped.unsqueeze(2)).squeeze(2)
+        inc_gather = torch.gather(inc_expanded, 2, iu_clamped.unsqueeze(2)).squeeze(2)
 
         x = grid_gather + inc_gather * du_ninc
         log_detJ = (inc_gather * self.ninc).log_().sum(dim=1)
@@ -484,8 +471,7 @@ def forward(self, u):
             # For each sample and dimension, set x to grid[d, ninc[d]]
             # and log_detJ += log(inc[d, ninc[d]-1] * ninc[d])
             boundary_grid = (
-                self.grid[torch.arange(
-                    self.dim, device=self.device), self.ninc]
+                self.grid[torch.arange(self.dim, device=self.device), self.ninc]
                 .unsqueeze(0)
                 .expand(batch_size, -1)
             )
@@ -493,8 +479,7 @@ def forward(self, u):
             x[out_of_bounds] = boundary_grid[out_of_bounds]
 
             boundary_inc = (
-                self.inc[torch.arange(
-                    self.dim, device=self.device), self.ninc - 1]
+                self.inc[torch.arange(self.dim, device=self.device), self.ninc - 1]
                 .unsqueeze(0)
                 .expand(batch_size, -1)
             )
@@ -522,8 +507,7 @@ def inverse(self, x):
 
         # Initialize output tensors
         u = torch.empty_like(x)
-        log_detJ = torch.zeros(
-            batch_size, device=self.device, dtype=self.dtype)
+        log_detJ = torch.zeros(batch_size, device=self.device, dtype=self.dtype)
 
         # Loop over each dimension to perform inverse mapping
         for d in range(dim):
@@ -537,8 +521,7 @@ def inverse(self, x):
             # Perform searchsorted to find indices where x should be inserted to maintain order
             # torch.searchsorted returns indices in [0, max_ninc +1]
             iu = (
-                torch.searchsorted(
-                    grid_d, x[:, d].contiguous(), right=True) - 1
+                torch.searchsorted(grid_d, x[:, d].contiguous(), right=True) - 1
             )  # Shape: (batch_size,)
 
             # Clamp indices to [0, ninc_d - 1] to ensure they are within valid range
@@ -551,8 +534,7 @@ def inverse(self, x):
             inc_gather = inc_d[iu_clamped]  # Shape: (batch_size,)
 
             # Compute du: fractional part within the increment
-            du = (x[:, d] - grid_gather) / \
-                (inc_gather + TINY)  # Shape: (batch_size,)
+            du = (x[:, d] - grid_gather) / (inc_gather + TINY)  # Shape: (batch_size,)
 
             # Compute u for dimension d
             u[:, d] = (du + iu_clamped) / ninc_d  # Shape: (batch_size,)
diff --git a/MCintegration/utils.py b/MCintegration/utils.py
index aa7ab38..7c3f87a 100644
--- a/MCintegration/utils.py
+++ b/MCintegration/utils.py
@@ -11,8 +11,8 @@
 
 # Constants for numerical stability
 # Small but safe non-zero value
-MINVAL = 10 ** (sys.float_info.min_10_exp + 50)
-MAXVAL = 10 ** (sys.float_info.max_10_exp - 50)  # Large but safe value
+# MINVAL = 10 ** (sys.float_info.min_10_exp + 50)
+MINVAL = 1e-45
 
 _VECTOR_TYPES = [np.ndarray, list]
 
@@ -83,8 +83,7 @@ def add(self, res):
                 self._wlist.append(1 / (res.var if res.var > MINVAL else MINVAL))
             var = 1.0 / np.sum(self._wlist)
             sdev = np.sqrt(var)
-            mean = np.sum(
-                [w * m for w, m in zip(self._wlist, self._mlist)]) * var
+            mean = np.sum([w * m for w, m in zip(self._wlist, self._mlist)]) * var
             super(RAvg, self).__init__(*gvar.gvar(mean, sdev).internaldata)
         else:
             # Simple average
@@ -93,8 +92,7 @@ def add(self, res):
             self._count += 1
             mean = self._sum / self._count
             var = self._varsum / self._count**2
-            super(RAvg, self).__init__(
-                *gvar.gvar(mean, np.sqrt(var)).internaldata)
+            super(RAvg, self).__init__(*gvar.gvar(mean, np.sqrt(var)).internaldata)
 
     def extend(self, ravg):
         """
diff --git a/README.md b/README.md
index 0b5af55..4fc6bc9 100644
--- a/README.md
+++ b/README.md
@@ -178,9 +178,8 @@ def singular_func(x, f):
     Integrand with singularity at x=0.
     The integral ∫₀¹ log(x)/√x dx = -4 (analytical result)
     """
-    f[:, 0] = torch.where(x[:, 0] < 1e-14,
-                          0.0,
-                          torch.log(x[:, 0]) / torch.sqrt(x[:, 0]))
+    x_safe = torch.clamp(x[:, 0], min=1e-32)
+    f[:, 0] = torch.log(x_safe) / torch.sqrt(x_safe)
     return f[:, 0]
 
 # Integration parameters