Vegas+ #64

Merged on Aug 27, 2021 (30 commits)

Commits (30)

4a9933a  stratified NON adaptive sampling (andrea-pasquale, Feb 9, 2021)
a7517fc  added adaptive-stratified sampling (andrea-pasquale, Feb 9, 2021)
c9ec2fc  PEP8 style corrections (andrea-pasquale, Feb 13, 2021)
883cd00  updated __init__.py and test_algs.py (andrea-pasquale, Feb 13, 2021)
befb032  added example using StratifiedFlow (andrea-pasquale, Feb 13, 2021)
f0fd609  fixed error in redistribute_samples (andrea-pasquale, Feb 15, 2021)
bf13849  first implementation of vegas+ in vegasflow (andrea-pasquale, Mar 2, 2021)
588d7ca  fixed problems in stratified.py and vflowplus.py (andrea-pasquale, Mar 8, 2021)
15aecc4  Merge branch 'master' into vegas+ (andrea-pasquale, Mar 8, 2021)
b4bee75  first attempt at fixing retracing (andrea-pasquale, Mar 12, 2021)
d750c3c  suggested changes (scarlehoff, Mar 15, 2021)
26b5650  typo (scarlehoff, Mar 15, 2021)
dd7d54c  fixed self.adaptive issue (andrea-pasquale, Mar 16, 2021)
1426f37  Merge pull request #65 from N3PDF/vegasplus_retracing (scarrazza, Mar 16, 2021)
ad89727  fixed crash problem in vflowplus (andrea-pasquale, Apr 14, 2021)
016abe2  set adaptive off is dim > 13 (andrea-pasquale, Apr 14, 2021)
38b87a0  typo (andrea-pasquale, Apr 14, 2021)
462373e  removed StratifiedFlow (andrea-pasquale, Apr 22, 2021)
18ca369  file update from previous commit (andrea-pasquale, Apr 22, 2021)
07cf119  all events in same device for vegas+ (andrea-pasquale, Jun 22, 2021)
77674fe  merge with conflicts with master (scarlehoff, Aug 16, 2021)
f577231  fixed some todos (scarlehoff, Aug 16, 2021)
0b9ae0b  stylize (scarlehoff, Aug 16, 2021)
9dadcf6  bump version to 1.3.0 (scarlehoff, Aug 16, 2021)
b3b8c36  more style (scarlehoff, Aug 17, 2021)
d013f4f  ups (scarlehoff, Aug 17, 2021)
34def6f  remove duplicated code (scarlehoff, Aug 18, 2021)
b1a4713  remove more duplicates (scarlehoff, Aug 18, 2021)
516610d  bugfix (scarlehoff, Aug 25, 2021)
1e1d4e3  add warning when changing event limit (scarlehoff, Aug 26, 2021)
42 changes: 42 additions & 0 deletions examples/retracing.py
@@ -0,0 +1,42 @@
"""
Retracing example in VegasFlowPlus
"""

from vegasflow import VegasFlowPlus, VegasFlow, PlainFlow
from vegasflow.configflow import DTYPE, DTYPEINT, run_eager, float_me
import time
import numpy as np
import tensorflow as tf

# MC integration setup
dim = 2
ncalls = np.int32(1e3)
n_iter = 5

@tf.function(input_signature=[
    tf.TensorSpec(shape=[None,dim], dtype=DTYPE),
    tf.TensorSpec(shape=[], dtype=DTYPEINT),
    tf.TensorSpec(shape=[None], dtype=DTYPE)
    ]
)
def symgauss(xarr, n_dim=None, weight=None, **kwargs):
    """symgauss test function"""
    if n_dim is None:
        n_dim = xarr.shape[-1]
    a = tf.constant(0.1, dtype=DTYPE)
    n100 = tf.cast(100 * n_dim, dtype=DTYPE)
    pref = tf.pow(1.0 / a / np.sqrt(np.pi), float_me(n_dim))
    coef = tf.reduce_sum(tf.range(n100 + 1))
    coef += tf.reduce_sum(tf.square((xarr - 1.0 / 2.0) / a), axis=1)
    coef -= (n100 + 1) * n100 / 2.0
    return pref * tf.exp(-coef)


if __name__ == "__main__":
    """Testing several different integrations"""

    # run_eager()
    vegas_instance = VegasFlowPlus(dim, ncalls, adaptive=True)
    vegas_instance.compile(symgauss)
    vegas_instance.run_integration(n_iter)
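
For illustration, a minimal sketch (not part of this PR) of why the fixed input_signature above matters: with an unspecified batch dimension the integrand is traced once, even though VegasFlowPlus changes the number of events between iterations. The names integrand and traces below are placeholders of mine.

import numpy as np
import tensorflow as tf

traces = []  # grows only when TensorFlow traces the function

@tf.function(input_signature=[tf.TensorSpec(shape=[None, 2], dtype=tf.float64)])
def integrand(xarr):
    traces.append(1)  # Python side effect: runs at trace time only
    return tf.reduce_sum(xarr ** 2, axis=1)

integrand(tf.constant(np.random.rand(100, 2)))  # first call: traced once
integrand(tf.constant(np.random.rand(250, 2)))  # new batch size: no retracing
print(len(traces))  # -> 1
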
4 changes: 3 additions & 1 deletion src/vegasflow/__init__.py
@@ -1,8 +1,10 @@
"""Monte Carlo integration with Tensorflow"""

from vegasflow.configflow import int_me, float_me, run_eager

# Expose the main interfaces
from vegasflow.vflow import VegasFlow, vegas_wrapper, vegas_sampler
from vegasflow.plain import PlainFlow, plain_wrapper, plain_sampler
from vegasflow.vflowplus import VegasFlowPlus, vegasflowplus_wrapper, vegasflowplus_sampler

__version__ = "1.2.2"
__version__ = "1.3.0"
12 changes: 9 additions & 3 deletions src/vegasflow/configflow.py
Expand Up @@ -9,11 +9,17 @@
# Some global parameters
BINS_MAX = 50
ALPHA = 1.5
BETA = 0.75 # Vegas+
TECH_CUT = 1e-8

# Set up the logistics of the integration
# set the limits lower if hitting memory problems

# Events Limit limits how many events are done in one single run of the event_loop
# set it lower if hitting memory problems
MAX_EVENTS_LIMIT = int(1e6)
# Maximum number of evaluation per hypercube for VegasFlowPlus
MAX_NEVAL_HCUBE = int(1e4)

# Select the list of devices to look for
DEFAULT_ACTIVE_DEVICES = ["GPU"] # , 'CPU']

@@ -66,15 +72,15 @@
else:
    DTYPE = tf.float64
    FMAX = tf.constant(np.finfo(np.float64).max, dtype=DTYPE)
    logger.warning(f"PDFFLOW_FLOAT={_float_env} not understood, defaulting to 64 bits")
    logger.warning(f"VEGASFLOW_FLOAT={_float_env} not understood, defaulting to 64 bits")

if _int_env == "64":
    DTYPEINT = tf.int64
elif _int_env == "32":
    DTYPEINT = tf.int32
else:
    DTYPEINT = tf.int64
    logger.warning(f"PDFFLOW_INT={_int_env} not understood, defaulting to 64 bits")
    logger.warning(f"VEGASFLOW_INT={_int_env} not understood, defaulting to 64 bits")


def run_eager(flag=True):
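
BETA above is the damping exponent used by Vegas+-style adaptive stratified sampling when redistributing calls among the hypercubes of the stratification grid, and MAX_NEVAL_HCUBE caps the evaluations a single hypercube may receive. The PR's actual implementation lives in vflowplus.py (not shown in this excerpt); the following is only a sketch of the standard redistribution rule, with redistribute and d_h as placeholder names: each hypercube gets a share of the calls proportional to its accumulated variance measure raised to the power BETA, with a floor of two samples.

import numpy as np

BETA = 0.75  # damping exponent, as in configflow.py above

def redistribute(d_h, n_calls_total, n_min=2):
    """Sketch: calls assigned to each hypercube for the next iteration."""
    damped = np.power(d_h, BETA)       # damp large variance estimates
    weights = damped / damped.sum()    # normalise to a sampling fraction
    return np.maximum(n_min, (n_calls_total * weights).astype(int))

# hypercubes with very different accumulated variance get very different budgets
print(redistribute(np.array([10.0, 1.0, 0.1, 0.0]), n_calls_total=1000))
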
8 changes: 6 additions & 2 deletions src/vegasflow/monte_carlo.py
@@ -48,6 +48,7 @@
    DTYPEINT,
    TECH_CUT,
    float_me,
    int_me,
)


@@ -330,7 +331,7 @@ def set_distribute(self, queue_object):
        self.cluster = queue_object
        self.distribute = True

    def run_event(self, **kwargs):
    def run_event(self, tensorize_events=False, **kwargs):
        """
        Runs the Monte Carlo event. This corresponds to a number of calls
        decided by the `events_per_run` variable. The variable `acc` is exposed
@@ -358,7 +359,10 @@ def run_event(self, **kwargs):
            ncalls = min(events_left, self.events_per_run)
            pc += ncalls / self.n_events * 100
            percentages.append(pc)
            events_to_do.append(ncalls)
            if tensorize_events:
                events_to_do.append(int_me(ncalls))
            else:
                events_to_do.append(ncalls)
            events_left -= self.events_per_run

        if self.devices:
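
The new tensorize_events flag makes run_event hand the per-run event counts to the integrator as TensorFlow integer tensors (via int_me) instead of plain Python ints. This matters for retracing: a tf.function treats every distinct Python integer argument as a new signature, while tensor arguments of the same dtype reuse a single trace. A toy sketch of the effect (run_chunk is a placeholder of mine, not the PR's code):

import tensorflow as tf

n_traces = 0

@tf.function
def run_chunk(n_events):
    global n_traces
    n_traces += 1  # Python side effect: executes only while tracing
    return tf.reduce_sum(tf.cast(tf.range(n_events), tf.float32))

run_chunk(100)               # Python int: trace 1
run_chunk(200)               # different int: trace 2
run_chunk(tf.constant(300))  # scalar tensor: trace 3 (new input kind)
run_chunk(tf.constant(400))  # another tensor: reuses trace 3
print(n_traces)  # -> 3
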
7 changes: 4 additions & 3 deletions src/vegasflow/plain.py
Expand Up @@ -2,7 +2,7 @@
Plain implementation of the plainest possible MonteCarlo
"""

from vegasflow.configflow import DTYPE, fone, fzero
from vegasflow.configflow import fone, fzero
from vegasflow.monte_carlo import MonteCarloFlow, wrapper, sampler
import tensorflow as tf

@@ -38,9 +38,10 @@ def _run_iteration(self):


def plain_wrapper(*args, **kwargs):
    """ Wrapper around PlainFlow """
    """Wrapper around PlainFlow"""
    return wrapper(PlainFlow, *args, **kwargs)


def plain_sampler(*args, **kwargs):
    """ Wrapper sampler around PlainFlow """
    """Wrapper sampler around PlainFlow"""
    return sampler(PlainFlow, *args, **kwargs)
17 changes: 17 additions & 0 deletions src/vegasflow/tests/test_algs.py
Expand Up @@ -13,6 +13,7 @@
from vegasflow.configflow import DTYPE
from vegasflow.vflow import VegasFlow
from vegasflow.plain import PlainFlow
from vegasflow.vflowplus import VegasFlowPlus
from vegasflow import plain_sampler, vegas_sampler

# Test setup
@@ -66,6 +67,7 @@ def check_is_one(result, sigmas=3):


def test_VegasFlow():
    """ Test VegasFlow class, importance sampling algorithm"""
    for mode in range(4):
        vegas_instance = instance_and_compile(VegasFlow, mode)
        _ = vegas_instance.run_integration(n_iter)
@@ -90,6 +92,7 @@ def test_VegasFlow():


def test_VegasFlow_save_grid():
    """ Test the grid saving feature of vegasflow """
    tmp_filename = tempfile.mktemp()
    vegas_instance = instance_and_compile(VegasFlow)
    # Run an iteration so the grid is not trivial
@@ -173,3 +176,17 @@ def test_rng_generation(n_events=100):
    _ = helper_rng_tester(p.generate_random_array, n_events)
    v = vegas_sampler(example_integrand, dim, n_events, training_steps=2)
    _ = helper_rng_tester(v, n_events)

def test_VegasFlowPlus_ADAPTIVE_SAMPLING():
    """ Test Vegasflow with Adaptive Sampling on (the default) """
    for mode in range(4):
        vflowplus_instance = instance_and_compile(VegasFlowPlus, mode)
        result = vflowplus_instance.run_integration(n_iter)
        check_is_one(result)

def test_VegasFlowPlus_NOT_ADAPTIVE_SAMPLING():
    """ Test Vegasflow with Adaptive Sampling off (non-default) """
    vflowplus_instance = VegasFlowPlus(dim, ncalls, adaptive=False)
    vflowplus_instance.compile(example_integrand)
    result = vflowplus_instance.run_integration(n_iter)
    check_is_one(result)
39 changes: 21 additions & 18 deletions src/vegasflow/utils.py
Expand Up @@ -6,9 +6,13 @@
import tensorflow as tf


@tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=DTYPE),
                              tf.TensorSpec(shape=[None, None], dtype=DTYPEINT),
                              tf.TensorSpec(shape=[], dtype=DTYPEINT)])
@tf.function(
    input_signature=[
        tf.TensorSpec(shape=[None], dtype=DTYPE),
        tf.TensorSpec(shape=[None, None], dtype=DTYPEINT),
        tf.TensorSpec(shape=[], dtype=DTYPEINT),
    ]
)
def consume_array_into_indices(input_arr, indices, result_size):
    """
    Accumulate the input tensor `input_arr` into an output tensor of
@@ -38,6 +42,7 @@ def consume_array_into_indices(input_arr, indices, result_size):
    final_result = tf.reduce_sum(res_tmp, axis=1)
    return final_result


def py_consume_array_into_indices(input_arr, indices, result_size):
    """
    Python interface wrapper for ``consume_array_into_indices``.
@@ -46,8 +51,8 @@ def py_consume_array_into_indices(input_arr, indices, result_size):
    return consume_array_into_indices(float_me(input_arr), int_me(indices), int_me(result_size))


def generate_condition_function(n_mask, condition='and'):
    """ Generates a function that takes a number of masks
def generate_condition_function(n_mask, condition="and"):
    """Generates a function that takes a number of masks
    and returns a combination of all n_masks for the given condition.

    It is possible to pass a list of allowed conditions, in that case
@@ -76,7 +81,7 @@ def generate_condition_function(n_mask, condition='and'):
    full_mask=<tf.Tensor: shape=(3,), dtype=bool, numpy=array([ True, False, False])>
    indices=<tf.Tensor: shape=(1, 1), dtype=int32, numpy=array([[0]], dtype=int32)>


    Parameters
    ----------
@@ -90,16 +95,14 @@ def generate_condition_function(n_mask, condition='and'):
    `condition_to_idx`: function
        function(*masks) -> full_mask, true indices
    """
    allowed_conditions = {
        'and': tf.math.logical_and,
        'or' : tf.math.logical_or
    }
    allowed_conditions = {"and": tf.math.logical_and, "or": tf.math.logical_or}
    allo = list(allowed_conditions.keys())

    # Check that the user is not asking for anything weird
    if n_mask < 2:
        raise ValueError(f"At least two masks needed to generate a wrapper")
    elif isinstance(condition, str):
        raise ValueError("At least two masks needed to generate a wrapper")

    if isinstance(condition, str):
        if condition not in allowed_conditions:
            raise ValueError(f"Wrong condition {condition}, allowed values are {allo}")
        is_list = False
@@ -112,7 +115,7 @@ def generate_condition_function(n_mask, condition='and'):
        is_list = True

    def py_condition(*masks):
        """ Receives a list of conditions and returns a result mask
        """Receives a list of conditions and returns a result mask
        and the list of indices in which the result mask is True

        Returns
@@ -125,15 +128,15 @@ def py_condition(*masks):
        if is_list:
            res = masks[0]
            for i, cond in enumerate(condition):
                res = allowed_conditions[cond](res, masks[i+1])
        elif condition == 'and':
                res = allowed_conditions[cond](res, masks[i + 1])
        elif condition == "and":
            res = tf.math.reduce_all(masks, axis=0)
        elif condition == 'or':
        elif condition == "or":
            res = tf.math.reduce_any(masks, axis=0)
        indices = int_me(tf.where(res))
        return res, indices

    signature = n_mask*[tf.TensorSpec(shape=[None], dtype=tf.bool)]
    signature = n_mask * [tf.TensorSpec(shape=[None], dtype=tf.bool)]

    condition_to_idx = tf.function(py_condition, input_signature=signature)
    return condition_to_idx
    return condition_to_idx
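
A small usage sketch of the refactored generate_condition_function: the mask values below and the expected output are mine, worked out from the function's definition rather than copied from the diff.

import tensorflow as tf
from vegasflow.utils import generate_condition_function

# combine two boolean masks with a logical "and" and keep the surviving indices
cond_and = generate_condition_function(2, condition="and")

mask_a = tf.constant([True, False, True])
mask_b = tf.constant([True, True, False])
full_mask, indices = cond_and(mask_a, mask_b)
# full_mask -> [True, False, False]; indices -> [[0]]
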