Merge pull request #115 from lcgraham/master
Add sensitivity package to master
lcgraham committed Jun 30, 2015
2 parents 383ad10 + 89fea22 commit d2c974c
Showing 18 changed files with 1,366 additions and 9 deletions.
1 change: 1 addition & 0 deletions .travis.yml
@@ -33,3 +33,4 @@ notifications:
branches:
only:
- master

17 changes: 15 additions & 2 deletions bet/Comm.py
@@ -5,6 +5,7 @@
to run BET.
"""

import collections

class comm_for_no_mpi4py(object):

@@ -43,6 +44,16 @@ def allgather(self, val, val2=None):
"""
return val

def gather(self, val1, val2=None, root=0):
"""
:param object val1: object to gather
:param object val2: object to gather
:param int root: 0
:rtype: list
:returns: [val1]
"""
return [val1]

def allreduce(self, val1, op=None):
"""
:param object val1: object to allreduce
@@ -62,12 +73,14 @@ def bcast(self, val, root=0):

def scatter(self, val1, val2=None, root=0):
"""
:param object val1: object to Scatter
:param object val2: object to Scatter
:param object val1: object to scatter
:param object val2: object to scatter
:param int root: 0
:rtype: object
:returns: val1
"""
if isinstance(val1, collections.Iterable) and len(val1)==1:
val1 = val1[0]
return val1

def Allgather(self, val, val2=None):
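The new gather and the updated scatter keep the no-mpi4py fallback consistent with mpi4py semantics: gather wraps a rank's value in a one-element list, and scatter unwraps a one-element iterable, so code written for parallel runs behaves the same when mpi4py is absent and comm.size is 1. A minimal sketch of the intended serial behavior (the values are illustrative):

import numpy as np
from bet.Comm import comm

# Without mpi4py installed, comm is a comm_for_no_mpi4py instance with size 1.
chunks = [np.arange(3)]                # one chunk per "rank"
local = comm.scatter(chunks, root=0)   # singleton list is unwrapped -> array([0, 1, 2])
gathered = comm.gather(local, root=0)  # wrapped back into a one-element list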
4 changes: 2 additions & 2 deletions bet/calculateP/calculateP.py
@@ -131,7 +131,7 @@ def prob(samples, data, rho_D_M, d_distr_samples, d_Tree=None):
local_index = range(0+comm.rank, samples.shape[0], comm.size)
samples_local = samples[local_index, :]
data_local = data[local_index, :]
local_array = np.array(local_index)
local_array = np.array(local_index, dtype='int64')

# Determine which inputs go to which M bins using the QoI
(_, io_ptr) = d_Tree.query(data_local)
@@ -218,7 +218,7 @@ def prob_mc(samples, data, rho_D_M, d_distr_samples,
samples_local = samples[local_index, :]
data_local = data[local_index, :]
lam_vol_local = lam_vol[local_index]
local_array = np.array(local_index)
local_array = np.array(local_index, dtype='int64')

# Determine which inputs go to which M bins using the QoI
(_, io_ptr_local) = d_Tree.query(data_local)
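Both hunks pin the local index array to a 64-bit integer dtype; np.array over a range otherwise takes the platform's default integer width. The indices themselves come from a round-robin split of the samples across ranks, sketched here with illustrative sizes:

import numpy as np

num_samples, comm_size = 10, 4                            # illustrative values
for rank in range(comm_size):
    local_index = range(rank, num_samples, comm_size)     # round-robin split of sample indices
    local_array = np.array(local_index, dtype='int64')    # fixed-width indices on every platform
    print rank, local_array
# rank 0 -> [0 4 8], rank 1 -> [1 5 9], rank 2 -> [2 6], rank 3 -> [3 7]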
2 changes: 1 addition & 1 deletion bet/calculateP/voronoiHistogram.py
@@ -188,7 +188,7 @@ def histogramdd_volumes(edges, points):
"""
Given a sequence of arrays describing the edges of voronoi cells (bins)
along each dimension and an 'ij' ordered sequence of points (1 per voronoi
cell) returns a list of the volumes associated with these voronoic cells.
cell) returns a list of the volumes associated with these voronoi cells.
:param edges: A sequence of arrays describing the edges of bins along
each dimension.
13 changes: 13 additions & 0 deletions bet/sensitivity/__init__.py
@@ -0,0 +1,13 @@
# Copyright (C) 2014-2015 The BET Development Team

r"""
This subpackage provides methods for approximating gradients of
QoI maps and choosing optimal QoIs to use in the inverse problem.
* :mod:`~bet.sensitivity.gradients` provides methods for approximating
gradients of QoI maps.
* :mod:`~bet.sensitivity.chooseQoIs` provides methods for choosing optimal
QoIs to use in the inverse problem.
"""

__all__ = ['gradients', 'chooseQoIs']
148 changes: 148 additions & 0 deletions bet/sensitivity/chooseQoIs.py
@@ -0,0 +1,148 @@
# Copyright (C) 2014-2015 The BET Development Team

"""
This module contains functions choosing optimal QoIs to use in the stochastic
inverse problem.
"""
import numpy as np
from itertools import combinations
from bet.Comm import comm

def chooseOptQoIs(grad_tensor, qoiIndices=None, num_qois_return=None,
num_optsets_return=None):
r"""
Given gradient vectors at some points (centers) in the parameter space, a
set of QoIs to choose from, and the number of desired QoIs to return, this
method returns the ``num_optsets_return`` best sets of QoIs with respect
to skewness properties. This method is brute force, i.e., if the method is
given 10,000 QoIs and told to return the N best sets of 3, it will check all
10,000 choose 3 possible sets. This can be expensive; methods currently
being developed will take a more careful approach and reduce computational
cost.
:param grad_tensor: Gradient vectors at each point of interest in the
parameter space :math:`\Lambda` for each QoI map.
:type grad_tensor: :class:`np.ndarray` of shape (num_centers, num_qois,
Lambda_dim) where num_centers is the number of points in :math:`\Lambda`
we have approximated the gradient vectors and num_qois is the total
number of possible QoIs to choose from
:param qoiIndices: Set of QoIs to consider from grad_tensor. Default is
range(0, grad_tensor.shape[1])
:type qoiIndices: :class:`np.ndarray` of size (1, num QoIs to consider)
:param int num_qois_return: Number of desired QoIs to use in the
inverse problem. Default is Lambda_dim
:param int num_optsets_return: Number of best sets to return
Default is 10
:rtype: `np.ndarray` of shape (num_optsets_return, num_qois_return + 1)
:returns: condnum_indices_mat
"""
(condnum_indices_mat, _) = chooseOptQoIs_verbose(grad_tensor,
qoiIndices, num_qois_return, num_optsets_return)

return condnum_indices_mat

def chooseOptQoIs_verbose(grad_tensor, qoiIndices=None, num_qois_return=None,
num_optsets_return=None):
r"""
Given gradient vectors at some points (centers) in the parameter space, a set
of QoIs to choose from, and the number of desired QoIs to return, this
method returns the set of optimal QoIs to use in the inverse problem by
choosing the set with optimal skewness properties. A tensor that
represents the singular values of the matrices formed by the gradient
vectors of the optimal QoIs at each center is also returned.
:param grad_tensor: Gradient vectors at each point of interest in the
parameter space :math:`\Lambda` for each QoI map.
:type grad_tensor: :class:`np.ndarray` of shape (num_centers, num_qois,
Lambda_dim) where num_centers is the number of points in :math:`\Lambda`
we have approximated the gradient vectors and num_qois is the total
number of possible QoIs to choose from
:param qoiIndices: Set of QoIs to consider from grad_tensor. Default is
range(0, grad_tensor.shape[1])
:type qoiIndices: :class:`np.ndarray` of size (1, num QoIs to consider)
:param int num_qois_return: Number of desired QoIs to use in the
inverse problem. Default is Lambda_dim
:param int num_optsets_return: Number of best sets to return
Default is 10
:rtype: tuple
:returns: (condnum_indices_mat, optsingvals) where condnum_indices_mat has
shape (num_optsets_return, num_qois_return+1) and optsingvals
has shape (num_centers, num_qois_return, num_optsets_return)
"""
num_centers = grad_tensor.shape[0]
Lambda_dim = grad_tensor.shape[2]
if qoiIndices is None:
qoiIndices = range(0, grad_tensor.shape[1])
if num_qois_return is None:
num_qois_return = Lambda_dim
if num_optsets_return is None:
num_optsets_return = 10

# Find all possible combinations of QoIs
if comm.rank == 0:
qoi_combs = np.array(list(combinations(list(qoiIndices),
num_qois_return)))
print 'Possible sets of QoIs : ', qoi_combs.shape[0]
qoi_combs = np.array_split(qoi_combs, comm.size)
else:
qoi_combs = None

# Scatter them throughout the processors
qoi_combs = comm.scatter(qoi_combs, root=0)

# For each combination, check the skewness and keep the sets
# that have the best skewness, i.e., smallest condition number
condnum_indices_mat = np.zeros([num_optsets_return, num_qois_return + 1])
condnum_indices_mat[:, 0] = 1E11
optsingvals_tensor = np.zeros([num_centers, num_qois_return,
num_optsets_return])
for qoi_set in range(len(qoi_combs)):
singvals = np.linalg.svd(
grad_tensor[:, qoi_combs[qoi_set], :], compute_uv=False)

# Find the centers that have at least one zero singular value
indz = singvals[:, -1] == 0
indnz = singvals[:, -1] != 0

current_condnum = (np.sum(singvals[indnz, 0] / singvals[indnz, -1], \
axis=0) + 1E9 * np.sum(indz)) / singvals.shape[0]

if current_condnum < condnum_indices_mat[-1, 0]:
condnum_indices_mat[-1, :] = np.append(np.array([current_condnum]),
qoi_combs[qoi_set])
order = condnum_indices_mat[:, 0].argsort()
condnum_indices_mat = condnum_indices_mat[order]

optsingvals_tensor[:, :, -1] = singvals
optsingvals_tensor = optsingvals_tensor[:, :, order]

# Wait for all processes to get to this point
comm.Barrier()

# Gather the best sets and condition numbers from each processor
condnum_indices_mat = np.array(comm.gather(condnum_indices_mat, root=0))
optsingvals_tensor = np.array(comm.gather(optsingvals_tensor, root=0))

# Find the num_optsets_return smallest condition numbers from all processors
if comm.rank == 0:
condnum_indices_mat = condnum_indices_mat.reshape(num_optsets_return * \
comm.size, num_qois_return + 1)
optsingvals_tensor = optsingvals_tensor.reshape(num_centers,
num_qois_return, num_optsets_return * comm.size)
order = condnum_indices_mat[:, 0].argsort()

condnum_indices_mat = condnum_indices_mat[order]
condnum_indices_mat = condnum_indices_mat[:num_optsets_return, :]

optsingvals_tensor = optsingvals_tensor[:, :, order]
optsingvals_tensor = optsingvals_tensor[:, :, :num_optsets_return]

condnum_indices_mat = comm.bcast(condnum_indices_mat, root=0)
optsingvals_tensor = comm.bcast(optsingvals_tensor, root=0)

return (condnum_indices_mat, optsingvals_tensor)
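For each candidate set, the loop above forms the num_centers gradient matrices, takes their singular values, and scores the set as the average over centers of the ratio of largest to smallest singular value, adding a 1E9 penalty for every center whose matrix is singular; smaller scores indicate less skewed QoI maps. A hedged usage sketch (the gradient tensor below is random and is only meant to show the expected shapes):

import numpy as np
import bet.sensitivity.chooseQoIs as cQoIs

# Illustrative sizes: 20 centers, 6 candidate QoIs, a 3-dimensional parameter space
grad_tensor = np.random.random((20, 6, 3))

# Return the 5 best sets of 3 QoIs, ranked by the averaged condition number
best_sets = cQoIs.chooseOptQoIs(grad_tensor, num_qois_return=3,
    num_optsets_return=5)

# Each row of best_sets is [condition number, qoi_index_1, qoi_index_2, qoi_index_3]
print best_sets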
