diff --git a/bet/Comm.py b/bet/Comm.py
index d8862d5f..20684abf 100644
--- a/bet/Comm.py
+++ b/bet/Comm.py
@@ -1,42 +1,123 @@
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
"""
-This module provides a workaround for people without mpi4py installed
+This module provides a workaround for people without mpi4py installed
to run BET.
"""
-class comm_for_no_mpi4py:
+
+class comm_for_no_mpi4py(object):
+
+ """
+ Provides a fake MPI.COMM_WORLD implementation so that the user need not
+ install mpi4py.
+ """
+
def __init__(self):
+ """
+ Initialization
+ """
pass
+
def Get_size(self):
+ """
+ :rtype: int
+ :returns: 1
+ """
return 1
+
def Get_rank(self):
- return 0
- def allgather(self,val):
+ """
+ :rtype: int
+    :returns: 0
+ """
+ return 0
+
+ def allgather(self, val):
+ """
+ :param object val: object to allgather
+ :rtype: object
+ :returns: val
+ """
return val
- def allreduce(self,val1, op=None):
+
+ def allreduce(self, val1, op=None):
+ """
+ :param object val1: object to allreduce
+ :rtype: object
+ :returns: val1
+ """
return val1
- def bcast(self,val, root=0):
- return val
- def Allgather(self,val):
+
+ def bcast(self, val, root=0):
+ """
+ :param object val: object to broadcast
+ :param int root: 0
+ :rtype: object
+ :returns: val
+ """
return val
- def Allreduce(self,val1, val2, op=None):
+
+ def Allgather(self, val):
+ """
+ :param object val: object to Allgather
+ :rtype: object
+ :returns: val
+ """
+ return val
+
+ def Allreduce(self, val1, val2, op=None):
+ """
+ :param object val1: object to Allreduce
+ :param object val2: object to Allreduce
+ :param op: None
+ :rtype: object
+ :returns: val1
+ """
return val1
- def Bcast(self,val, root=0):
+
+ def Bcast(self, val, root=0):
+ """
+    :param object val: object to broadcast
+ :param int root: 0
+ :rtype: object
+ :returns: val
+ """
return val
+
def Scatter(self, val1, val2, root=0):
+ """
+ :param object val1: object to Scatter
+ :param object val2: object to Scatter
+ :param int root: 0
+ :rtype: object
+ :returns: val1
+ """
return val1
-class MPI_for_no_mpi4py:
+
+class MPI_for_no_mpi4py(object):
+
+ """
+ Provides a fake MPI implementation so that the user need not install
+ mpi4py.
+ """
+
def __init__(self):
+ """
+ Initialization
+ """
self.SUM = None
self.DOUBLE = float
self.INT = int
-
+ self.BOOL = bool
+
try:
from mpi4py import MPI
comm = MPI.COMM_WORLD
except ImportError:
MPI = MPI_for_no_mpi4py()
comm = comm_for_no_mpi4py()
-
+
size = comm.Get_size()
rank = comm.Get_rank()
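
For reference, a minimal sketch (not part of the patch) of how this fallback behaves: with mpi4py absent, `bet.Comm` still exposes `comm`, `size`, and `rank`, and the collectives degenerate to identity operations on a single process.

```python
# Illustrative sketch, assuming only what bet/Comm.py above defines.
from bet.Comm import comm, MPI, size, rank

local_sum = 3.0
# Without mpi4py this is an identity; under mpirun it is a global sum.
total = comm.allreduce(local_sum, op=MPI.SUM)
print(total)  # 3.0 when size == 1 and rank == 0
```
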
diff --git a/bet/__init__.py b/bet/__init__.py
index 8f8838d3..9d87ac59 100644
--- a/bet/__init__.py
+++ b/bet/__init__.py
@@ -1 +1,3 @@
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
__all__ = ['sampling', 'calculateP', 'postProcess', 'loadBalance', 'util','Comm']
diff --git a/bet/calculateP/__init__.py b/bet/calculateP/__init__.py
index 1950ab39..de72a3b3 100644
--- a/bet/calculateP/__init__.py
+++ b/bet/calculateP/__init__.py
@@ -1,10 +1,16 @@
-r"""
-This subpackage provides classes and methods for calulating the probability
-measure :math:`P_{\Lambda}`.
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
-* :mod:`~bet.calculateP.calculateP` provides methods for approximating probability
- densities
-* :mod:`~bet.calculateP.simpleFunP` provides methods for creating simple function
- approximations of probability densisties
+r"""
+This subpackage provides classes and methods for calculating the
+probability measure :math:`P_{\Lambda}`.
+
+* :mod:`~bet.calculateP.calculateP` provides methods for approximating
+ probability densities
+* :mod:`~bet.calculateP.simpleFunP` provides methods for creating simple
+  function approximations of probability densities
+* :mod:`~bet.calculateP.voronoiHistogram` provides methods for creating the
+ generating points for cells that define a regular grid for use by
+ :meth:`numpy.histogramdd` and for determining their volumes, etc. This
+ module is only for use by :mod:`~bet.calculateP.simpleFunP`.
"""
__all__ = ['calculateP', 'simpleFunP', 'voronoiHistogram']
diff --git a/bet/calculateP/calculateP.py b/bet/calculateP/calculateP.py
index 9a42bf53..40dc6b53 100644
--- a/bet/calculateP/calculateP.py
+++ b/bet/calculateP/calculateP.py
@@ -1,6 +1,8 @@
-r"""
-This module provides methods for calulating the probability
-measure :math:`P_{\Lambda}`.
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
+r"""
+This module provides methods for calculating the probability measure
+:math:`P_{\Lambda}`.
* :mod:`~bet.calculateP.prob_emulated` provides a skeleton class and calculates
the probability for a set of emulation points.
@@ -13,11 +15,12 @@
from bet.Comm import *
import numpy as np
import scipy.spatial as spatial
+import bet.util as util
def emulate_iid_lebesgue(lam_domain, num_l_emulate):
"""
- Parition the parameter space using emulated samples into many voronoi cells.
- These samples are iid so that we can apply the standard MC
+    Partition the parameter space using emulated samples into many voronoi
+ cells. These samples are iid so that we can apply the standard MC
assumuption/approximation
:param lam_domain: The domain for each parameter for the model.
@@ -35,9 +38,10 @@ def emulate_iid_lebesgue(lam_domain, num_l_emulate):
lam_domain.shape[0]))+lam_domain[:, 0]
return lambda_emulate
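
As a quick illustration of the sampling formula above (a sketch with a hypothetical 2D domain, not part of the patch):

```python
import numpy as np

# lam_domain holds per-dimension (min, max); samples are iid uniform.
lam_domain = np.array([[0.0, 1.0], [2.0, 4.0]])  # shape (ndim, 2)
num_l_emulate = 5
width = lam_domain[:, 1] - lam_domain[:, 0]
lambda_emulate = width*np.random.random((num_l_emulate,
    lam_domain.shape[0])) + lam_domain[:, 0]
print(lambda_emulate.shape)  # (5, 2), every row inside the domain
```
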
-def prob_emulated(samples, data, rho_D_M, d_distr_samples, lam_domain,
+def prob_emulated(samples, data, rho_D_M, d_distr_samples,
lambda_emulate=None, d_Tree=None):
r"""
+
Calculates :math:`P_{\Lambda}(\mathcal{V}_{\lambda_{emulate}})`, the
probability assoicated with a set of voronoi cells defined by
``num_l_emulate`` iid samples :math:`(\lambda_{emulate})`.
@@ -47,20 +51,21 @@ def prob_emulated(samples, data, rho_D_M, d_distr_samples, lam_domain,
:param data: The data from running the model given the samples.
:type data: :class:`~numpy.ndarray` of size (num_samples, mdim)
:param rho_D_M: The simple function approximation of rho_D
- :type rho_D_M: :class:`~numpy.ndarray` of shape (M,mdim)
+ :type rho_D_M: :class:`~numpy.ndarray` of shape (M,)
:param d_distr_samples: The samples in the data space that define a
parition of D to for the simple function approximation
:type d_distr_samples: :class:`~numpy.ndarray` of shape (M, mdim)
:param d_Tree: :class:`~scipy.spatial.KDTree` for d_distr_samples
- :param lam_domain: The domain for each parameter for the model.
- :type lam_domain: :class:`~numpy.ndarray` of shape (ndim, 2)
:param lambda_emulate: Samples used to partition the parameter space
:type lambda_emulate: :class:`~numpy.ndarray` of shape (num_l_emulate, ndim)
:rtype: tuple
:returns: (P, lambda_emulate, io_ptr, emulate_ptr, lam_vol)
"""
-
+ if len(samples.shape) == 1:
+ samples = np.expand_dims(samples, axis=1)
+ if len(data.shape) == 1:
+ data = np.expand_dims(data, axis=1)
if lambda_emulate == None:
lambda_emulate = samples
if len(d_distr_samples.shape) == 1:
@@ -69,22 +74,16 @@ def prob_emulated(samples, data, rho_D_M, d_distr_samples, lam_domain,
d_Tree = spatial.KDTree(d_distr_samples)
# Determine which inputs go to which M bins using the QoI
- #io_ptr = dsearchn(d_distr_samples, data);
(_, io_ptr) = d_Tree.query(data)
# Determine which emulated samples match with which model run samples
l_Tree = spatial.KDTree(samples)
- (tree_length, emulate_ptr) = l_Tree.query(lambda_emulate)
+ (_, emulate_ptr) = l_Tree.query(lambda_emulate)
# Calculate Probabilties
P = np.zeros((lambda_emulate.shape[0],))
d_distr_emu_ptr = np.zeros(emulate_ptr.shape)
- #io_ptr_inverse = np.zeros(io_ptr.shape)
- # for i in range(rho_D_M.shape[0]):
- # Itemp = np.equal(io_ptr, i)
- # l_ind = np.nonzero(Itemp)
- # io_ptr_inverse[l_ind] = i
- d_distr_emu_ptr = io_ptr[emulate_ptr] #io_ptr_inverse[emulate_ptr]
+ d_distr_emu_ptr = io_ptr[emulate_ptr]
for i in range(rho_D_M.shape[0]):
Itemp = np.equal(d_distr_emu_ptr, i)
Itemp_sum = np.sum(Itemp)
@@ -94,8 +93,9 @@ def prob_emulated(samples, data, rho_D_M, d_distr_samples, lam_domain,
return (P, lambda_emulate, io_ptr, emulate_ptr)
-def prob(samples, data, rho_D_M, d_distr_samples, lam_domain, d_Tree=None):
+def prob(samples, data, rho_D_M, d_distr_samples, d_Tree=None):
r"""
+
Calculates :math:`P_{\Lambda}(\mathcal{V}_{\lambda_{samples}})`, the
probability assoicated with a set of voronoi cells defined by the model
solves at :math:`(\lambda_{samples})` where the volumes of these voronoi
@@ -106,125 +106,57 @@ def prob(samples, data, rho_D_M, d_distr_samples, lam_domain, d_Tree=None):
:param data: The data from running the model given the samples.
:type data: :class:`~numpy.ndarray` of size (num_samples, mdim)
:param rho_D_M: The simple function approximation of rho_D
- :type rho_D_M: :class:`~numpy.ndarray` of shape (M,mdim)
+ :type rho_D_M: :class:`~numpy.ndarray` of shape (M,)
:param d_distr_samples: The samples in the data space that define a
parition of D to for the simple function approximation
:type d_distr_samples: :class:`~numpy.ndarray` of shape (M, mdim)
:param d_Tree: :class:`~scipy.spatial.KDTree` for d_distr_samples
- :param lam_domain: The domain for each parameter for the model.
- :type lam_domain: :class:`~numpy.ndarray` of shape (ndim, 2)
:rtype: tuple of :class:`~numpy.ndarray` of sizes (num_samples,),
(num_samples,), (ndim, num_l_emulate), (num_samples,), (num_l_emulate,)
- :returns: (P, lam_vol, lambda_emulate, io_ptr, emulate_ptr) where P is the
- probability associated with samples, lam_vol the volumes associated
- with the samples, io_ptr a pointer from data to M bins, and emulate_ptr
- a pointer from emulated samples to samples (in parameter space)
+    :returns: (P, lam_vol, io_ptr) where P is the probability associated
+        with samples, lam_vol the volumes associated with the samples, and
+        io_ptr a pointer from data to M bins.
"""
- # Calculate pointers and volumes
- (P, lambda_emulate, io_ptr, emulate_ptr) = prob_emulated(samples, data,
- rho_D_M, d_distr_samples, lam_domain, None, d_Tree)
-
- # Apply the standard MC approximation
- lam_vol = np.ones((samples.shape[0],))
- # Calculate Probabilities
- P = np.zeros((samples.shape[0],))
- for i in range(rho_D_M.shape[0]):
- Itemp = np.equal(io_ptr, i)
- Itemp_sum = np.sum(lam_vol[Itemp])
- Itemp_sum = comm.allreduce(Itemp_sum, op=MPI.SUM)
- if Itemp_sum > 0:
- P[Itemp] = rho_D_M[i]*lam_vol[Itemp]/Itemp_sum
-
- return (P, lam_vol, io_ptr, emulate_ptr)
-
-def prob_qhull(samples, data, rho_D_M, d_distr_samples,
- lam_domain, d_Tree=None):
- r"""
- Calculates :math:`P_{\Lambda}(\mathcal{V}_{\lambda_{emulate}})`, the
- probability assoicated with a set of voronoi cells defined by
- ``num_l_emulate`` iid samples :math:`(\lambda_{emulate})`.
-
- This method is only intended when ``lam_domain`` is a generalized rectangle.
-
- :param samples: The samples in parameter space for which the model was run.
- :type samples: :class:`~numpy.ndarray` of shape (num_samples, ndim)
- :param data: The data from running the model given the samples.
- :type data: :class:`~numpy.ndarray` of size (num_samples, mdim)
- :param rho_D_M: The simple function approximation of rho_D
- :type rho_D_M: :class:`~numpy.ndarray` of shape (M,mdim)
- :param d_distr_samples: The samples in the data space that define a
- parition of D to for the simple function approximation
- :type d_distr_samples: :class:`~numpy.ndarray` of shape (M,mdim)
- :param d_Tree: :class:`~scipy.spatial.KDTree` for d_distr_samples
- :param lam_domain: The domain for each parameter for the model.
- :type lam_domain: :class:`~numpy.ndarray` of shape (ndim, 2)
- :returns: (P, io_ptr, lam_vol)
-
- """
- import pyhull
+ if len(samples.shape) == 1:
+ samples = np.expand_dims(samples, axis=1)
+ if len(data.shape) == 1:
+ data = np.expand_dims(data, axis=1)
if len(d_distr_samples.shape) == 1:
d_distr_samples = np.expand_dims(d_distr_samples, axis=1)
-
if d_Tree == None:
d_Tree = spatial.KDTree(d_distr_samples)
+
+ # Set up local arrays for parallelism
+ local_index = range(0+rank, samples.shape[0], size)
+ samples_local = samples[local_index, :]
+ data_local = data[local_index, :]
+ local_array = np.array(local_index)
# Determine which inputs go to which M bins using the QoI
- #io_ptr = dsearchn(d_distr_samples, data);
- io_ptr = d_Tree.query(data)
+ (_, io_ptr) = d_Tree.query(data_local)
- # Calcuate the bounding region for the parameters
- lam_bound = np.copy(samples)
- lam_width = lam_domain[:, 1] - lam_domain[:, 0]
- nbins = d_distr_samples.shape[1]
- # Add fake samples outside of lam_domain to close Voronoi tesselations.
- pts_per_edge = nbins
- sides = np.zeros((2, pts_per_edge))
- for i in range(lam_domain.shape[0]):
- sides[i, :] = np.linspace(lam_domain[i, 0], lam_domain[i, 1],
- pts_per_edge)
- # add midpoints
- for i in range(lam_domain.shape[0]):
- new_pt = sides
- new_pt[i, :] = np.repeat(lam_domain[i, 0] - lam_width[i]/pts_per_edge,
- pts_per_edge, 0).transpose()
- lam_bound = np.vstack((lam_bound, new_pt))
- new_pt = sides
- new_pt[i, :] = np.repeat(lam_domain[i, 1] - lam_width[i]/pts_per_edge,
- pts_per_edge, 0).transpose()
- lam_bound = np.vstack((lam_bound, new_pt))
-
- # add corners
- corners = np.zeros((2**lam_domain.shape[0], lam_domain.shape[0]))
- for i in range(lam_domain.shape[0]):
- corners[i, :] = lam_domain[i, np.repeat(np.hstack((np.ones((1,
- 2**(i-1))), 2*np.ones((1, 2**(i - 1))))),
- 2**(lam_domain.shape[0]-i), 0).transpose()]
- corners[i, :] += lam_width[i]*np.repeat(np.hstack((np.ones((1,
- 2**(i-1))), -np.ones((1, 2**(i - 1))))),
- 2**(lam_domain.shape[0]-i)/pts_per_edge, 0).transpose()
-
- lam_bound = np.vstack((lam_bound, corners))
-
- # Calculate the Voronoi diagram for samples. Calculate the volumes of
- # the convex hulls of the corresponding Voronoi regions.
- lam_vol = np.zeros((samples.shape[-1],))
- for i in range((samples.shape[0])):
- vornoi = spatial.Voronoi(lam_bound)
- lam_vol[i] = float(pyhull.qconvex('Qt FA', vornoi.vertices).split()[-1])
-
- # Calculate probabilities.
- P = np.zeros((samples.shape[0],))
+ # Apply the standard MC approximation and
+ # calculate probabilities
+ P_local = np.zeros((samples_local.shape[0],))
for i in range(rho_D_M.shape[0]):
Itemp = np.equal(io_ptr, i)
- P[Itemp] = rho_D_M[i]*lam_vol[Itemp]/np.sum(lam_vol[Itemp])
- P = P/np.sum[P]
+ Itemp_sum = np.sum(Itemp)
+ Itemp_sum = comm.allreduce(Itemp_sum, op=MPI.SUM)
+ if Itemp_sum > 0:
+ P_local[Itemp] = rho_D_M[i]/Itemp_sum
+ P_global = util.get_global_values(P_local)
+ global_index = util.get_global_values(local_array)
+ P = np.zeros(P_global.shape)
+ P[global_index] = P_global[:]
+
+ lam_vol = (1.0/float(samples.shape[0]))*np.ones((samples.shape[0],))
return (P, lam_vol, io_ptr)
def prob_mc(samples, data, rho_D_M, d_distr_samples,
- lam_domain, lambda_emulate=None, d_Tree=None):
- """
+ lambda_emulate=None, d_Tree=None):
+ r"""
Calculates :math:`P_{\Lambda}(\mathcal{V}_{\lambda_{samples}})`, the
probability assoicated with a set of voronoi cells defined by the model
solves at :math:`(\lambda_{samples})` where the volumes of these voronoi
@@ -235,15 +167,13 @@ def prob_mc(samples, data, rho_D_M, d_distr_samples,
:param data: The data from running the model given the samples.
:type data: :class:`~numpy.ndarray` of size (num_samples, mdim)
:param rho_D_M: The simple function approximation of rho_D
- :type rho_D_M: :class:`~numpy.ndarray` of shape (M, mdim)
+ :type rho_D_M: :class:`~numpy.ndarray` of shape (M,)
:param d_distr_samples: The samples in the data space that define a
parition of D to for the simple function approximation
:type d_distr_samples: :class:`~numpy.ndarray` of shape (M, mdim)
:param d_Tree: :class:`~scipy.spatial.KDTree` for d_distr_samples
- :param lam_domain: The domain for each parameter for the model.
- :type lam_domain: :class:`~numpy.ndarray` of shape (ndim,2)
- :param int num_l_emulate: The number of iid samples used to parition the
- parameter space
+ :param lambda_emulate: Samples used to partition the parameter space
+
:rtype: tuple of :class:`~numpy.ndarray` of sizes (num_samples,),
(num_samples,), (ndim, num_l_emulate), (num_samples,), (num_l_emulate,)
:returns: (P, lam_vol, lambda_emulate, io_ptr, emulate_ptr) where P is the
@@ -252,15 +182,28 @@ def prob_mc(samples, data, rho_D_M, d_distr_samples,
a pointer from emulated samples to samples (in parameter space)
"""
- # Calculate pointers and volumes
- (P, lambda_emulate, io_ptr, emulate_ptr) = prob_emulated(samples,
- data, rho_D_M, d_distr_samples, lam_domain, lambda_emulate,
- d_Tree)
+ if len(samples.shape) == 1:
+ samples = np.expand_dims(samples, axis=1)
+ if len(data.shape) == 1:
+ data = np.expand_dims(data, axis=1)
+ if lambda_emulate == None:
+ lambda_emulate = samples
+ if len(d_distr_samples.shape) == 1:
+ d_distr_samples = np.expand_dims(d_distr_samples, axis=1)
+ if d_Tree == None:
+ d_Tree = spatial.KDTree(d_distr_samples)
+
+ # Determine which inputs go to which M bins using the QoI
+ (_, io_ptr) = d_Tree.query(data)
+ # Determine which emulated samples match with which model run samples
+ l_Tree = spatial.KDTree(samples)
+ (_, emulate_ptr) = l_Tree.query(lambda_emulate)
+
# Apply the standard MC approximation to determine the number of emulated
# samples per model run sample. This is for approximating
# \mu_Lambda(A_i \intersect b_j)
- lam_vol = np.zeros((samples.shape[0],)) #lambda_emulate),))
+ lam_vol = np.zeros((samples.shape[0],))
for i in range(samples.shape[0]):
lam_vol[i] = np.sum(np.equal(emulate_ptr, i))
clam_vol = np.copy(lam_vol)
@@ -268,16 +211,28 @@ def prob_mc(samples, data, rho_D_M, d_distr_samples,
lam_vol = clam_vol
lam_vol = lam_vol/(len(lambda_emulate)*size)
+ # Set up local arrays for parallelism
+ local_index = range(0+rank, samples.shape[0], size)
+ samples_local = samples[local_index, :]
+ data_local = data[local_index, :]
+ lam_vol_local = lam_vol[local_index]
+ local_array = np.array(local_index)
+
+ # Determine which inputs go to which M bins using the QoI
+ (_, io_ptr_local) = d_Tree.query(data_local)
+
# Calculate Probabilities
- P = np.zeros((samples.shape[0],))
+ P_local = np.zeros((samples_local.shape[0],))
for i in range(rho_D_M.shape[0]):
- Itemp = np.equal(io_ptr, i)
- # Prevent a divide by zero error
- Itemp_sum = np.sum(lam_vol[Itemp])
+ Itemp = np.equal(io_ptr_local, i)
+ Itemp_sum = np.sum(lam_vol_local[Itemp])
Itemp_sum = comm.allreduce(Itemp_sum, op=MPI.SUM)
if Itemp_sum > 0:
- P[Itemp] = rho_D_M[i]*lam_vol[Itemp]/Itemp_sum
-
+ P_local[Itemp] = rho_D_M[i]*lam_vol_local[Itemp]/Itemp_sum
+ P_global = util.get_global_values(P_local)
+ global_index = util.get_global_values(local_array)
+ P = np.zeros(P_global.shape)
+ P[global_index] = P_global[:]
return (P, lam_vol, lambda_emulate, io_ptr, emulate_ptr)
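
The parallelization added to `prob` and `prob_mc` is an owner-computes pattern with strided ownership; below is a serial sketch (size=1, rank=0, with a stand-in local result, not part of the patch) of how the global probability array is reassembled. In the patch itself, `bet.util.get_global_values` performs the allgather step.

```python
import numpy as np

size, rank = 1, 0  # stand-ins for the values exported by bet.Comm
num_samples = 6
local_index = range(0 + rank, num_samples, size)  # strided ownership
local_array = np.array(local_index)
P_local = 0.1*np.ones(len(local_index))  # hypothetical local result
P = np.zeros(num_samples)
P[local_array] = P_local  # under MPI, P_local is allgathered first
print(P)  # [ 0.1  0.1  0.1  0.1  0.1  0.1]
```
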
diff --git a/bet/calculateP/simpleFunP.py b/bet/calculateP/simpleFunP.py
index 7da96544..e2340194 100644
--- a/bet/calculateP/simpleFunP.py
+++ b/bet/calculateP/simpleFunP.py
@@ -1,3 +1,5 @@
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
"""
This module provides methods for creating simple funciton approximations to be
used by :mod:`~bet.calculateP.calculateP`.
@@ -6,6 +8,8 @@
import numpy as np
import scipy.spatial as spatial
import bet.calculateP.voronoiHistogram as vHist
+import collections
+import bet.util as util
def unif_unif(data, Q_ref, M=50, bin_ratio=0.2, num_d_emulate=1E6):
r"""
@@ -43,39 +47,34 @@ def unif_unif(data, Q_ref, M=50, bin_ratio=0.2, num_d_emulate=1E6):
:param Q_ref: :math:`Q(`\lambda_{reference})`
:type Q_ref: :class:`~numpy.ndarray` of size (mdim,)
:rtype: tuple
- :returns: (rho_D_M, d_distr_samples, d_Tree) where ``rho_D_M`` and
- ``d_distr_samples`` are (mdim, M) :class:`~numpy.ndarray` and `d_Tree` is
+ :returns: (rho_D_M, d_distr_samples, d_Tree) where ``rho_D_M`` is (M,) and
+ ``d_distr_samples`` are (M, mdim) :class:`~numpy.ndarray` and `d_Tree` is
the :class:`~scipy.spatial.KDTree` for d_distr_samples
"""
- if len(data.shape) == 1:
- data = np.expand_dims(data, axis=1)
- # Determine the appropriate bin size for this QoI
- data_max = np.max(data, 0)
- data_min = np.min(data, 0)
- bin_size = (data_max-data_min)*bin_ratio
+ data = util.fix_dimensions_data(data)
+ bin_size = (np.max(data, 0) - np.min(data, 0))*bin_ratio
r'''
Create M samples defining M Voronoi cells (i.e., "bins") in D used to
define the simple function approximation :math:`\rho_{\mathcal{D},M}`.
This does not have to be random, but here we assume this to be the case.
- We can choose these samples deterministically but that fails to scale
- with dimension efficiently.
+ We can choose these samples deterministically but that fails to scale with
+ dimension efficiently.
Note that these M samples are chosen for the sole purpose of determining
the bins used to create the approximation to :math:`rho_{\mathcal{D}}`.
- We call these M samples "d_distr_samples" because they are samples
- on the data space and the distr implies these samples are chosen
- to create the approximation to the probability measure (distribution)
- on D.
+ We call these M samples "d_distr_samples" because they are samples on the
+ data space and the distr implies these samples are chosen to create the
+ approximation to the probability measure (distribution) on D.
- Note that we create these samples in a set containing the hyperrectangle
- in order to get output cells with zero probability. If all of the
- d_dstr_samples were taken from within the support of :math:`\rho_{\mathcal{D}}`
- then each of the M bins would have positive probability. This would
- in turn imply that the support of :math:`\rho_{\Lambda}` is all of
- :math:`\Lambda`.
+ Note that we create these samples in a set containing the hyperrectangle in
+ order to get output cells with zero probability. If all of the
+    d_distr_samples were taken from within the support of
+ :math:`\rho_{\mathcal{D}}` then each of the M bins would have positive
+ probability. This would in turn imply that the support of
+ :math:`\rho_{\Lambda}` is all of :math:`\Lambda`.
'''
if rank == 0:
d_distr_samples = 1.5*bin_size*(np.random.random((M,
@@ -119,46 +118,6 @@ def unif_unif(data, Q_ref, M=50, bin_ratio=0.2, num_d_emulate=1E6):
'''
return (rho_D_M, d_distr_samples, d_Tree)
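
To make the `bin_size` computation in `unif_unif` concrete (a sketch with hypothetical 1D data, not part of the patch):

```python
import numpy as np

data = np.array([[0.0], [2.0], [10.0]])  # shape (num_samples, mdim)
bin_ratio = 0.2
bin_size = (np.max(data, 0) - np.min(data, 0))*bin_ratio
print(bin_size)  # [ 2.], i.e. 20% of the data range in each dimension
```
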
-def hist_regular(data, distr_samples, nbins):
- """
- create nbins regulary spaced bins
- check to make sure each bin has about 1 data sample per bin, if not
- recompute bins
- (hist, edges) = histdd(distr_samples, bins)
- http://docs.scipy.org/doc/numpy/reference/generated/numpy.histogramdd.html#numpy.histogramdd
- determine d_distr_samples from edges
- """
- pass
-
-def hist_gaussian(data, distr_samples, nbins):
- """
- determine mean, standard deviation of distr_samples
- partition D into nbins of equal probability for N(mean, sigma)
- check to make sure each bin has about 1 data sample per bin, if not
- recompute bins
- (hist, edges) = histdd(distr_samples, bins)
- determine d_distr_samples from edges
- """
- pass
-
-def hist_unif(data, distr_samples, nbins):
- """
- same as hist_regular bit with uniformly spaced bins
- unif_unif can and should call this function
- """
- pass
-
-def gaussian_regular(data, Q_ref, std, nbins, num_d_emulate=1E6):
- pass
- #return (d_distr_prob, d_distr_samples, d_Tree)
-
-def multivariate_gaussian(x, mean, std):
- dim = len(mean)
- detDiagCovMatrix = np.sqrt(np.prod(np.diag(std(std))))
- frac = (2.0*np.pi)**(-dim/2.0) * (1.0/detDiagCovMatrix)
- fprime = x-mean
- return frac*np.exp(-0.5*np.dot(fprime, 1.0/np.diag(std*std)))
-
def normal_normal(Q_ref, M, std, num_d_emulate=1E6):
r"""
Creates a simple function approximation of :math:`\rho_{\mathcal{D},M}`
@@ -170,15 +129,15 @@ def normal_normal(Q_ref, M, std, num_d_emulate=1E6):
:math:`\rho_{\mathcal{D},M}` The choice of M is something of an "art" -
play around with it and you can get reasonable results with a
relatively small number here like 50.
- :param int num_d_emulate: Number of samples used to emulate using an MC
+    :param int num_d_emulate: Number of samples used to emulate using an MC
assumption
:param Q_ref: :math:`Q(\lambda_{reference})`
:type Q_ref: :class:`~numpy.ndarray` of size (mdim,)
:param std: The standard deviation of each QoI
:type std: :class:`~numpy.ndarray` of size (mdim,)
:rtype: tuple
- :returns: (rho_D_M, d_distr_samples, d_Tree) where ``rho_D_M`` and
- ``d_distr_samples`` are (mdim, M) :class:`~numpy.ndarray` and `d_Tree` is
+ :returns: (rho_D_M, d_distr_samples, d_Tree) where ``rho_D_M`` is (M,) and
+ ``d_distr_samples`` are (M, mdim) :class:`~numpy.ndarray` and `d_Tree` is
the :class:`~scipy.spatial.KDTree` for d_distr_samples
"""
@@ -186,10 +145,18 @@ def normal_normal(Q_ref, M, std, num_d_emulate=1E6):
r'''Create M smaples defining M bins in D used to define
:math:`\rho_{\mathcal{D},M}` rho_D is assumed to be a multi-variate normal
distribution with mean Q_ref and standard deviation std.'''
+ if not isinstance(Q_ref, collections.Iterable):
+ Q_ref = np.array([Q_ref])
+ if not isinstance(std, collections.Iterable):
+ std = np.array([std])
- covariance = np.diag(std*std)
+ covariance = std**2
d_distr_samples = np.zeros((M, len(Q_ref)))
+ print "d_distr_samples.shape", d_distr_samples.shape
+ print "Q_ref.shape", Q_ref.shape
+ print "std.shape", std.shape
+
if rank == 0:
for i in range(len(Q_ref)):
d_distr_samples[:, i] = np.random.normal(Q_ref[i], std[i], M)
@@ -254,12 +221,11 @@ def unif_normal(Q_ref, M, std, num_d_emulate=1E6):
:param std: The standard deviation of each QoI
:type std: :class:`~numpy.ndarray` of size (mdim,)
:rtype: tuple
- :returns: (rho_D_M, d_distr_samples, d_Tree) where ``rho_D_M`` and
- ``d_distr_samples`` are (mdim, M) :class:`~numpy.ndarray` and `d_Tree` is
+ :returns: (rho_D_M, d_distr_samples, d_Tree) where ``rho_D_M`` is (M,) and
+ ``d_distr_samples`` are (M, mdim) :class:`~numpy.ndarray` and `d_Tree` is
the :class:`~scipy.spatial.KDTree` for d_distr_samples
"""
- import scipy.stats as stats
r'''Create M smaples defining M bins in D used to define
:math:`\rho_{\mathcal{D},M}` rho_D is assumed to be a multi-variate normal
distribution with mean Q_ref and standard deviation std.'''
@@ -306,10 +272,6 @@ def unif_normal(Q_ref, M, std, num_d_emulate=1E6):
# solving the model EVER! This can be done "offline" so to speak.
return (rho_D_M, d_distr_samples, d_Tree)
-def gaussian_unif(data, Q_ref, std, nbins, num_d_emulate=1E6):
- pass
- #return (d_distr_prob, d_distr_samples, d_Tree)
-
def uniform_hyperrectangle_user(data, domain, center_pts_per_edge=1):
r"""
Creates a simple funciton appoximation of :math:`\rho_{\mathcal{D},M}`
@@ -330,71 +292,68 @@ def uniform_hyperrectangle_user(data, domain, center_pts_per_edge=1):
additional two points will be added to create the bounding layer
:rtype: tuple
- :returns: (rho_D_M, d_distr_samples, d_Tree) where ``rho_D_M`` and
- ``d_distr_samples`` are (mdim, M) :class:`~numpy.ndarray` and `d_Tree`
+ :returns: (rho_D_M, d_distr_samples, d_Tree) where ``rho_D_M`` is (M,) and
+ ``d_distr_samples`` are (M, mdim) :class:`~numpy.ndarray` and `d_Tree`
is the :class:`~scipy.spatial.KDTree` for d_distr_samples
"""
- # determine the center of the domain
- if len(domain.shape) == 1:
- domain = np.expand_dims(domain, axis=1)
+ # make sure the shape of the data and the domain are correct
+ data = util.fix_dimensions_data(data)
+ domain = util.fix_dimensions_data(domain, data.shape[1])
domain_center = np.mean(domain, 0)
- domain_min = np.min(domain, 0)
- domain_max = np.max(domain, 0)
- domain_lengths = domain_max - domain_min
-
- # determine the ratio of the lengths of the domain to the lengths of the
- # hyperrectangle containing the data
- if len(data.shape) == 1:
- data = np.expand_dims(data, axis=1)
- data_max = np.max(data, 0)
- data_min = np.min(data, 0)
- data_lengths = data_max - data_min
- bin_ratios = domain_lengths/data_lengths
-
- return uniform_hyperrectangle(data, domain_center, bin_ratios,
+ domain_lengths = np.max(domain, 0) - np.min(domain, 0)
+
+ return uniform_hyperrectangle_binsize(data, domain_center, domain_lengths,
center_pts_per_edge)
def uniform_hyperrectangle_binsize(data, Q_ref, bin_size, center_pts_per_edge=1):
r"""
Creates a simple function approximation of :math:`\rho_{\mathcal{D},M}`
where :math:`\rho_{\mathcal{D},M}` is a uniform probability density
- centered at Q_ref with bin_size of the width
- of D.
+    centered at Q_ref with sides of length bin_size.
Since rho_D is a uniform distribution on a hyperrectanlge we should be able
to represent it exactly with ``M = 3^mdim`` or rather
``len(d_distr_samples) == 3^mdim``.
- :param bin_size: The size used to determine the width of the
- uniform distribution
- :type bin_size: double or list()
- :param int num_d_emulate: Number of samples used to emulate using an MC
+ :param bin_size: The size used to determine the width of the uniform
+ distribution
+ :type bin_size: double or list()
+ :param int num_d_emulate: Number of samples used to emulate using an MC
assumption
- :param data: Array containing QoI data where the QoI is mdim diminsional
- :type data: :class:`~numpy.ndarray` of size (num_samples, mdim)
- :param Q_ref: :math:`Q(\lambda_{reference})`
- :type Q_ref: :class:`~numpy.ndarray` of size (mdim,)
- :param list() center_pts_per_edge: number of center points per edge and
- additional two points will be added to create the bounding layer
-
- :rtype: tuple
- :returns: (rho_D_M, d_distr_samples, d_Tree) where ``rho_D_M`` and
- ``d_distr_samples`` are (mdim, M) :class:`~numpy.ndarray` and `d_Tree`
- is the :class:`~scipy.spatial.KDTree` for d_distr_samples
+ :param data: Array containing QoI data where the QoI is mdim diminsional
+ :type data: :class:`~numpy.ndarray` of size (num_samples, mdim)
+ :param Q_ref: :math:`Q(\lambda_{reference})`
+ :type Q_ref: :class:`~numpy.ndarray` of size (mdim,)
+ :param list() center_pts_per_edge: number of center points per edge
+ and additional two points will be added to create the bounding layer
+
+    :rtype: tuple
+    :returns: (rho_D_M, d_distr_samples, d_Tree) where ``rho_D_M`` is (M,)
+        and ``d_distr_samples`` are (M, mdim) :class:`~numpy.ndarray` and
+        `d_Tree` is the :class:`~scipy.spatial.KDTree` for d_distr_samples
"""
- if len(data.shape) == 1:
- data = np.expand_dims(data, axis=1)
- data_max = np.max(data, 0)
- data_min = np.min(data, 0)
-
- sur_domain = np.zeros((data.shape[1], 2))
- sur_domain[:, 0] = data_min
- sur_domain[:, 1] = data_max
+ data = util.fix_dimensions_data(data)
+
+ if not isinstance(center_pts_per_edge, collections.Iterable):
+ center_pts_per_edge = np.ones((data.shape[1],)) * center_pts_per_edge
+ else:
+ if not len(center_pts_per_edge) == data.shape[1]:
+ center_pts_per_edge = np.ones((data.shape[1],))
+ print 'Warning: center_pts_per_edge dimension mismatch.'
+ print 'Using 1 in each dimension.'
+ if np.any(np.less(center_pts_per_edge, 0)):
+ print 'Warning: center_pts_per_edge must be greater than 0'
+ if not isinstance(bin_size, collections.Iterable):
+ bin_size = bin_size*np.ones((data.shape[1],))
+ if np.any(np.less(bin_size, 0)):
+        print 'Warning: bin_size must be greater than 0'
+
+ sur_domain = np.array([np.min(data, 0), np.max(data, 0)]).transpose()
+
points, _, rect_domain = vHist.center_and_layer1_points_binsize(center_pts_per_edge,
Q_ref, bin_size, sur_domain)
- edges = vHist.edges_regular_binsize(center_pts_per_edge, Q_ref, bin_size,
- sur_domain)
+ edges = vHist.edges_regular(center_pts_per_edge, rect_domain, sur_domain)
_, volumes, _ = vHist.histogramdd_volumes(edges, points)
return vHist.simple_fun_uniform(points, volumes, rect_domain)
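
The `sur_domain` construction above packs the data's bounding box into an (mdim, 2) array; a sketch with hypothetical 2D data (not part of the patch):

```python
import numpy as np

data = np.array([[0.0, 5.0], [2.0, 7.0], [1.0, 6.0]])
sur_domain = np.array([np.min(data, 0), np.max(data, 0)]).transpose()
print(sur_domain)  # [[ 0.  2.]
                   #  [ 5.  7.]] -- each row is (min, max) for one dim
```
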
@@ -422,37 +381,19 @@ def uniform_hyperrectangle(data, Q_ref, bin_ratio, center_pts_per_edge=1):
additional two points will be added to create the bounding layer
:rtype: tuple
- :returns: (rho_D_M, d_distr_samples, d_Tree) where ``rho_D_M`` and
- ``d_distr_samples`` are (mdim, M) :class:`~numpy.ndarray` and `d_Tree`
+ :returns: (rho_D_M, d_distr_samples, d_Tree) where ``rho_D_M`` is (M,) and
+ ``d_distr_samples`` are (M, mdim) :class:`~numpy.ndarray` and `d_Tree`
is the :class:`~scipy.spatial.KDTree` for d_distr_samples
"""
- if len(data.shape) == 1:
- data = np.expand_dims(data, axis=1)
- data_max = np.max(data, 0)
- data_min = np.min(data, 0)
-
- # TO-DO: Check for inputted center_pts_per_edge in case given as list
- # or as numpy array to see if dimensions match data space dimensions and
- # that positive integer values are being used. Also, create this change
- # elsewhere since center_pts_per_edge is only a scalar if dim(D)=1.
- if not isinstance(center_pts_per_edge, np.ndarray):
- center_pts_per_edge = np.ones((data.shape[1])) * center_pts_per_edge
- else:
- if not len(center_pts_per_edge) == data.shape[1]:
- center_pts_per_edge = np.ones((data.shape[1]))
- print 'Warning: center_pts_per_edge dimension mismatch.'
- print 'Using 1 in each dimension.'
+ data = util.fix_dimensions_data(data)
- sur_domain = np.zeros((data.shape[1], 2))
- sur_domain[:, 0] = data_min
- sur_domain[:, 1] = data_max
- points, _, rect_domain = vHist.center_and_layer1_points(center_pts_per_edge,
- Q_ref, bin_ratio, sur_domain)
- edges = vHist.edges_regular(center_pts_per_edge, Q_ref, bin_ratio,
- sur_domain)
- _, volumes, _ = vHist.histogramdd_volumes(edges, points)
- return vHist.simple_fun_uniform(points, volumes, rect_domain)
+ if not isinstance(bin_ratio, collections.Iterable):
+ bin_ratio = bin_ratio*np.ones((data.shape[1], ))
+
+ bin_size = (np.max(data, 0) - np.min(data, 0))*bin_ratio
+ return uniform_hyperrectangle_binsize(data, Q_ref, bin_size,
+ center_pts_per_edge)
def uniform_data(data):
r"""
@@ -470,12 +411,12 @@ def uniform_data(data):
additional two points will be added to create the bounding layer
:rtype: tuple
- :returns: (rho_D_M, d_distr_samples, d_Tree) where ``rho_D_M`` and
- ``d_distr_samples`` are (mdim, M) :class:`~numpy.ndarray` and `d_Tree`
+ :returns: (rho_D_M, d_distr_samples, d_Tree) where ``rho_D_M`` is (M,) and
+ ``d_distr_samples`` are (M, mdim) :class:`~numpy.ndarray` and `d_Tree`
is the :class:`~scipy.spatial.KDTree` for d_distr_samples
"""
- d_distr_prob = np.ones((data.shape[1],))
- if len(data.shape) == 1:
- data = np.expand_dims(data, axis=1)
+ data = util.fix_dimensions_data(data)
+
+ d_distr_prob = np.ones((data.shape[0],), dtype=np.float)/data.shape[0]
d_Tree = spatial.KDTree(data)
return (d_distr_prob, data, d_Tree)
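
`uniform_data` now assigns equal probability to each data point's voronoi cell; a sketch with three hypothetical 2D points (not part of the patch):

```python
import numpy as np

data = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
d_distr_prob = np.ones((data.shape[0],), dtype=float)/data.shape[0]
print(d_distr_prob)  # [ 0.33333333  0.33333333  0.33333333], sums to 1
```
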
diff --git a/bet/calculateP/voronoiHistogram.py b/bet/calculateP/voronoiHistogram.py
index 6e0451bb..28eac24f 100644
--- a/bet/calculateP/voronoiHistogram.py
+++ b/bet/calculateP/voronoiHistogram.py
@@ -1,9 +1,18 @@
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
# -*- coding: utf-8 -*-
+"""
+This module provides methods for creating the edges and generating points for
+regular (hyperrectangle) multidimensional voronoi cells and for determining the
+volumes of these cells.
+"""
+
import numpy as np
from scipy import spatial
import bet.util as util
-def center_and_layer1_points_binsize(center_pts_per_edge, center, r_size, sur_domain):
+def center_and_layer1_points_binsize(center_pts_per_edge, center, r_size,
+ sur_domain):
"""
Generates a regular grid of center points that define the voronoi
tesselation of exactly the interior of a hyperrectangle centered at
@@ -19,30 +28,30 @@ def center_and_layer1_points_binsize(center_pts_per_edge, center, r_size, sur_do
:param center: location of the center of the hyperrectangle
:type center: :class:`numpy.ndarray` of shape (mdim,)
:param r_size: size of the length of the sides of the
- hyperrectangle to the surrounding domain
+        hyperrectangle rect_domain for which to define voronoi cells
:type r_size: double or list()
:param sur_domain: minima and maxima of each dimension defining the
- surrounding domain
+ surrounding domain. The surrounding domain is the bounded domain
+ in the data space (i.e. the data domain).
:type sur_domain: :class:`numpy.ndarray` of shape (mdim, 2)
:rtype: tuple
- :returns: (points, interior_and_layer1) where where points is an
- :class:`numpy.ndarray` of shape (num_points, dim), interior_and_layer1
- is a list() of dim :class:`numpy.ndarray`s of shape
- (center_pts_per_edge+2,).
+    :returns: (points, interior_and_layer1, rect_domain) where points is
+ an :class:`numpy.ndarray` of shape (num_points, dim),
+ interior_and_layer1 is a list() of dim :class:`numpy.ndarray`s of shape
+ (center_pts_per_edge+2,), rect_domain is a :class:`numpy.ndarray` of
+ shape (mdim, 2)
"""
- if np.all(np.greater(r_size, 1)):
+ # determine the hyperrectangle (rect_domain) defined by center and r_size
+ rect_width = r_size*np.ones(sur_domain[:, 0].shape)
+ rect_domain = np.column_stack([center - .5*rect_width,
+ center + .5*rect_width])
+    if np.any(np.greater(rect_width, sur_domain[:, 1] - sur_domain[:, 0])):
msg = "The hyperrectangle defined by this size is larger than the"
msg += "original domain."
print msg
-
- # determine the hyperrectangle defined by center and r_size
- rect_width = r_size*np.ones(sur_domain[:,0].shape)
- rect_domain = np.empty(sur_domain.shape)
- rect_domain[:, 0] = center - .5*rect_width
- rect_domain[:, 1] = center + .5*rect_width
-
+
# determine the locations of the points for the 1st bounding layer
layer1_left = rect_domain[:, 0]-rect_width/(2*center_pts_per_edge)
layer1_right = rect_domain[:, 1]+rect_width/(2*center_pts_per_edge)
@@ -55,14 +64,9 @@ def center_and_layer1_points_binsize(center_pts_per_edge, center, r_size, sur_do
interior_and_layer1.append(int_l1)
# use meshgrid to make the hyperrectangle shells
- if sur_domain.shape[0] == 1:
- points = interior_and_layer1[0]
- else:
- points = util.meshgrid_ndim(interior_and_layer1)
-
+ points = util.meshgrid_ndim(interior_and_layer1)
return (points, interior_and_layer1, rect_domain)
-
def center_and_layer1_points(center_pts_per_edge, center, r_ratio, sur_domain):
r"""
Generates a regular grid of center points that define the voronoi
@@ -79,17 +83,19 @@ def center_and_layer1_points(center_pts_per_edge, center, r_ratio, sur_domain):
:param center: location of the center of the hyperrectangle
:type center: :class:`numpy.ndarray` of shape (mdim,)
:param r_ratio: ratio of the length of the sides of the
- hyperrectangle to the surrounding domain
+        hyperrectangle rect_domain for which to define voronoi cells
:type r_ratio: double or list()
:param sur_domain: minima and maxima of each dimension defining the
- surrounding domain
+ surrounding domain. The surrounding domain is the bounded domain
+ in the data space (i.e. the data domain).
:type sur_domain: :class:`numpy.ndarray` of shape (mdim, 2)
:rtype: tuple
- :returns: (points, interior_and_layer1) where where points is an
- :class:`numpy.ndarray` of shape (num_points, dim), interior_and_layer1
- is a list() of dim :class:`numpy.ndarray`s of shape
- (center_pts_per_edge+2,).
+    :returns: (points, interior_and_layer1, rect_domain) where points is
+ an :class:`numpy.ndarray` of shape (num_points, dim),
+ interior_and_layer1 is a list() of dim :class:`numpy.ndarray`s of shape
+ (center_pts_per_edge+2,), rect_domain is a :class:`numpy.ndarray` of
+ shape (mdim, 2).
"""
if np.all(np.greater(r_ratio, 1)):
@@ -97,145 +103,50 @@ def center_and_layer1_points(center_pts_per_edge, center, r_ratio, sur_domain):
msg += "original domain."
print msg
- # determine the width of the surrounding domain
- sur_width = sur_domain[:, 1]-sur_domain[:, 0]
- # determine the hyperrectangle defined by center and r_ratio
- rect_width = r_ratio*sur_width
- rect_domain = np.empty(sur_domain.shape)
- rect_domain[:, 0] = center - .5*rect_width
- rect_domain[:, 1] = center + .5*rect_width
-
- # determine the locations of the points for the 1st bounding layer
- layer1_left = rect_domain[:, 0]-rect_width/(2*center_pts_per_edge)
- layer1_right = rect_domain[:, 1]+rect_width/(2*center_pts_per_edge)
-
- interior_and_layer1 = list()
- for dim in xrange(sur_domain.shape[0]):
- # create interior points and 1st layer
- int_l1 = np.linspace(layer1_left[dim],
- layer1_right[dim], center_pts_per_edge[dim]+2)
- interior_and_layer1.append(int_l1)
+ # determine r_size from the width of the surrounding domain
+ r_size = r_ratio*(sur_domain[:, 1]-sur_domain[:, 0])
- # use meshgrid to make the hyperrectangle shells
- if sur_domain.shape[0] == 1:
- points = interior_and_layer1[0]
- else:
- points = util.meshgrid_ndim(interior_and_layer1)
- return (points, interior_and_layer1, rect_domain)
+ return center_and_layer1_points_binsize(center_pts_per_edge, center,
+ r_size, sur_domain)
-def edges_regular_binsize(center_pts_per_edge, center, r_size, sur_domain):
+def edges_regular(center_pts_per_edge, rect_domain, sur_domain):
"""
Generates a sequence of arrays describing the edges of the finite voronoi
cells in each direction. The voronoi tesselation is defined by regular grid
of center points that define the voronoi tesselation of exactly the
- interior of a hyperrectangle centered at ``center`` with sides of length
- ``r_size`` and the layers of voronoi cells that bound these
- interior cells. The resulting voronoi tesselation exactly represents the
- hyperrectangle. The bounding voronoi cells are made finite by bounding them
- with an additional layer to represent ``sur_domain``.
+ interior of a hyperrectangle defined by ``rect_domain`` and the layers of
+ voronoi cells that bound these interior cells. The resulting voronoi
+ tesselation exactly represents the hyperrectangle. The bounding voronoi
+ cells are made finite by bounding them with an additional layer to
+ represent ``sur_domain``.
This method can also be used to tile ``sur_domain`` with points to define
voronoi regions if the user sets ``r_ratio = 1``. use binratio below
:param list() center_pts_per_edge: number of center points per edge and
additional two points will be added to create the bounding layer
- :param center: location of the center of the hyperrectangle
- :type center: :class:`numpy.ndarray` of shape (mdim,)
- :param r_size: size of the length of the sides of the
- hyperrectangle to the surrounding domain
- :type r_size: double or list()
- :param sur_domain: minima and maxima of each dimension defining the
- surrounding domain
- :type sur_domain: :class:`numpy.ndarray` of shape (mdim, 2)
-
- :rtype: tuple
- :returns: (points, interior_and_layer1, interior_and_doublelayer) where
- where points is an :class:`numpy.ndarray` of shape (num_points, dim),
- interior_and_layer1 and interior_and_layer2 are lists of dim
- :class:`numpy.ndarray`s of shape (center_pts_per_edge+2,) and
- (center_pts_per_edge+4,) respectively.
-
- """
-
- # determine the hyperrectangle defined by center and r_size
- rect_width = r_size*np.ones(sur_domain[:,0].shape)
- rect_domain = np.empty(sur_domain.shape)
- rect_domain[:, 0] = center - .5*rect_width
- rect_domain[:, 1] = center + .5*rect_width
-
- rect_edges = list()
- rect_and_sur_edges = list()
- for dim in xrange(sur_domain.shape[0]):
- # create interior points and 1st layer
- int_l1 = np.linspace(rect_domain[dim, 0],
- rect_domain[dim, 1], center_pts_per_edge[dim]+1)
- rect_edges.append(int_l1)
- # add layers together using indexing fu
- int_l2 = np.zeros((int_l1.shape[0]+2,))
- int_l2[1:-1] = int_l1
- int_l2[0] = sur_domain[dim, 0]
- int_l2[-1] = sur_domain[dim, 1]
- rect_and_sur_edges.append(int_l2)
-
- return rect_and_sur_edges
-
-def edges_regular(center_pts_per_edge, center, r_ratio, sur_domain):
- """
- Generates a sequence of arrays describing the edges of the finite voronoi
- cells in each direction. The voronoi tesselation is defined by regular grid
- of center points that define the voronoi tesselation of exactly the
- interior of a hyperrectangle centered at ``center`` with sides of length
- ``r_ratio*sur_width`` and the layers of voronoi cells that bound these
- interior cells. The resulting voronoi tesselation exactly represents the
- hyperrectangle. The bounding voronoi cells are made finite by bounding them
- with an additional layer to represent ``sur_domain``.
-
- This method can also be used to tile ``sur_domain`` with points to define
- voronoi regions if the user sets ``r_ratio = 1``.
-
- :param list() center_pts_per_edge: number of center points per edge and
- additional two points will be added to create the bounding layer
- :param center: location of the center of the hyperrectangle
- :type center: :class:`numpy.ndarray` of shape (mdim,)
- :param r_ratio: ratio of the length of the sides of the
- hyperrectangle to the surrounding domain
- :type r_ratio: double or list()
+ :param rect_domain: The rectangular domain to define the voronoi
+ tesselation for. This domain should be contained in the ``sur_domain``.
+ :type rect_domain: :class:`numpy.ndarray` of shape (mdim, 2)
:param sur_domain: minima and maxima of each dimension defining the
- surrounding domain
+ surrounding domain. The surrounding domain is the bounded domain
+ in the data space (i.e. the data domain).
:type sur_domain: :class:`numpy.ndarray` of shape (mdim, 2)
:rtype: tuple
- :returns: (points, interior_and_layer1, interior_and_doublelayer) where
- where points is an :class:`numpy.ndarray` of shape (num_points, dim),
- interior_and_layer1 and interior_and_layer2 are lists of dim
- :class:`numpy.ndarray`s of shape (center_pts_per_edge+2,) and
- (center_pts_per_edge+4,) respectively.
+    :returns: rect_and_sur_edges, a list of dim :class:`numpy.ndarray`s of
+        shape (center_pts_per_edge+3,)
"""
- if np.all(np.greater(r_ratio, 1)):
- msg = "The hyperrectangle defined by this ratio is larger than the"
- msg += "original domain."
- print msg
-
- # determine the width of the surrounding domain
- sur_width = sur_domain[:, 1]-sur_domain[:, 0]
- # determine the hyperrectangle defined by center and r_ratio
- rect_width = r_ratio*sur_width
- rect_domain = np.empty(sur_domain.shape)
- rect_domain[:, 0] = center - .5*rect_width
- rect_domain[:, 1] = center + .5*rect_width
-
- if np.all(np.greater_equal(sur_domain[:, 0], rect_domain[:, 0])):
- msg = "The hyperrectangle defined by this ratio is larger than the"
+ if np.any(np.greater_equal(sur_domain[:, 0], rect_domain[:, 0])):
+ msg = "The hyperrectangle defined by this size is larger than the"
msg += "original domain."
print msg
-
- elif np.all(np.less_equal(sur_domain[:, 1], rect_domain[:, 1])):
- msg = "The hyperrectangle defined by this ratio is larger than the"
+ elif np.any(np.less_equal(sur_domain[:, 1], rect_domain[:, 1])):
+ msg = "The hyperrectangle defined by this size is larger than the"
msg += "original domain."
print msg
-
-
+
rect_edges = list()
rect_and_sur_edges = list()
for dim in xrange(sur_domain.shape[0]):
@@ -248,7 +159,7 @@ def edges_regular(center_pts_per_edge, center, r_ratio, sur_domain):
int_l2[1:-1] = int_l1
int_l2[0] = sur_domain[dim, 0]
int_l2[-1] = sur_domain[dim, 1]
- rect_and_sur_edges.append(int_l2)
+ rect_and_sur_edges.append(int_l2)
return rect_and_sur_edges
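
The edge construction in the consolidated `edges_regular` can be traced in one dimension (a sketch with hypothetical domains, not part of the patch): interior edges span `rect_domain`, then the `sur_domain` bounds close the outer layer.

```python
import numpy as np

sur_domain = np.array([[0.0, 10.0]])
rect_domain = np.array([[4.0, 6.0]])
center_pts_per_edge = [2]
int_l1 = np.linspace(rect_domain[0, 0], rect_domain[0, 1],
    center_pts_per_edge[0] + 1)  # [ 4.  5.  6.]
int_l2 = np.zeros((int_l1.shape[0] + 2,))
int_l2[1:-1] = int_l1
int_l2[0] = sur_domain[0, 0]
int_l2[-1] = sur_domain[0, 1]
print(int_l2)  # [  0.   4.   5.   6.  10.]
```
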
@@ -273,66 +184,45 @@ def edges_from_points(points):
edges.append((points_dim[1:]+points_dim[:-1])/2)
return edges
-def points_from_edges(edges):
- """
- Given a sequence of arrays describing the edges of bins formed by voronoi
- cells along each dimensions returns the voronoi points that would generate
- these cells.
-
- ..todo:: This method only creates points in the center of the bins. Needs
- Needs to be adjusted so that it creates points in the voronoi cell
- locations.
-
- :param edges: A sequence of arrays describing the edges of bins along each
- dimension.
- :type edges: A list() containing mdim :class:`numpy.ndarray`s of shape
- (nbins_per_dim+1,)
-
- :returns: points, the coordindates of voronoi points that would generate
- these bins
- :rtype: :class:`numpy.ndarray` of shape (num_points, dim) where num_points
- = product(nbins_per_dim)
-
- """
- #TODO: Implement me.
- # create a point inside each of the bins defined by the edges
- centers = list()
- for e in edges:
- centers.append((e[1:]+e[:-1])/2)
-
def histogramdd_volumes(edges, points):
"""
Given a sequence of arrays describing the edges of voronoi cells (bins)
along each dimension and an 'ij' ordered sequence of points (1 per voronoi
cell) returns a list of the volumes associated with these voronoic cells.
- :param edges:
- :type edges:
+ :param edges: A sequence of arrays describing the edges of bins along
+ each dimension.
+ :type edges: A list() containing mdim :class:`numpy.ndarray`s of shape
+ (nbins_per_dim+1,)
:param points: points used to define the voronoi tesselation (only the
points that define regions of finite volumes)
:type points: :class:`numpy.ndarrray` of shape (num_points, mdim)
- :returns: finite volumes associated with ``points``
- :rtype: :class:`numpy.ndarray` of shape (len(points),)
+ :rtype: tuple of (H, volume, edges)
+ :returns: H is the result of :meth:`np.histogramdd(points, edges,
+ normed=True)`, volumes is a :class:`numpy.ndarray` of shape
+        (len(points),) containing the finite volumes associated with
+ ``points``
"""
# adjust edges
points_max = np.max(points, 0)
points_min = np.min(points, 0)
+ # Use a loop because the number of edges per dimension is not the same.
for dim, e in enumerate(edges):
if len(edges) == 1:
if e[0] >= points_min:
- e[0] = points_min-1
+ e[0] = points_min-np.finfo(float).eps
if e[-1] <= points_max:
- e[-1] = points_max+1
+ e[-1] = points_max+np.finfo(float).eps
else:
if e[0] >= points_min[dim]:
- e[0] = points_min[dim]-1
+ e[0] = points_min[dim]-np.finfo(float).eps
if e[-1] <= points_max[dim]:
- e[-1] = points_max[dim]+1
+ e[-1] = points_max[dim]+np.finfo(float).eps
H, _ = np.histogramdd(points, edges, normed=True)
- volume = 1/(H*points.shape[0])
+ volume = 1.0/(H*points.shape[0]) # account for number of bins
# works as long as points are created with 'ij' indexing in meshgrid
volume = volume.ravel()
return H, volume, edges
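
The `volume = 1.0/(H*points.shape[0])` line admits a worked check (a sketch, not part of the patch; `normed=True` matches the patch and corresponds to `density=True` in newer NumPy). With one point per bin, the normalized density in a bin is `1/(n*bin_volume)`, so inverting recovers the bin volume:

```python
import numpy as np

points = np.array([[0.5], [1.5], [3.0]])  # one point per bin
edges = [np.array([0.0, 1.0, 2.0, 4.0])]  # bin widths 1, 1, 2
H, _ = np.histogramdd(points, edges, normed=True)
volume = 1.0/(H*points.shape[0])
print(volume.ravel())  # [ 1.  1.  2.], the bin widths
```
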
@@ -358,17 +248,14 @@ def simple_fun_uniform(points, volumes, rect_domain):
`d_Tree` is the :class:`~scipy.spatial.KDTree` for points
"""
- if len(points.shape) == 1:
- points = np.expand_dims(points, axis=1)
-
- rect_left = np.repeat([rect_domain[:, 0]], points.shape[0], 0)
- rect_right = np.repeat([rect_domain[:,1]], points.shape[0], 0)
- rect_left = np.all(np.greater_equal(points, rect_left), axis=1)
- rect_right = np.all(np.less_equal(points, rect_right), axis=1)
- inside = np.logical_and(rect_left, rect_right)
- rho_D_M = np.zeros(volumes.shape)
- rho_D_M[inside] = volumes[inside]/np.sum(volumes[inside]) # normalize on Lambda not D
+    points = util.fix_dimensions_data(points)
+ inside = np.logical_and(np.all(np.greater_equal(points, rect_domain[:, 0]),
+ axis=1), np.all(np.less_equal(points, rect_domain[:, 1]), axis=1))
+
+ rho_D_M = np.zeros(volumes.shape)
+ # normalize on Lambda not D
+ rho_D_M[inside] = volumes[inside]/np.sum(volumes[inside])
d_Tree = spatial.KDTree(points)
return (rho_D_M, points, d_Tree)
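
A sketch of the containment-and-normalize logic in `simple_fun_uniform`, using hypothetical 1D inputs (not part of the patch):

```python
import numpy as np

points = np.array([[1.0], [2.0], [5.0]])
volumes = np.array([1.0, 1.0, 2.0])
rect_domain = np.array([[0.0, 3.0]])
inside = np.logical_and(
    np.all(np.greater_equal(points, rect_domain[:, 0]), axis=1),
    np.all(np.less_equal(points, rect_domain[:, 1]), axis=1))
rho_D_M = np.zeros(volumes.shape)
rho_D_M[inside] = volumes[inside]/np.sum(volumes[inside])
print(rho_D_M)  # [ 0.5  0.5  0. ], only interior cells carry mass
```
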
diff --git a/bet/loadBalance/__init__.py b/bet/loadBalance/__init__.py
deleted file mode 100644
index 949dbfe6..00000000
--- a/bet/loadBalance/__init__.py
+++ /dev/null
@@ -1,12 +0,0 @@
-"""
-This subpackage contains
-
-* :class:`~bet.loadBalance.load_balance` an interface to control the scheduling and load
- balancing of model runs on an HPC infrastructure
-* :mod:`~bet.loadBalance.lb_DIAMOND` implements the :class:`~bet.loadBalance.load_balance` infrastructure
- for the DIAMOND model
-* :mod:`~bet.loadBalance.lb_PADCIRC` implements the :class:`~bet.loadBalance.load_balance` infrastructure
- for the PADCIRC model
-
-"""
-__all__ = ['load_balance','lb_DIAMOND','lb_PADCIRC']
diff --git a/bet/loadBalance/lb_DIAMOND.py b/bet/loadBalance/lb_DIAMOND.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/bet/loadBalance/lb_PADCIRC.py b/bet/loadBalance/lb_PADCIRC.py
deleted file mode 100644
index e7fa9dd4..00000000
--- a/bet/loadBalance/lb_PADCIRC.py
+++ /dev/null
@@ -1,5 +0,0 @@
-"""
-This is a placeholder. See the ``run_framework`` subpackage of `PolyADCIRC
-`_ and the examples in
-``examples/fromADCIRCMap`` for a yet to be generalized prototype.
-"""
diff --git a/bet/loadBalance/load_balance.py b/bet/loadBalance/load_balance.py
deleted file mode 100644
index d36fc27d..00000000
--- a/bet/loadBalance/load_balance.py
+++ /dev/null
@@ -1,12 +0,0 @@
-"""
-This module handles the following
- store HPC topology
- set up file structure for data, parameter fields
- compose parameter fields
- prep computational domain(s) (model specific)
- run model (model specific)
- read and preprocess data (model specific)
-
-"""
-class load_balance():
- pass
diff --git a/bet/postProcess/__init__.py b/bet/postProcess/__init__.py
index 212068d8..e1ff3ea1 100644
--- a/bet/postProcess/__init__.py
+++ b/bet/postProcess/__init__.py
@@ -1,8 +1,12 @@
-"""
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
+r"""
This subpackage contains
-* :class:`~bet.postProcess.plotP` plots P and/or volumes of voronoi cells
-* :class:`~bet.postProcess.plotDomains` plots the data domain D in 2D
+* :class:`~bet.postProcess.plotP` plots :math:`P` and/or volumes (:math:`\mu`)
+ of voronoi cells
+* :class:`~bet.postProcess.plotDomains` plots the data domain
+ :math:`\mathcal{D}` in 2D
* :class:`~bet.postProcess.postTools` has tools for postprocessing
"""
diff --git a/bet/postProcess/plotDomains.py b/bet/postProcess/plotDomains.py
index 5cce4fea..2ff1a5d3 100644
--- a/bet/postProcess/plotDomains.py
+++ b/bet/postProcess/plotDomains.py
@@ -1,13 +1,30 @@
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
"""
-Methods used to plot two-dimensional domains and/or two-dimensional
-slices/projections of domains.
+This module provides methods used to plot two-dimensional domains and/or
+two-dimensional slices/projections of domains.
+
"""
import matplotlib.tri as tri
import numpy as np
import matplotlib.pyplot as plt
+from matplotlib.lines import Line2D
+from itertools import combinations
+from mpl_toolkits.mplot3d import Axes3D
+import bet.util as util
+
+markers = []
+for m in Line2D.markers:
+ try:
+ if len(m) == 1 and m != ' ':
+ markers.append(m)
+ except TypeError:
+ pass
-def scatter_2D(samples, sample_nos, color, p_ref, save, show,
+colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k')
+
+def scatter_2D(samples, sample_nos, color, p_ref, save, interactive,
xlabel, ylabel, filename):
"""
Two-dimensional scatter plot of ``samples`` colored by ``color`` (usually
@@ -21,7 +38,7 @@ def scatter_2D(samples, sample_nos, color, p_ref, save, show,
:param p_ref: reference parameter value
:type p_ref: :class:`np.ndarray`
:param boolean save: flag whether or not to save the figure
- :param boolean show: flag whether or not to show the figure
+ :param boolean interactive: flag whether or not to show the figure
:param string xlabel: x-axis label
:param string ylabel: y-axis label
:param string filename: filename to save the figure as
@@ -31,8 +48,7 @@ def scatter_2D(samples, sample_nos, color, p_ref, save, show,
sample_nos = range(samples.shape[0])
color = color[sample_nos]
plt.scatter(samples[sample_nos, 0], samples[sample_nos, 1], c=color, s=10,
- #edgecolor='none',
- alpha=.75, linewidth=.1, cmap=plt.cm.Oranges)#Oranges_r)
+ alpha=.75, linewidth=.1, cmap=plt.cm.Oranges)
cbar = plt.colorbar()
cbar.set_label(r'$\rho_\mathcal{D}(Q)$')
if p_ref != None:
@@ -43,12 +59,12 @@ def scatter_2D(samples, sample_nos, color, p_ref, save, show,
plt.ylabel(ylabel)
plt.savefig(filename, bbox_inches='tight', transparent=True,
pad_inches=0)
- if show:
+ if interactive:
plt.show()
else:
plt.close()
-def scatter_3D(samples, sample_nos, color, p_ref, save, show,
+def scatter_3D(samples, sample_nos, color, p_ref, save, interactive,
xlabel, ylabel, zlabel, filename):
"""
Three-dimensional scatter plot of ``samples`` colored by ``color`` (usually
@@ -62,7 +78,7 @@ def scatter_3D(samples, sample_nos, color, p_ref, save, show,
:param p_ref: reference parameter value
:type p_ref: :class:`np.ndarray`
:param boolean save: flag whether or not to save the figure
- :param boolean show: flag whether or not to show the figure
+ :param boolean interactive: flag whether or not to show the figure
:param string xlabel: x-axis label
:param string ylabel: y-axis label
:param string zlabel: z-axis label
@@ -77,7 +93,7 @@ def scatter_3D(samples, sample_nos, color, p_ref, save, show,
ax = fig.add_subplot(111, projection='3d')
ax.scatter(samples[sample_nos, 0], samples[sample_nos, 1],
samples[sample_nos, 2], s=10, alpha=.75, linewidth=.1, c=color,
- cmap=plt.cm.Oranges) #ax.colorbar()
+ cmap=plt.cm.Oranges)
if p_ref != None:
ax.scatter(p_ref[0], p_ref[1], p_ref[2], c='g')
@@ -88,13 +104,13 @@ def scatter_3D(samples, sample_nos, color, p_ref, save, show,
if save:
plt.savefig(filename, bbox_inches='tight', transparent=True,
pad_inches=0)
- if show:
+ if interactive:
plt.show()
else:
plt.close()
def show_param(samples, data, rho_D=None, p_ref=None, sample_nos=None,
- save=True, show=False, lnums=None):
+ save=True, interactive=False, lnums=None, showdim=None):
"""
Plot samples in parameter space and colors them either by rho_D or by
sample batch number.
@@ -110,27 +126,48 @@ def show_param(samples, data, rho_D=None, p_ref=None, sample_nos=None,
:param p_ref: reference parameter value
:type p_ref: :class:`np.ndarray`
:param boolean save: flag whether or not to save the figure
- :param boolean show: flag whether or not to show the figure
+ :param boolean interactive: flag whether or not to show the figure
+ :param list lnums: integers representing parameter domain coordinate
+ numbers
+ :param int showdim: 2 or 3; if set, plot all pairwise (2) or triple-wise
+ (3) combinations of parameter sample clouds
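+
+ :Example: (a minimal sketch; ``lam_samples``, ``dat``, and ``rho`` are
+ hypothetical user-supplied objects)
+
+ >>> show_param(lam_samples, dat, rho_D=rho, showdim=2)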
"""
if rho_D != None:
rD = rho_D(data)
+ else:
+ rD = np.ones(data.shape[0])
if lnums == None:
- lnums = 1+np.array(range(data.shape[1]))
+ lnums = 1+np.array(range(samples.shape[1]))
xlabel = r'$\lambda_{'+str(lnums[0])+'}$'
ylabel = r'$\lambda_{'+str(lnums[1])+'}$'
savename = 'param_samples_cs.eps'
- if data.shape[1] == 2:
- scatter_2D(samples, sample_nos, rD, p_ref, save, show, xlabel, ylabel,
- savename)
- elif data.shape[1] == 3:
+ if samples.shape[1] == 2:
+ scatter_2D(samples, sample_nos, rD, p_ref, save, interactive, xlabel,
+ ylabel, savename)
+ elif samples.shape[1] == 3:
zlabel = r'$\lambda_{'+str(lnums[2])+'}$'
- scatter_3D(samples, sample_nos, rD, p_ref, save, show, xlabel, ylabel,
- zlabel, savename)
+ scatter_3D(samples, sample_nos, rD, p_ref, save, interactive, xlabel,
+ ylabel, zlabel, savename)
+ elif samples.shape[1] > 2 and showdim == 2:
+ for x, y in combinations(lnums, 2):
+ xlabel = r'$\lambda_{'+str(x)+'}$'
+ ylabel = r'$\lambda_{'+str(y)+'}$'
+ savename = 'param_samples_l'+str(x)+'l'+str(y)+'_cs.eps'
+ scatter_2D(samples[:, [x-1, y-1]], sample_nos, rD, p_ref, save,
+ interactive, xlabel, ylabel, savename)
+ elif samples.shape[1] > 3 and showdim == 3:
+ for x, y, z in combinations(lnums, 3):
+ xlabel = r'$\lambda_{'+str(x)+'}$'
+ ylabel = r'$\lambda_{'+str(y)+'}$'
+ zlabel = r'$\lambda_{'+str(z)+'}$'
+ savename = 'param_samples_l'+str(x)+'l'+str(y)+'l'+str(z)+'_cs.eps'
+ scatter_3D(samples[:, [x-1, y-1, z-1]], sample_nos, rD, p_ref, save,
+ interactive, xlabel, ylabel, zlabel, savename)
def show_data(data, rho_D=None, Q_ref=None, sample_nos=None,
- save=True, show=False, Q_nums=None):
+ save=True, interactive=False, Q_nums=None, showdim=None):
"""
Plot samples in data space and colors them either by rho_D or by
sample batch number.
@@ -142,99 +179,169 @@ def show_data(data, rho_D=None, Q_ref=None, sample_nos=None,
:type rho_D: callable function that takes a :class:`np.array` and returns a
:class:`np.ndarray`
:param Q_ref: reference data value
- :type Q_ref: :class:`np.ndarray`
+ :type Q_ref: :class:`np.ndarray` of shape (mdim,)
:param boolean save: flag whether or not to save the figure
- :param boolean show: flag whether or not to show the figure
+ :param boolean interactive: flag whether or not to show the figure
+ :param list Q_nums: integers representing data domain coordinates
+ :param int showdim: 2 or 3; if set, plot all pairwise (2) or triple-wise
+ (3) combinations of data sample clouds
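+
+ :Example: (a minimal sketch; ``dat`` and ``rho`` are hypothetical)
+
+ >>> show_data(dat, rho_D=rho, showdim=2)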
"""
if rho_D != None:
rD = rho_D(data)
+ else:
+ rD = np.ones(data.shape[0])
if Q_nums == None:
Q_nums = range(data.shape[1])
xlabel = r'$q_{'+str(Q_nums[0]+1)+'}$'
ylabel = r'$q_{'+str(Q_nums[1]+1)+'}$'
savename = 'data_samples_cs.eps'
if data.shape[1] == 2:
- scatter_2D(data, sample_nos, rD, Q_ref, save, show, xlabel, ylabel,
- savename)
+ q_ref = None
+ if type(Q_ref) == np.ndarray:
+ q_ref = Q_ref[Q_nums[:2]]
+ scatter_2D(data, sample_nos, rD, q_ref, save, interactive, xlabel,
+ ylabel, savename)
elif data.shape[1] == 3:
zlabel = r'$q_{'+str(Q_nums[2]+1)+'}$'
- scatter_3D(data, sample_nos, rD, Q_ref, save, show, xlabel, ylabel,
- zlabel, savename)
+ if type(Q_ref) == np.ndarray:
+ q_ref = Q_ref[Q_nums[:3]]
+ scatter_3D(data, sample_nos, rD, q_ref, save, interactive, xlabel,
+ ylabel, zlabel, savename)
+ elif data.shape[1] > 2 and showdim == 2:
+ for x, y in combinations(Q_nums, 2):
+ xlabel = r'$q_{'+str(x+1)+'}$'
+ ylabel = r'$q_{'+str(y+1)+'}$'
+ savename = 'data_samples_q'+str(x+1)+'q'+str(y+1)+'_cs.eps'
+ q_ref = None
+ if type(Q_ref) == np.ndarray:
+ q_ref = Q_ref[[x, y]]
+ scatter_2D(data[:, [x, y]], sample_nos, rD, q_ref, save,
+ interactive, xlabel, ylabel, savename)
+ elif data.shape[1] > 3 and showdim == 3:
+ for x, y, z in combinations(Q_nums, 3):
+ xlabel = r'$q_{'+str(x+1)+'}$'
+ ylabel = r'$q_{'+str(y+1)+'}$'
+ zlabel = r'$q_{'+str(z+1)+'}$'
+ q_ref = None
+ if type(Q_ref) == np.ndarray:
+ q_ref = Q_ref[[x, y, z]]
+ savename = 'data_samples_q'+str(x+1)+'q'+str(y+1)+'q'+str(z+1)+'_cs.eps'
+ scatter_3D(data[:, [x, y, z]], sample_nos, rD, q_ref, save,
+ interactive, xlabel, ylabel, zlabel, savename)
def show_data_domain_multi(samples, data, Q_ref, Q_nums=None,
- img_folder='figs/', ref_markers=['^', 's', 'o'],
- ref_colors=['r', 'g', 'b']):
- """
+ img_folder='figs/', ref_markers=None,
+ ref_colors=None, showdim=None):
+ r"""
Plot the data domain D using a triangulation based on the generating
- samples where $Q={q_1, q_i}$ for ``i=Q_nums``, with a marker for various
- ``Q_ref``.
+ samples where :math:`Q={q_1, q_i}` for ``i=Q_nums``, with a marker for
+ various :math:`Q_{ref}`.
:param samples: Samples to plot
- :type samples: :class:`~numpy.ndarray` of shape (ndim, num_samples)
+ :type samples: :class:`~numpy.ndarray` of shape (num_samples, ndim). Only
+ uses the first two dimensions.
:param data: Data associated with ``samples``
:type data: :class:`np.ndarray`
:param Q_ref: reference data value
- :type Q_ref: :class:`np.ndarray`
+ :type Q_ref: :class:`np.ndarray` of shape (M, mdim)
:param list Q_nums: dimensions of the QoI to plot
:param string img_folder: folder to save the plots to
- :param list ref_markers: list of marker types for ``Q_ref``
- :param list ref_colors: list of colors for ``Q_ref``
+ :param list ref_markers: list of marker types for :math:`Q_{ref}`
+ :param list ref_colors: list of colors for :math:`Q_{ref}`
+ :param showdim: default 1. If an int, plot the domain of each QoI in
+ ``Q_nums`` against dimension ``showdim``; if ``'all'``, plot all
+ pairwise combinations.
+ :type showdim: int or string
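+
+ :Example: (a minimal sketch; ``lam_samples``, ``dat``, and ``Q_ref_vals``
+ are hypothetical)
+
+ >>> show_data_domain_multi(lam_samples, dat, Q_ref_vals, showdim='all')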
"""
-
+ if ref_markers == None:
+ ref_markers = markers
+ if ref_colors == None:
+ ref_colors = colors
if Q_nums == None:
Q_nums = range(data.shape[1])
+ if showdim == None:
+ showdim = 1
+
+ Q_ref = util.fix_dimensions_data(Q_ref, data.shape[1])
- triangulation = tri.Triangulation(samples[0, :], samples[1, :])
+ triangulation = tri.Triangulation(samples[:, 0], samples[:, 1])
triangles = triangulation.triangles
- for i in Q_nums:
-
- plt.tricontourf(data[:, 0], data[:, i], np.zeros((data.shape[0],)),
- triangles=triangles, colors='grey')
- plt.autoscale(tight=True)
- plt.xlabel(r'$q_1$')
- plt.ylabel(r'$q_{'+str(i+1)+r'}$')
+
+ if type(showdim) == int:
+ for i in Q_nums:
+ plt.tricontourf(data[:, showdim], data[:, i],
+ np.zeros((data.shape[0],)), triangles=triangles,
+ colors='grey')
+ plt.autoscale(tight=True)
+ xlabel = r'$q_{'+str(showdim+1)+r'}$'
+ ylabel = r'$q_{'+str(i+1)+r'}$'
+ plt.xlabel(xlabel)
+ plt.ylabel(ylabel)
- filenames = [img_folder+'domain_q1_'+str(i)+'.eps',
- img_folder+'q1_q'+str(i)+'_domain_Q_cs.eps']
-
- show_data_domain_2D(samples, data[:, [0, i]], Q_ref[:, [0, i]],
- ref_markers, ref_colors, ylabel=r'$q_{'+str(i+1)+r'}$',
- triangles=triangles, save=True, show=False, filenames=filenames)
+ filenames = [img_folder+'domain_q'+str(showdim+1)+'_'+str(i+1)+'.eps',
+ img_folder+'q'+str(showdim+1)+'_q'+str(i+1)+'_domain_Q_cs.eps']
+
+ show_data_domain_2D(samples, data[:, [showdim, i]], Q_ref[:,
+ [showdim, i]], ref_markers, ref_colors, xlabel=xlabel,
+ ylabel=ylabel, triangles=triangles, save=True,
+ interactive=False, filenames=filenames)
+ elif showdim == 'all' or showdim == 'ALL':
+ for x, y in combinations(Q_nums, 2):
+ plt.tricontourf(data[:, x], data[:, y], np.zeros((data.shape[0],)),
+ triangles=triangles, colors='grey')
+ plt.autoscale(tight=True)
+ xlabel = r'$q_{'+str(x+1)+r'}$'
+ ylabel = r'$q_{'+str(y+1)+r'}$'
+ plt.xlabel(xlabel)
+ plt.ylabel(ylabel)
-def show_data_domain_2D(samples, data, Q_ref, ref_markers=['^', 's', 'o'],
- ref_colors=['r', 'g', 'b'], xlabel=r'$q_1$', ylabel=r'$q_2',
- triangles=None, save=True, show=True, filenames=None):
- """
+ filenames = [img_folder+'domain_q'+str(x+1)+'_'+str(y+1)+'.eps',
+ img_folder+'q'+str(x+1)+'_q'+str(y+1)+'_domain_Q_cs.eps']
+
+ show_data_domain_2D(samples, data[:, [x, y]], Q_ref[:,
+ [x, y]], ref_markers, ref_colors, xlabel=xlabel,
+ ylabel=ylabel, triangles=triangles, save=True,
+ interactive=False, filenames=filenames)
+
+def show_data_domain_2D(samples, data, Q_ref, ref_markers=None,
+ ref_colors=None, xlabel=r'$q_1$', ylabel=r'$q_2$',
+ triangles=None, save=True, interactive=False, filenames=None):
+ r"""
Plot the data domain D using a triangulation based on the generating
- samples with a marker for various ``Q_ref``. Assumes that the first
- dimension of data is $q_1$.
+ samples with a marker for various :math:`Q_{ref}`. Assumes that the first
+ dimension of data is :math:`q_1`.
:param samples: Samples to plot
- :type samples: :class:`~numpy.ndarray` of shape (ndim, num_samples)
+ :type samples: :class:`~numpy.ndarray` of shape (num_samples, ndim)
:param data: Data associated with ``samples``
:type data: :class:`np.ndarray`
:param Q_ref: reference data value
- :type Q_ref: :class:`np.ndarray`
- :param list ref_markers: list of marker types for ``Q_ref``
- :param list ref_colors: list of colors for ``Q_ref``
+ :type Q_ref: :class:`np.ndarray` of shape (M, 2)
+ :param list ref_markers: list of marker types for :math:`Q_{ref}`
+ :param list ref_colors: list of colors for :math:`Q_{ref}`
:param string xlabel: x-axis label
:param string ylabel: y-axis label
:param triangles: triangulation defined by ``samples``
:type triangles: :class:`tri.Triangulation.triangles`
:param boolean save: flag whether or not to save the figure
- :param boolean show: flag whether or not to show the figure
+ :param boolean interactive: flag whether or not to show the figure
:param list filenames: file names for the unmarked and marked domain plots
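+
+ :Example: (a minimal sketch; arrays are hypothetical)
+
+ >>> show_data_domain_2D(lam_samples, dat[:, :2], Q_ref_vals[:, :2])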
"""
-
+ if ref_markers == None:
+ ref_markers = markers
+ if ref_colors == None:
+ ref_colors = colors
if triangles == None:
triangulation = tri.Triangulation(samples[:, 0], samples[:, 1])
triangles = triangulation.triangles
if filenames == None:
filenames = ['domain_q1_q2_cs.eps', 'q1_q2_domain_Q_cs.eps']
+
+ Q_ref = util.fix_dimensions_data(Q_ref, 2)
+
# Create figure
plt.tricontourf(data[:, 0], data[:, 1], np.zeros((data.shape[0],)),
triangles=triangles, colors='grey')
@@ -250,11 +357,9 @@ def show_data_domain_2D(samples, data, Q_ref, ref_markers=['^', 's', 'o'],
if save:
plt.savefig(filenames[1], bbox_inches='tight', transparent=True,
pad_inches=0)
- if show:
+ if interactive:
plt.show()
else:
plt.close()
-
-
diff --git a/bet/postProcess/plotP.py b/bet/postProcess/plotP.py
index 25c367fe..c3fb9285 100644
--- a/bet/postProcess/plotP.py
+++ b/bet/postProcess/plotP.py
@@ -1,37 +1,14 @@
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
"""
This module provides methods for plotting probabilities.
"""
+
from bet.Comm import *
import matplotlib.pyplot as plt
import numpy as np
-import copy
-import math
-
-
-def plot_voronoi_probs(P_samples, samples, lam_domain, nbins=20,
- plot_surface=False):
- """
- This makes plots of the joint probabilies of input probability measure
- defined by P_samples for 2d cases. post_process - is an input that only
- applies to the 2d case w.r.t. the Voronoi cells.
-
- :param P_samples: Probabilities.
- :type P_samples: :class:'~numpy.ndarray' of shape (num_samples,)
- :param samples: The samples in parameter space for which the model was run.
- :type samples: :class:'~numpy.ndarray' of shape (num_samples, ndim)
- :param lam_domain: The domain for each parameter for the model.
- :type lam_domain: :class:'~numpy.ndarray' of shape (ndim, 2)
- :param nbins: Number of bins in each direction.
- :type nbins: :int
+import copy, math
- """
- lam_dim = lam_domain.shape[0]
-
- if lam_dim == 2: # Plot Voronoi tesselations, otherwise plot 2d
- #projections/marginals of the joint inverse measure
- num_samples = samples.shape[0]
- #Add fake samples outside of lam_domain to close Voronoi
- #tesselations at infinity
def calculate_1D_marginal_probs(P_samples, samples, lam_domain, nbins=20):
@@ -51,33 +28,27 @@ def calculate_1D_marginal_probs(P_samples, samples, lam_domain, nbins=20):
:returns: (bins, marginals)
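+
+ :Example: (a minimal sketch; ``P``, ``lam_samples``, and ``lam_domain``
+ are hypothetical)
+
+ >>> (bins, marginals) = calculate_1D_marginal_probs(P, lam_samples,
+ ... lam_domain, nbins=30)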
"""
+ if len(samples.shape) == 1:
+ samples = np.expand_dims(samples, axis=1)
num_samples = samples.shape[0]
num_dim = samples.shape[1]
# Make list of bins if only an integer is given
if isinstance(nbins, int):
nbins = nbins*np.ones(num_dim, dtype=np.int)
-
+
# Create bins
bins = []
for i in range(num_dim):
bins.append(np.linspace(lam_domain[i][0], lam_domain[i][1], nbins[i]+1))
- bin_ptr = np.zeros((num_samples, num_dim), dtype=np.int)
- # Bin samples
- for j in range(num_dim):
- bin_ptr[:, j] = np.searchsorted(bins[j], samples[:, j])
- bin_ptr -= 1
-
- # Calculate marginal probabilities
+
+ # Calculate marginals
marginals = {}
for i in range(num_dim):
- marg = np.zeros((nbins[i]+1,))
- # This may be sped up with logical indices
- for k in range(num_samples):
- marg[bin_ptr[k][i]] += P_samples[k]
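+ # A weighted histogram sums P_samples over each bin, replacing the
+ # removed per-sample loop with a single vectorized call.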
+ (marg, _) = np.histogram(samples[:, i], bins=bins[i], weights=P_samples)
marg_temp = np.copy(marg)
- comm.Allreduce([marg, MPI.DOUBLE],[marg_temp, MPI.DOUBLE], op=MPI.SUM)
- marginals[i] = marg_temp[:-1]
+ comm.Allreduce([marg, MPI.DOUBLE], [marg_temp, MPI.DOUBLE], op=MPI.SUM)
+ marginals[i] = marg_temp
return (bins, marginals)
@@ -99,39 +70,35 @@ def calculate_2D_marginal_probs(P_samples, samples, lam_domain, nbins=20):
:returns: (bins, marginals)
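+
+ :Example: (a minimal sketch; inputs as in the 1D case)
+
+ >>> (bins, marginals2D) = calculate_2D_marginal_probs(P, lam_samples,
+ ... lam_domain, nbins=30)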
"""
+ if len(samples.shape) == 1:
+ samples = np.expand_dims(samples, axis=1)
num_samples = samples.shape[0]
num_dim = samples.shape[1]
# Make list of bins if only an integer is given
if isinstance(nbins, int):
nbins = nbins*np.ones(num_dim, dtype=np.int)
-
+
# Create bins
bins = []
for i in range(num_dim):
bins.append(np.linspace(lam_domain[i][0], lam_domain[i][1], nbins[i]+1))
- bin_ptr = np.zeros((num_samples, num_dim), dtype=np.int)
- # Bin samples
- for j in range(num_dim):
- bin_ptr[:, j] = np.searchsorted(bins[j], samples[:, j])
- bin_ptr -= 1
-
- # Calculate marginal probabilities
+
+ # Calculate marginals
marginals = {}
for i in range(num_dim):
for j in range(i+1, num_dim):
- marg = np.zeros((nbins[i]+1, nbins[j]+1))
- # This may be sped up with logical indices
- for k in range(num_samples):
- marg[bin_ptr[k][i]][bin_ptr[k][j]] += P_samples[k]
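+ # histogramdd with weights accumulates P_samples per 2D bin,
+ # replacing the removed per-sample loop.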
+ (marg, _) = np.histogramdd(samples[:, [i, j]],
+ bins=[bins[i], bins[j]], weights=P_samples)
+ marg = np.ascontiguousarray(marg)
marg_temp = np.copy(marg)
- comm.Allreduce([marg, MPI.DOUBLE],[marg_temp, MPI.DOUBLE], op=MPI.SUM)
- marginals[(i, j)] = marg_temp[:-1,:-1]
+ comm.Allreduce([marg, MPI.DOUBLE], [marg_temp, MPI.DOUBLE],
+ op=MPI.SUM)
+ marginals[(i, j)] = marg_temp
return (bins, marginals)
def plot_1D_marginal_probs(marginals, bins, lam_domain,
- filename="file", lam_ref=None, interactive=True,
+ filename="file", lam_ref=None, interactive=False,
lambda_label=None):
"""
@@ -139,8 +106,9 @@ def plot_1D_marginal_probs(marginals, bins, lam_domain,
input probability measure defined by P_samples on a 1D grid.
:param marginals: 1D marginal probabilities
- :type marginals: dictionary with int as keys and :class:'~numpy.ndarray' of shape (nbins+1,) as values
- :param bins: Endpoints of bins used in calculating marginals
+ :type marginals: dictionary with int as keys and :class:'~numpy.ndarray'
+ of shape (nbins+1,) as values
+ :param bins: Endpoints of bins used in calculating marginals
:type bins: :class:'~numpy.ndarray' of shape (nbins+1,)
:param lam_domain: The domain for each parameter for the model.
:type lam_domain: :class:'~numpy.ndarray' of shape (ndim, 2)
@@ -154,31 +122,32 @@ def plot_1D_marginal_probs(marginals, bins, lam_domain,
:type lambda_label: list of length nbins of strings or None
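+
+ :Example: (a minimal sketch, reusing ``bins`` and ``marginals`` from
+ :meth:`calculate_1D_marginal_probs`)
+
+ >>> plot_1D_marginal_probs(marginals, bins, lam_domain, filename='myfile')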
"""
- from scipy.interpolate import interp1d
- from scipy.integrate import quad
- from matplotlib import cm
if rank == 0:
index = copy.deepcopy(marginals.keys())
index.sort()
for i in index:
- x_range = np.linspace(lam_domain[i,0], lam_domain[i,1], len(bins[i])-1)
+ x_range = np.linspace(lam_domain[i, 0], lam_domain[i, 1],
+ len(bins[i])-1)
fig = plt.figure(i)
ax = fig.add_subplot(111)
- ax.plot(x_range,marginals[i]/(bins[i][1]-bins[i][0]))
+ ax.plot(x_range, marginals[i]/(bins[i][1]-bins[i][0]))
if lam_ref != None:
ax.plot(lam_ref[i], 0.0, 'ko', markersize=10)
if lambda_label == None:
- label1 = '$\lambda_{' + `i+1` + '}$'
+ label1 = r'$\lambda_{' + str(i+1) + '}$'
else:
label1 = lambda_label[i]
ax.set_xlabel(label1)
ax.set_ylabel(r'$\rho$')
- fig.savefig(filename + "_1D_" + `i` + ".eps")
+ fig.savefig(filename + "_1D_" + str(i) + ".eps", transparent=True)
if interactive:
plt.show()
+ else:
+ plt.close()
+ plt.clf()
def plot_2D_marginal_probs(marginals, bins, lam_domain,
- filename="file", lam_ref=None, plot_surface=False, interactive=True,
+ filename="file", lam_ref=None, plot_surface=False, interactive=False,
lambda_label=None):
"""
@@ -186,7 +155,8 @@ def plot_2D_marginal_probs(marginals, bins, lam_domain,
input probability measure defined by P_samples on a rectangular grid.
:param marginals: 2D marginal probabilities
- :type marginals: dictionary with tuples of 2 integers as keys and :class:'~numpy.ndarray' of shape (nbins+1,) as values
+ :type marginals: dictionary with tuples of 2 integers as keys and
+ :class:'~numpy.ndarray' of shape (nbins+1,) as values
:param bins: Endpoints of bins used in calculating marginals
:type bins: :class:'~numpy.ndarray' of shape (nbins+1,2)
:param lam_domain: The domain for each parameter for the model.
@@ -212,26 +182,32 @@ def plot_2D_marginal_probs(marginals, bins, lam_domain,
fig = plt.figure(k)
ax = fig.add_subplot(111)
boxSize = (bins[i][1]-bins[i][0])*(bins[j][1]-bins[j][0])
- quadmesh = ax.imshow(marginals[(i, j)].transpose()/boxSize, interpolation='bicubic', cmap=cm.jet, extent = [lam_domain[i][0], lam_domain[i][1], lam_domain[j][0],
- lam_domain[j][1]],origin='lower', vmax=marginals[(i, j)].max()/boxSize, vmin=marginals[(i, j)].min()/boxSize, aspect='auto')
-
+ quadmesh = ax.imshow(marginals[(i, j)].transpose()/boxSize,
+ interpolation='bicubic', cmap=cm.jet,
+ extent=[lam_domain[i][0], lam_domain[i][1],
+ lam_domain[j][0], lam_domain[j][1]], origin='lower',
+ vmax=marginals[(i, j)].max()/boxSize,
+ vmin=marginals[(i, j)].min()/boxSize, aspect='auto')
if lam_ref != None:
ax.plot(lam_ref[i], lam_ref[j], 'ko', markersize=10)
if lambda_label == None:
- label1 = '$\lambda_{' + `i+1` + '}$'
- label2 = '$\lambda_{' + `j+1` + '}$'
+ label1 = r'$\lambda_{' + str(i+1) + '}$'
+ label2 = r'$\lambda_{' + str(j+1) + '}$'
else:
label1 = lambda_label[i]
label2 = lambda_label[j]
ax.set_xlabel(label1)
ax.set_ylabel(label2)
- label_cbar = r'$\rho_{' + '\lambda_{' + `i+1` + '}' + ',' + '\lambda_{' + `j+1` + '}' + '}$ (Lesbesgue)'
+ label_cbar = r'$\rho_{\lambda_{' + str(i+1) + '}, '
+ label_cbar += r'\lambda_{' + str(j+1) + '}' + '}$ (Lebesgue)'
fig.colorbar(quadmesh, ax=ax, label=label_cbar)
plt.axis([lam_domain[i][0], lam_domain[i][1], lam_domain[j][0],
lam_domain[j][1]])
- fig.savefig(filename + "_2D_" + `i` + "_" + `j` + ".eps")
+ fig.savefig(filename + "_2D_" + str(i) + "_" + str(j) + ".eps", transparent=True)
if interactive:
plt.show()
+ else:
+ plt.close()
if plot_surface:
for k, (i, j) in enumerate(pairs):
@@ -245,22 +221,26 @@ def plot_2D_marginal_probs(marginals, bins, lam_domain,
antialiased=False)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
- ax.set_xlabel('$\lambda_{' + `i+1` + '}$')
- ax.set_ylabel('$\lambda_{' + `j+1` + '}$')
- ax.set_zlabel('$P$')
+ ax.set_xlabel(r'$\lambda_{' + str(i+1) + '}$')
+ ax.set_ylabel(r'$\lambda_{' + str(j+1) + '}$')
+ ax.set_zlabel(r'$P$')
plt.backgroundcolor = 'w'
fig.colorbar(surf, shrink=0.5, aspect=5, label=r'$P$')
- fig.savefig(filename + "_surf_"+ `i` + "_" +`j` + ".eps")
+ fig.savefig(filename + "_surf_"+str(i)+"_"+str(j)+".eps", transparent=True)
if interactive:
plt.show()
+ else:
+ plt.close()
+ plt.clf()
-def smooth_marginals_1D(marginals, bins, sigma=10.0):
+def smooth_marginals_1D(marginals, bins, sigma=10.0):
"""
This function smooths 1D marginal probabilities.
:param marginals: 1D marginal probabilities
- :type marginals: dictionary with int as keys and :class:'~numpy.ndarray' of shape (nbins+1,) as values
- :param bins: Endpoints of bins used in calculating marginals
+ :type marginals: dictionary with int as keys and :class:'~numpy.ndarray'
+ of shape (nbins+1,) as values
+ :param bins: Endpoints of bins used in calculating marginals
:type bins: :class:'~numpy.ndarray' of shape (nbins+1,)
:param sigma: Smoothing parameter in each direction.
:type sigma: :float or :class:'~numpy.ndarray' of shape (ndim,)
@@ -270,7 +250,7 @@ def smooth_marginals_1D(marginals, bins, sigma=10.0):
from scipy.fftpack import fftshift, ifft, fft
if isinstance(sigma, float):
- sigma = sigma*np.ones(len(bins),dtype=np.int)
+ sigma = sigma*np.ones(len(bins), dtype=np.int)
marginals_smooth = {}
index = copy.deepcopy(marginals.keys())
index.sort()
@@ -286,20 +266,21 @@ def smooth_marginals_1D(marginals, bins, sigma=10.0):
aug_kernel[augx:augx+nx] = kernel
aug_marginals[augx:augx+nx] = marginals[i]
- aug_kernel=fftshift(aug_kernel)
+ aug_kernel = fftshift(aug_kernel)
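+ # Smooth by circular convolution: multiply the FFTs of the
+ # zero-padded kernel and marginal, invert, and keep the real part.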
aug_marginals_smooth = np.real(ifft(fft(aug_kernel)*fft(aug_marginals)))
marginals_smooth[i] = aug_marginals_smooth[augx:augx+nx]
- marginals_smooth[i] = marginals_smooth[i]/np.sum(marginals_smooth[i])
+ marginals_smooth[i] = marginals_smooth[i]/np.sum(marginals_smooth[i])
return marginals_smooth
-def smooth_marginals_2D(marginals, bins, sigma=10.0):
+def smooth_marginals_2D(marginals, bins, sigma=10.0):
"""
This function smooths 2D marginal probabilities.
:param marginals: 2D marginal probabilities
- :type marginals: dictionary with tuples of 2 integers as keys and :class:'~numpy.ndarray' of shape (nbins+1,) as values
+ :type marginals: dictionary with tuples of 2 integers as keys and
+ :class:'~numpy.ndarray' of shape (nbins+1,) as values
:param bins: Endpoints of bins used in calculating marginals
:type bins: :class:'~numpy.ndarray' of shape (nbins+1,)
:param sigma: Smoothing parameter in each direction.
@@ -310,11 +291,11 @@ def smooth_marginals_2D(marginals, bins, sigma=10.0):
from scipy.fftpack import fftshift, ifft2, fft2
if isinstance(sigma, float):
- sigma = sigma*np.ones(len(bins),dtype=np.int)
+ sigma = sigma*np.ones(len(bins), dtype=np.int)
marginals_smooth = {}
pairs = copy.deepcopy(marginals.keys())
pairs.sort()
- for k, (i, j) in enumerate(pairs):
+ for (i, j) in pairs:
nx = len(bins[i])-1
ny = len(bins[j])-1
dx = bins[i][1] - bins[i][0]
@@ -328,17 +309,20 @@ def smooth_marginals_2D(marginals, bins, sigma=10.0):
X, Y = np.meshgrid(x_kernel, y_kernel, indexing='ij')
kernel = np.exp(-(X/sigma[i])**2-(Y/sigma[j])**2)
- aug_kernel = np.zeros((nx+2*augx,ny+2*augy))
- aug_marginals = np.zeros((nx+2*augx,ny+2*augy))
+ aug_kernel = np.zeros((nx+2*augx, ny+2*augy))
+ aug_marginals = np.zeros((nx+2*augx, ny+2*augy))
- aug_kernel[augx:augx+nx,augy:augy+ny] = kernel
- aug_marginals[augx:augx+nx,augy:augy+ny] = marginals[(i,j)]
+ aug_kernel[augx:augx+nx, augy:augy+ny] = kernel
+ aug_marginals[augx:augx+nx, augy:augy+ny] = marginals[(i, j)]
- aug_kernel=fftshift(aug_kernel,0)
- aug_kernel=fftshift(aug_kernel,1)
+ aug_kernel = fftshift(aug_kernel, 0)
+ aug_kernel = fftshift(aug_kernel, 1)
- aug_marginals_smooth = np.real(ifft2(fft2(aug_kernel)*fft2(aug_marginals)))
- marginals_smooth[(i,j)] = aug_marginals_smooth[augx:augx+nx,augy:augy+ny]
- marginals_smooth[(i,j)] = marginals_smooth[(i,j)]/np.sum(marginals_smooth[(i,j)])
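+ # Same smoothing in 2D: convolve via fft2/ifft2 and discard the
+ # imaginary round-off before renormalizing.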
+ aug_marginals_smooth = ifft2(fft2(aug_kernel)*fft2(aug_marginals))
+ aug_marginals_smooth = np.real(aug_marginals_smooth)
+ marginals_smooth[(i, j)] = aug_marginals_smooth[augx:augx+nx,
+ augy:augy+ny]
+ marginals_smooth[(i, j)] = (marginals_smooth[(i, j)] /
+ np.sum(marginals_smooth[(i, j)]))
return marginals_smooth
diff --git a/bet/postProcess/postTools.py b/bet/postProcess/postTools.py
index 7690b96b..acb70e61 100644
--- a/bet/postProcess/postTools.py
+++ b/bet/postProcess/postTools.py
@@ -1,18 +1,18 @@
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
"""
-This module provides methods for plotting probabilities.
+This module provides methods for postprocessing probabilities and data.
"""
-from bet.Comm import *
+from bet.Comm import rank
import numpy as np
-import copy
-import math
import scipy.io as sio
def sort_by_rho(P_samples, samples, lam_vol=None, data=None):
"""
- This sorts the samples by probability density. It returns the sorted values.
- If the samples are iid, no volume data is needed. It is optional to sort the QoI
- data, but be sure to do so if using it later.
+ This sorts the samples by probability density. It returns the sorted
+ values. If the samples are iid, no volume data is needed. It is optional
+ to sort the QoI data, but be sure to do so if using it later.
:param P_samples: Probabilities.
:type P_samples: :class:'~numpy.ndarray' of shape (num_samples,)
@@ -26,26 +26,33 @@ def sort_by_rho(P_samples, samples, lam_vol=None, data=None):
:returns: (P_samples, samples, lam_vol, data)
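+
+ :Example: (a minimal sketch; ``P`` and ``lam_samples`` are hypothetical)
+
+ >>> (P_sort, lam_sort, _, _) = sort_by_rho(P, lam_samples)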
"""
- nnz = np.sum(P_samples>0)
+ if len(samples.shape) == 1:
+ samples = np.expand_dims(samples, axis=1)
+ nnz = np.sum(P_samples > 0)
if lam_vol == None:
indices = np.argsort(P_samples)[::-1][0:nnz]
else:
indices = np.argsort(P_samples/lam_vol)[::-1][0:nnz]
P_samples = P_samples[indices]
- samples = samples[indices,:]
+ samples = samples[indices, :]
if lam_vol != None:
lam_vol = lam_vol[indices]
if data != None:
- data = data[indices]
+ if len(data.shape) == 1:
+ data = np.expand_dims(data, axis=1)
+ data = data[indices, :]
return (P_samples, samples, lam_vol, data)
-def sample_highest_prob(top_percentile, P_samples, samples, lam_vol=None, data=None, sort=True):
+def sample_highest_prob(top_percentile, P_samples, samples, lam_vol=None,
+ data=None, sort=True):
"""
- This calculates the highest probability samples whose probability sum to a given value.
- The number of high probability samples that sum to the value and the probabilities,
- samples, volumes, and data are returned. This assumes that ``P_samples``, ``samples``,
- ``lam_vol``, and ``data`` have all be sorted using :meth:`~bet.postProcess.sort_by_rho`.
+ This calculates the highest probability samples whose probabilities sum
+ to a given value. The number of high probability samples that sum to the
+ value and the probabilities, samples, volumes, and data are returned.
+ This assumes that ``P_samples``, ``samples``, ``lam_vol``, and ``data``
+ have all been sorted using :meth:`~bet.postProcess.sort_by_rho`.
:param top_percentile: ratio of highest probability samples to select
:type top_percentile: float
@@ -61,29 +68,30 @@ def sample_highest_prob(top_percentile, P_samples, samples, lam_vol=None, data=N
:returns: ( num_samples, P_samples, samples, lam_vol, data)
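+
+ :Example: (a minimal sketch; keeps the samples carrying 95% of the
+ probability)
+
+ >>> (num, P_top, lam_top, _, _) = sample_highest_prob(0.95, P,
+ ... lam_samples)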
"""
-
+ if len(samples.shape) == 1:
+ samples = np.expand_dims(samples, axis=1)
if sort:
- (P_samples, samples, lam_vol, data) = sort_by_rho(P_samples, samples, lam_vol, data)
+ (P_samples, samples, lam_vol, data) = sort_by_rho(P_samples, samples,
+ lam_vol, data)
P_sum = np.cumsum(P_samples)
num_samples = np.sum(P_sum <= top_percentile)
P_samples = P_samples[0:num_samples]
- samples = samples[0:num_samples,:]
+ samples = samples[0:num_samples, :]
if lam_vol != None:
lam_vol = lam_vol[0:num_samples]
if data != None:
- data = data[0:num_samples,:]
+ if len(data.shape) == 1:
+ data = np.expand_dims(data, axis=1)
+ data = data[0:num_samples, :]
return (num_samples, P_samples, samples, lam_vol, data)
-
-def save_parallel_probs_csv(P_samples,
- samples,
- P_file,
- lam_file,
- compress=False):
+def save_parallel_probs_csv(P_samples, samples, P_file, lam_file,
+ compress=False):
"""
- Saves probabilites and samples from parallel runs in individual .csv files for each process.
+ Saves probabilities and samples from parallel runs in individual .csv
+ files for each process.
:param P_samples: Probabilities.
:type P_samples: :class:'~numpy.ndarray' of shape (num_samples,)
@@ -102,16 +110,14 @@ def save_parallel_probs_csv(P_samples,
else:
suffix = '.csv'
- np.savetxt(P_file + `rank` + suffix ,P_samples, delimiter = ',')
- np.savetxt(lam_file + `rank` + suffix ,samples, delimiter = ',')
+ np.savetxt(P_file + str(rank) + suffix, P_samples, delimiter=',')
+ np.savetxt(lam_file + str(rank) + suffix, samples, delimiter=',')
-def collect_parallel_probs_csv(P_file,
- lam_file,
- num_files,
- save = False,
- compress=False):
+def collect_parallel_probs_csv(P_file, lam_file, num_files, save=False,
+ compress=False):
"""
- Collects probabilities and samples saved in .csv format from parallel runs into single arrays.
+ Collects probabilities and samples saved in .csv format from parallel runs
+ into single arrays.
:param P_file: file prefix for probabilities
:type P_file: str
@@ -133,9 +139,9 @@ def collect_parallel_probs_csv(P_file,
P = np.loadtxt(P_file + '0' + suffix)
lam = np.loadtxt(lam_file + '0' + suffix)
- for i in range(1,num_files):
- P = np.vstack((P, np.loadtxt(P_file + `i` + suffix)))
- lam = np.vstack((lam,np.loadtxt(lam_file + `i` + suffix)))
+ for i in range(1, num_files):
+ P = np.vstack((P, np.loadtxt(P_file + str(i) + suffix)))
+ lam = np.vstack((lam, np.loadtxt(lam_file + str(i) + suffix)))
if save:
np.savetxt(P_file + 'all' + suffix, P)
@@ -143,12 +149,10 @@ def collect_parallel_probs_csv(P_file,
return (P, lam)
-def save_parallel_probs_mat(P_samples,
- samples,
- file_prefix,
- compress=False):
+def save_parallel_probs_mat(P_samples, samples, file_prefix, compress=False):
"""
- Saves probabilites and samples from parallel runs in individual .mat files for each process.
+ Saves probabilities and samples from parallel runs in individual .mat
+ files for each process.
:param P_samples: Probabilities.
:type P_samples: :class:'~numpy.ndarray' of shape (num_samples,)
@@ -158,16 +162,15 @@ def save_parallel_probs_mat(P_samples,
:type file_prefix: str
:returns: None
"""
- file_dict={"P_samples": P_samples,
+ file_dict = {"P_samples": P_samples,
"samples": samples}
- sio.savemat(file_prefix + `rank` , file_dict, do_compression=compress)
+ sio.savemat(file_prefix + str(rank), file_dict, do_compression=compress)
-def collect_parallel_probs_mat(file_prefix,
- num_files,
- save = False,
- compress = False):
+def collect_parallel_probs_mat(file_prefix, num_files, save=False,
+ compress=False):
"""
- Collects probabilities and samples saved in .mat format from parallel runs into single arrays.
+ Collects probabilities and samples saved in .mat format from parallel runs
+ into single arrays.
:param file_prefix: file prefix
:type file_prefix: str
@@ -183,10 +186,10 @@ def collect_parallel_probs_mat(file_prefix,
file_dict = sio.io.loadmat(file_prefix + "0")
P = file_dict["P_samples"]
lam = file_dict["samples"]
- for i in range(1,num_files):
- file_dict = sio.io.loadmat(file_prefix + `i`)
+ for i in range(1, num_files):
+ file_dict = sio.io.loadmat(file_prefix + str(i))
P = np.vstack((P, file_dict["P_samples"]))
- lam = np.vstack((lam,file_dict["samples"]))
+ lam = np.vstack((lam, file_dict["samples"]))
if save:
file_dict = {"P_samples": P,
@@ -194,3 +197,79 @@ def collect_parallel_probs_mat(file_prefix,
sio.savemat(file_prefix + "all", file_dict, do_compression=compress)
return (P, lam)
+
+def compare_yield(sort_ind, sample_quality, run_param, column_headings=None):
+ """
+
+ Compare the quality of samples where ``sample_quality`` is the measure of
+ quality by which the sets of samples have been indexed and ``sort_ind`` is
+ an array of the sorted indices.
+
+ :param list() sort_ind: indices that index ``sample_quality`` in sorted
+ order
+ :param list() sample_quality: a measure of quality by which the sets of
+ samples are sorted
+ :param list() run_param: zipped list of :class:`~numpy.ndarray`s containing
+ information used to generate the sets of samples to be displayed
+ :param list() column_headings: Column headings to print to screen
+
+ """
+ if column_headings == None:
+ column_headings = "Run parameters"
+ print "Sample Set No., Quality, "+ str(column_headings)
+ for i in reversed(sort_ind):
+ print i, sample_quality[i], np.round(run_param[i], 3)
+
+def in_high_prob(data, rho_D, maximum, sample_nos=None):
+ """
+
+ Estimates the number of samples in high probability regions of D.
+
+ :param data: Data associated with ``samples``
+ :type data: :class:`np.ndarray`
+ :param rho_D: probability density on D
+ :type rho_D: callable function that takes a :class:`np.array` and returns a
+ :class:`np.ndarray`
+ :param float maximum: maximum (or average) value of ``rho_D``
+ :param list sample_nos: sample numbers to plot
+
+ :rtype: int
+ :returns: Estimate of number of samples in the high probability area.
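+
+ :Example: (a minimal sketch; ``dat``, ``rho``, and ``rho_max`` are
+ hypothetical)
+
+ >>> num_high = in_high_prob(dat, rho, rho_max)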
+
+ """
+ if sample_nos == None:
+ sample_nos = range(data.shape[0])
+ if len(data.shape) == 1:
+ rD = rho_D(data[sample_nos])
+ else:
+ rD = rho_D(data[sample_nos, :])
+ adjusted_total_prob = int(sum(rD)/maximum)
+ print "Samples in box "+str(adjusted_total_prob)
+ return adjusted_total_prob
+
+def in_high_prob_multi(results_list, rho_D, maximum, sample_nos_list=None):
+ """
+
+ Estimates the number of samples in high probability regions of D for a list
+ of results.
+
+ :param list results_list: list of (results, data) tuples
+ :param rho_D: probability density on D
+ :type rho_D: callable function that takes a :class:`np.array` and returns a
+ :class:`np.ndarray`
+ :param float maximum: maximum (or average) value of ``rho_D``
+ :param list sample_nos_list: list of sample numbers to plot (list of lists)
+
+ :rtype: list of int
+ :returns: Estimate of number of samples in the high probability area.
+
+ """
+ adjusted_total_prob = list()
+ if sample_nos_list:
+ for result, sample_nos in zip(results_list, sample_nos_list):
+ adjusted_total_prob.append(in_high_prob(result[1], rho_D, maximum,
+ sample_nos))
+ else:
+ for result in results_list:
+ adjusted_total_prob.append(in_high_prob(result[1], rho_D, maximum))
+ return adjusted_total_prob
diff --git a/bet/sampling/__init__.py b/bet/sampling/__init__.py
index 6c878f87..4e5806f8 100644
--- a/bet/sampling/__init__.py
+++ b/bet/sampling/__init__.py
@@ -1,3 +1,5 @@
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
"""
This subpackage contains
diff --git a/bet/sampling/adaptiveSampling.py b/bet/sampling/adaptiveSampling.py
index 8dbad904..fe2bfeb5 100644
--- a/bet/sampling/adaptiveSampling.py
+++ b/bet/sampling/adaptiveSampling.py
@@ -1,3 +1,5 @@
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
# -*- coding: utf-8 -*-
# Lindley Graham 3/10/2014
"""
@@ -17,8 +19,6 @@
import math, os
from bet.Comm import *
-size = comm.Get_size()
-rank = comm.Get_rank()
def loadmat(save_file, lb_model=None):
"""
@@ -26,8 +26,8 @@ def loadmat(save_file, lb_model=None):
:class:`~bet.sampling.adaptiveSampling.sampler` object.
:param string save_file: file name
- :param model: runs the model at a given set of parameter samples and
- returns data
+ :param lb_model: runs the model at a given set of parameter samples, (N,
+ ndim), and returns data (N, mdim)
:rtype: tuple
:returns: (sampler, samples, data)
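+
+ :Example: (a minimal sketch; 'chain.mat' and ``my_model`` are
+ hypothetical)
+
+ >>> (new_sampler, samples, data) = loadmat('chain.mat', lb_model=my_model)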
@@ -37,16 +37,18 @@ def loadmat(save_file, lb_model=None):
# load the samples
if mdat.has_key('samples'):
samples = mdat['samples']
+ num_samples = samples.shape[0]
else:
samples = None
+ num_samples = np.squeeze(mdat['num_samples'])
# load the data
if mdat.has_key('data'):
data = mdat['data']
else:
data = None
# recreate the sampler
- new_sampler = sampler(mdat['num_samples'], mdat['chain_length'],
- lb_model)
+ new_sampler = sampler(num_samples,
+ np.squeeze(mdat['chain_length']), lb_model)
return (new_sampler, samples, data)
@@ -65,6 +67,11 @@ class sampler(bsam.sampler):
def __init__(self, num_samples, chain_length, lb_model):
"""
Initialization
+
+ :param int num_samples: Total number of samples
+ :param int chain_length: Number of samples per chain
+ :param lb_model: runs the model at a given set of parameter samples, (N,
+ ndim), and returns data (N, mdim)
"""
super(sampler, self).__init__(lb_model, num_samples)
self.chain_length = chain_length
@@ -88,12 +95,13 @@ def update_mdict(self, mdict):
mdict['sample_batch_no'] = self.sample_batch_no
def run_gen(self, kern_list, rho_D, maximum, param_min, param_max,
- t_kernel, savefile, initial_sample_type="lhs", criterion='center'):
+ t_set, savefile, initial_sample_type="lhs", criterion='center'):
"""
Generates samples using generalized chains and a list of different
kernels.
- :param list() kern_list: List of kernels.
+ :param list() kern_list: List of
+ :class:`~bet.sampling.adaptiveSampling.kernel` objects.
:param rho_D: probability density on D
:type rho_D: callable function that takes a :class:`np.array` and
returns a :class:`numpy.ndarray`
@@ -102,11 +110,9 @@ def run_gen(self, kern_list, rho_D, maximum, param_min, param_max,
:type param_min: np.array (ndim,)
:param param_max: maximum value for each parameter dimension
:type param_max: np.array (ndim,)
- :param t_kernel: method for creating new parameter steps using
+ :param t_set: method for creating new parameter steps using
a given step size based on the parameter domain size
- :type t_kernel: :class:~`t_kernel`
- :param function kernel: functional that acts on the data used to
- determine the proposed change to the ``step_size``
+ :type t_set: :class:`~bet.sampling.adaptiveSampling.transition_set`
:param string savefile: filename to save samples and data
:param string initial_sample_type: type of initial sample random (or r),
latin hypercube(lhs), or space-filling curve(TBD)
@@ -124,7 +130,7 @@ def run_gen(self, kern_list, rho_D, maximum, param_min, param_max,
mean_ss = list()
for kern in kern_list:
(samples, data, step_sizes) = self.generalized_chains(
- param_min, param_max, t_kernel, kern, savefile,
+ param_min, param_max, t_set, kern, savefile,
initial_sample_type, criterion)
results.append((samples, data))
r_step_size.append(step_sizes)
@@ -133,56 +139,6 @@ def run_gen(self, kern_list, rho_D, maximum, param_min, param_max,
sort_ind = np.argsort(results_rD)
return (results, r_step_size, results_rD, sort_ind, mean_ss)
- def run_reseed(self, kern_list, rho_D, maximum, param_min, param_max,
- t_kernel, savefile, initial_sample_type="lhs", criterion='center',
- reseed=3):
- """
- Generates samples using reseeded chains and a list of different
- kernels.
-
- THIS IS NOT OPERATIONAL DO NOT USE.
-
- :param list() kern_list: List of kernels.
- :param rho_D: probability density on D
- :type rho_D: callable function that takes a :class:`np.array` and
- returns a :class:`numpy.ndarray`
- :param double maximum: maximum value of rho_D
- :param param_min: minimum value for each parameter dimension
- :type param_min: np.array (ndim,)
- :param param_max: maximum value for each parameter dimension
- :type param_max: np.array (ndim,)
- :param t_kernel: method for creating new parameter steps using
- given a step size based on the paramter domain size
- :type t_kernel: :class:~`t_kernel`
- :param function kernel: functional that acts on the data used to
- determine the proposed change to the ``step_size``
- :param string savefile: filename to save samples and data
- :param string initial_sample_type: type of initial sample random (or r),
- latin hypercube(lhs), or space-filling curve(TBD)
- :param string criterion: latin hypercube criterion see
- `PyDOE `_
- :rtype: tuple
- :returns: ((samples, data), all_step_ratios, num_high_prob_samples,
- sorted_incidices_of_num_high_prob_samples, average_step_ratio)
-
- """
- results = list()
- # reseeding sampling
- results = list()
- r_step_size = list()
- results_rD = list()
- mean_ss = list()
- for kern in kern_list:
- (samples, data, step_sizes) = self.reseed_chains(
- param_min, param_max, t_kernel, kern, savefile,
- initial_sample_type, criterion, reseed)
- results.append((samples, data))
- r_step_size.append(step_sizes)
- results_rD.append(int(sum(rho_D(data)/maximum)))
- mean_ss.append(np.mean(step_sizes))
- sort_ind = np.argsort(results_rD)
- return (results, r_step_size, results_rD, sort_ind, mean_ss)
-
def run_tk(self, init_ratio, min_ratio, max_ratio, rho_D, maximum,
param_min, param_max, kernel, savefile,
initial_sample_type="lhs", criterion='center'):
@@ -205,11 +161,9 @@ def run_tk(self, init_ratio, min_ratio, max_ratio, rho_D, maximum,
:type param_min: np.array (ndim,)
:param param_max: maximum value for each parameter dimension
:type param_max: np.array (ndim,)
- :param t_kernel: method for creating new parameter steps using
- given a step size based on the paramter domain size
- :type t_kernel: :class:~`t_kernel`
- :param function kernel: functional that acts on the data used to
+ :param kernel: functional that acts on the data used to
determine the proposed change to the ``step_size``
+ :type kernel: :class:`~bet.sampling.adaptiveSampling.kernel` object.
:param string savefile: filename to save samples and data
:param string initial_sample_type: type of initial sample random (or r),
latin hypercube(lhs), or space-filling curve(TBD)
@@ -225,9 +179,9 @@ def run_tk(self, init_ratio, min_ratio, max_ratio, rho_D, maximum,
results_rD = list()
mean_ss = list()
for i, j, k in zip(init_ratio, min_ratio, max_ratio):
- tk = transition_set(i, j, k)
+ ts = transition_set(i, j, k)
(samples, data, step_sizes) = self.generalized_chains(
- param_min, param_max, tk, kernel, savefile,
+ param_min, param_max, ts, kernel, savefile,
initial_sample_type, criterion)
results.append((samples, data))
r_step_size.append(step_sizes)
@@ -237,7 +191,7 @@ def run_tk(self, init_ratio, min_ratio, max_ratio, rho_D, maximum,
return (results, r_step_size, results_rD, sort_ind, mean_ss)
def run_inc_dec(self, increase, decrease, tolerance, rho_D, maximum,
- param_min, param_max, t_kernel, savefile,
+ param_min, param_max, t_set, savefile,
initial_sample_type="lhs", criterion='center'):
"""
Generates samples using generalized chains and
@@ -256,11 +210,9 @@ def run_inc_dec(self, increase, decrease, tolerance, rho_D, maximum,
:type param_min: np.array (ndim,)
:param param_max: maximum value for each parameter dimension
:type param_max: np.array (ndim,)
- :param t_kernel: method for creating new parameter steps using
+ :param t_set: method for creating new parameter steps using
a given step size based on the parameter domain size
- :type t_kernel: :class:~`t_kernel`
- :param function kernel: functional that acts on the data used to
- determine the proposed change to the ``step_size``
+ :type t_set: :class:`~bet.sampling.adaptiveSampling.transition_set`
:param string savefile: filename to save samples and data
:param string initial_sample_type: type of initial sample random (or r),
latin hypercube(lhs), or space-filling curve(TBD)
@@ -275,9 +227,9 @@ def run_inc_dec(self, increase, decrease, tolerance, rho_D, maximum,
for i, j, z in zip(increase, decrease, tolerance):
kern_list.append(rhoD_kernel(maximum, rho_D, i, j, z))
return self.run_gen(kern_list, rho_D, maximum, param_min, param_max,
- t_kernel, savefile, initial_sample_type, criterion)
+ t_set, savefile, initial_sample_type, criterion)
- def generalized_chains(self, param_min, param_max, t_kernel, kern,
+ def generalized_chains(self, param_min, param_max, t_set, kern,
savefile, initial_sample_type="lhs", criterion='center'):
"""
Basic adaptive sampling algorithm using generalized chains.
@@ -288,11 +240,12 @@ def generalized_chains(self, param_min, param_max, t_kernel, kern,
:type param_min: np.array (ndim,)
:param param_max: maximum value for each parameter dimension
:type param_max: np.array (ndim,)
- :param t_kernel: method for creating new parameter steps using
+ :param t_set: method for creating new parameter steps using
a given step size based on the parameter domain size
- :type t_kernel: :class:~`t_kernel`
+ :type t_set: :class:`~bet.sampling.adaptiveSampling.transition_set`
:param function kern: functional that acts on the data used to
determine the proposed change to the ``step_size``
+ :type kern: :class:`~bet.sampling.adaptiveSampling.kernel` object.
:param string savefile: filename to save samples and data
:param string criterion: latin hypercube criterion see
`PyDOE `_
@@ -305,7 +258,7 @@ def generalized_chains(self, param_min, param_max, t_kernel, kern,
"""
if size > 1:
- savefile = os.path.join(os.path.dirname(savefile),
+ psavefile = os.path.join(os.path.dirname(savefile),
"proc{}{}".format(rank, os.path.basename(savefile)))
+ else:
+ # Guard against a NameError on a single process; psavefile is
+ # used unconditionally below.
+ psavefile = savefile
# Initialize Nx1 vector Step_size = something reasonable (based on size
@@ -313,17 +266,18 @@ def generalized_chains(self, param_min, param_max, t_kernel, kern,
# Calculate domain size
param_left = np.repeat([param_min], self.num_chains_pproc, 0)
param_right = np.repeat([param_max], self.num_chains_pproc, 0)
+
param_width = param_right - param_left
# Calculate step_size
- max_ratio = t_kernel.max_ratio
- min_ratio = t_kernel.min_ratio
- step_ratio = t_kernel.init_ratio*np.ones(self.num_chains_pproc)
+ max_ratio = t_set.max_ratio
+ min_ratio = t_set.min_ratio
+ step_ratio = t_set.init_ratio*np.ones(self.num_chains_pproc)
# Initialize first batch of N samples (maybe taken from latin
# hypercube/space-filling curve to fully explore parameter space - not
# necessarily random). Call these Samples_old.
(samples_old, data_old) = super(sampler, self).random_samples(
- initial_sample_type, param_min, param_max, savefile,
+ initial_sample_type, param_min, param_max, psavefile,
self.num_chains, criterion)
self.num_samples = self.chain_length * self.num_chains
comm.Barrier()
@@ -345,7 +299,7 @@ def generalized_chains(self, param_min, param_max, t_kernel, kern,
for batch in xrange(1, self.chain_length):
# For each of N samples_old, create N new parameter samples using
# transition set and step_ratio. Call these samples samples_new.
- samples_new = t_kernel.step(step_ratio, param_width,
+ samples_new = t_set.step(step_ratio, param_width,
param_left, param_right, MYsamples_old)
# Solve the model for the samples_new.
@@ -372,9 +326,8 @@ def generalized_chains(self, param_min, param_max, t_kernel, kern,
mdat['step_ratios'] = all_step_ratios
mdat['samples'] = samples
mdat['data'] = data
- super(sampler, self).save(mdat, "p"+str(rank)+savefile)
+ super(sampler, self).save(mdat, psavefile)
- # samples_old = samples_new
MYsamples_old = samples_new
# collect everything
@@ -398,37 +351,7 @@ def generalized_chains(self, param_min, param_max, t_kernel, kern,
super(sampler, self).save(mdat, savefile)
return (samples, data, all_step_ratios)
-
- def reseed_chains(self, param_min, param_max, t_kernel, kern,
- savefile, initial_sample_type="lhs", criterion='center', reseed=1):
- """
- Basic adaptive sampling algorithm.
-
- NOT YET IMPLEMENTED.
-
- :param string initial_sample_type: type of initial sample random (or r),
- latin hypercube(lhs), or space-filling curve(TBD)
- :param param_min: minimum value for each parameter dimension
- :type param_min: np.array (ndim,)
- :param param_max: maximum value for each parameter dimension
- :type param_max: np.array (ndim,)
- :param t_kernel: method for creating new parameter steps using
- given a step size based on the paramter domain size
- :type t_kernel: :class:~`t_kernel`
- :param function kern: functional that acts on the data used to
- determine the proposed change to the ``step_size``
- :param string savefile: filename to save samples and data
- :param string criterion: latin hypercube criterion see
- `PyDOE `_
- :param int reseed: number of times to reseed the chains
- :rtype: tuple
- :returns: (``parameter_samples``, ``data_samples``) where
- ``parameter_samples`` is np.ndarray of shape (num_samples, ndim)
- and ``data_samples`` is np.ndarray of shape (num_samples, mdim)
-
- """
- pass
-
+
def kernels(Q_ref, rho_D, maximum):
"""
Generates a list of kernel objects.
@@ -440,15 +363,13 @@ def kernels(Q_ref, rho_D, maximum):
a :class:`np.ndarray`
:param double maximum: maximum value of rho_D
:rtype: list()
- :returns: [maxima_mean_kernel, rhoD_kernel, maxima_kernel,
- multi_dist_kernel]
+ :returns: [maxima_mean_kernel, rhoD_kernel, maxima_kernel]
"""
kern_list = list()
kern_list.append(maxima_mean_kernel(np.array([Q_ref]), rho_D))
kern_list.append(rhoD_kernel(maximum, rho_D))
kern_list.append(maxima_kernel(np.array([Q_ref]), rho_D))
- kern_list.append(multi_dist_kernel())
return kern_list
class transition_set(object):
@@ -472,6 +393,11 @@ class transition_set(object):
def __init__(self, init_ratio, min_ratio, max_ratio):
"""
Initialization
+
+ :param double init_ratio: initial step ratio
+ :param double min_ratio: minimum step_ratio
+ :param double max_ratio: maximum step_ratio
+
"""
self.init_ratio = init_ratio
self.min_ratio = min_ratio
@@ -487,7 +413,13 @@ def step(self, step_ratio, param_width, param_left, param_right,
:param step_ratio: define maximum step_size = ``step_ratio*param_width``
:type step_ratio: :class:`np.array` of shape (num_samples,)
:param param_width: width of the parameter domain
- :type param_width: np.array (ndim,)
+ :type param_width: :class:`np.ndarray` of shape (ndim,)
+ :param param_left: minimum boundary of the parameter domain
+ :type param_left: :class:`np.ndarray` of shape (N, ndim) where N is the
+ length of ``step_ratio``
+ :param param_right: maximum boundary of the parameter domain
+ :type param_right: :class:`np.ndarray` of shape (N, ndim) where N is the
+ length of ``step_ratio``
:param samples_old: Parameter samples from the previous step.
:type samples_old: :class:`~numpy.ndarray` of shape (num_samples,
ndim)
@@ -523,7 +455,7 @@ class kernel(object):
this is simply a skeleton parent class it does not change the step size at
all.
- tolerance
+ TOL
a tolerance used to determine if two different values are close
increase
the multiple to increase the step size by
@@ -534,6 +466,11 @@ class kernel(object):
def __init__(self, tolerance=1E-08, increase=1.0, decrease=1.0):
"""
Initialization
+
+ :param double tolerance: Tolerance for comparing two values
+ :param double increase: The multiple to increase the step size by
+ :param double decrease: The multiple to decrease the step size by
+
"""
self.TOL = tolerance
self.increase = increase
@@ -581,6 +518,13 @@ def __init__(self, maximum, rho_D, tolerance=1E-08, increase=2.0,
decrease=0.5):
"""
Initialization
+
+ :param double maximum: maximum value of rho_D
+ :param function rho_D: probability density on D
+ :param double tolerance: Tolerance for comparing two values
+ :param double increase: The multiple to increase the step size by
+ :param double decrease: The multiple to decrease the step size by
+
"""
self.MAX = maximum
self.rho_D = rho_D
@@ -634,7 +578,7 @@ class maxima_kernel(kernel):
maxima
locations of the maxima of rho_D on D
- np.array of shape (num_maxima, mdim)
+ :class:`np.ndarray` of shape (num_maxima, mdim)
rho_max
rho_D(maxima), list of maximum values of rho_D
tolerance
@@ -650,6 +594,16 @@ def __init__(self, maxima, rho_D, tolerance=1E-08, increase=2.0,
decrease=0.5):
"""
Initialization
+
+ :param maxima: locations of the maxima of rho_D on D
+ :type maxima: :class:`np.ndarray` of shape (num_maxima, mdim)
+ :param rho_D: probability density on D
+ :type rho_D: callable function that takes a :class:`np.array` and returns
+ a :class:`np.ndarray`
+ :param double tolerance: Tolerance for comparing two values
+ :param double increase: The multiple to increase the step size by
+ :param double decrease: The multiple to decrease the step size by
+
"""
self.MAXIMA = maxima
self.num_maxima = maxima.shape[0]
@@ -729,6 +683,16 @@ def __init__(self, maxima, rho_D, tolerance=1E-08, increase=2.0,
decrease=0.5):
"""
Initialization
+
+ :param maxima: locations of the maxima of rho_D on D
+ :type maxima: :class:`np.ndarray` of shape (num_maxima, mdim)
+ :param rho_D: probability density on D
+ :type rho_D: callable function that takes a :class:`np.array` and returns
+ a :class:`np.ndarray`
+ :param double tolerance: Tolerance for comparing two values
+ :param double increase: The multiple to increase the step size by
+ :param double decrease: The multiple to decrease the step size by
+
"""
self.radius = None
self.mean = None
@@ -810,106 +774,4 @@ def delta_step(self, data_new, kern_old=None):
return (kern_new, proposal)
-class multi_dist_kernel(kernel):
- """
- The goal is to make a sampling that is robust to different types of
- distributions on QoI, i.e., we do not know a priori where the regions of
- high probability are in D. This class provides a method for determining the
- proposed step size as follows. We keep track of the change of the QoI
- values from one sample to the next compared to the total range of QoI
- values explored so far. If a big relative change is detected, then you know
- that you have come across a region with larger derivatives and you should
- place more samples around there to resolve the induced regions of
- generalized contours, i.e., reduce the step size. If the change in QoI
- values is relatively small, you are in a region where there is little
- sensitivity, so take larger step sizes.
-
- radius
- current estimate of the radius of D (1/2 the diameter of D)
- mean
- current estimate of the mean QoI
- current_clength
- current batch number
- TOL
- a tolerance used to determine if two different values are close
- increase
- the multiple to increase the step size by
- decrease
- the multiple to decrease the step size by
-
- """
-
- def __init__(self, tolerance=1E-08, increase=2.0,
- decrease=0.5):
- """
- Initialization
- """
- self.radius = None
- self.mean = None
- self.current_clength = 0
- super(multi_dist_kernel, self).__init__(tolerance, increase,
- decrease)
-
- def reset(self):
- """
- Resets the the batch number and the estimates of the mean and maximum
- distance from the mean.
- """
- self.radius = None
- self.mean = None
- self.current_clength = 0
-
- def delta_step(self, data_new, kern_old=None):
- """
- This method determines the proposed change in step size.
-
- :param data_new: QoI for a given batch of samples
- :type data_new: :class:`np.array` of shape (num_chains, mdim)
- :param kern_old: QoI evaluated at previous step
- :rtype: tuple
- :returns: (kern_new, proposal)
-
- """
- # Evaluate kernel for new data.
- kern_new = data_new
- self.current_clength = self.current_clength + 1
-
- if kern_old == None:
- proposal = None
- # calculate the mean
- self.mean = np.mean(data_new, 0)
- # calculate the distance from the mean
- vec_from_mean = kern_new - np.repeat([self.mean],
- kern_new.shape[0], 0)
- # estimate the radius of D
- self.radius = np.max(np.linalg.norm(vec_from_mean, 2, 1))
- else:
- # update the estimate of the mean
- self.mean = (self.current_clength-1)*self.mean + np.mean(data_new, 0)
- self.mean = self.mean / self.current_clength
- # calculate the distance from the mean
- vec_from_mean = kern_new - np.repeat([self.mean],
- kern_new.shape[0], 0)
- # esitmate the radius of D
- self.radius = max(np.max(np.linalg.norm(vec_from_mean, 2, 1)),
- self.radius)
- # calculate the relative change in QoI
- kern_diff = (kern_new-kern_old)
- # normalize by the radius of D
- kern_diff = np.linalg.norm(vec_from_mean, 2, 1)#/self.radius
- # Compare to kernel for old data.
- # Is the kernel NOT close?
- kern_close = np.logical_not(np.isclose(kern_diff, 0,
- atol=self.TOL))
- # Is the kernel greater/lesser?
- kern_greater = np.logical_and(kern_diff > 0, kern_close)
- kern_lesser = np.logical_and(kern_diff < 0, kern_close)
- # Determine step size
- proposal = np.ones(kern_diff.shape)
- proposal[kern_greater] = self.decrease
- proposal[kern_lesser] = self.increase
- return (kern_new, proposal)
-
-
-
diff --git a/bet/sampling/basicSampling.py b/bet/sampling/basicSampling.py
index 8eca6b91..c7592f3f 100644
--- a/bet/sampling/basicSampling.py
+++ b/bet/sampling/basicSampling.py
@@ -1,3 +1,5 @@
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
# Lindley Graham 4/15/2014
"""
This module contains functions for sampling. We assume we are given access to a
@@ -14,73 +16,6 @@
import bet.util as util
from bet.Comm import *
-def compare_yield(sort_ind, sample_quality, run_param, column_headings=None):
- """
- Compare the quality of samples where ``sample_quality`` is the measure of
- quality by which the sets of samples have been indexed and ``sort_ind`` is
- an array of the sorted indicies.
-
- :param list() sort_int: indicies that index ``sample_quality`` in sorted
- order
- :param list() sample_quality: a measure of quality by which the sets of
- samples are sorted
- :param list() run_param: zipped list of :class:`~numpy.ndarray`s containing
- information used to generate the sets of samples to be displayed
-
- """
- if column_headings == None:
- column_headings = "Run parameters"
- print "Sample Set No., Quality, "+ column_headings
- for i in reversed(sort_ind):
- print i, sample_quality[i], np.round(run_param[i], 3)
-
-def in_high_prob(data, rho_D, maximum, sample_nos=None):
- """
- Estimates the number of samples in high probability regions of D.
-
- :param data: Data associated with ``samples``
- :type data: :class:`np.ndarray`
- :param rho_D: probability density on D
- :type rho_D: callable function that takes a :class:`np.array` and returns a
- :class:`np.ndarray`
- :param list sample_nos: sample numbers to plot
-
- :rtype: int
- :returns: Estimate of number of samples in the high probability area.
-
- """
- if sample_nos == None:
- sample_nos = range(data.shape[0])
- rD = rho_D(data[sample_nos, :])
- adjusted_total_prob = int(sum(rD)/maximum)
- print "Samples in box "+str(adjusted_total_prob)
- return adjusted_total_prob
-
-def in_high_prob_multi(results_list, rho_D, maximum, sample_nos_list=None):
- """
- Estimates the number of samples in high probability regions of D for a list
- of results.
-
- :param list results_list: list of (results, data) tuples
- :param rho_D: probability density on D
- :type rho_D: callable function that takes a :class:`np.array` and returns a
- :class:`np.ndarray`
- :param list sample_nos_list: list of sample numbers to plot (list of lists)
-
- :rtype: list of int
- :returns: Estimate of number of samples in the high probability area.
-
- """
- adjusted_total_prob = list()
- if sample_nos_list:
- for result, sample_nos in zip(results_list, sample_nos_list):
- adjusted_total_prob.append(in_high_prob(result[1], rho_D, maximum,
- sample_nos))
- else:
- for result in results_list:
- adjusted_total_prob.append(in_high_prob(result[1], rho_D, maximum))
- return adjusted_total_prob
-
def loadmat(save_file, model=None):
"""
Loads data from ``save_file`` into a
@@ -98,14 +33,16 @@ def loadmat(save_file, model=None):
# load the samples
if mdat.has_key('samples'):
samples = mdat['samples']
+ num_samples = samples.shape[0]
else:
samples = None
+ num_samples = None
# load the data
if mdat.has_key('data'):
data = mdat['data']
else:
data = None
- loaded_sampler = sampler(model, mdat['num_samples'])
+ loaded_sampler = sampler(model, num_samples)
return (loaded_sampler, samples, data)
class sampler(object):
@@ -123,6 +60,10 @@ class sampler(object):
def __init__(self, lb_model, num_samples=None):
"""
Initialization
+
+ :param lb_model: Interface to physics-based model takes an input of
+ shape (N, ndim) and returns an output of shape (N, mdim)
+ :param int num_samples: N, number of samples (optional)
"""
self.num_samples = num_samples
self.lb_model = lb_model
@@ -167,6 +108,7 @@ def random_samples(self, sample_type, param_min, param_max,
:param param_max: maximum value for each parameter dimension
:type param_max: np.array (ndim,)
:param string savefile: filename to save samples and data
+ :param int num_samples: N, number of samples (optional)
:param string criterion: latin hypercube criterion see
`PyDOE `_
:param boolean parallel: Flag for parallel implementation. Uses
@@ -201,43 +143,45 @@ def user_samples(self, samples, savefile, parallel=False):
Numpy and other Python packages. Instead of reimplementing them here we
provide sampler that utilizes user specified samples.
- Note: Parallel implementation with changes ordering of
- the samples if ``samples.shape[0]`` is not divisible by ``size``.
-
:param samples: samples to evaluate the model at
- :type samples: :class:`~numpy.ndarray` of shape (ndim, num_samples)
+ :type samples: :class:`~numpy.ndarray` of shape (num_samples, ndim)
:param string savefile: filename to save samples and data
:param boolean parallel: Flag for parallel implementation. Uses
lowercase ``mpi4py`` methods if ``samples.shape[0]`` is not
divisible by ``size``. Default value is ``False``.
:rtype: tuple
:returns: (``parameter_samples``, ``data_samples``) where
- ``parameter_samples`` is np.ndarray of shape (ndim, num_samples)
+ ``parameter_samples`` is np.ndarray of shape (num_samples, ndim)
and ``data_samples`` is np.ndarray of shape (num_samples, mdim)
"""
# Update the number of samples
self.num_samples = samples.shape[0]
- size = comm.Get_size()
- rank = comm.Get_rank()
# Solve the model at the samples
if not(parallel) or size == 1:
data = self.lb_model(samples)
- elif parallel and self.num_samples%size == 0:
- my_samples = np.empty((samples.shape[0]/size, samples.shape[1]))
- comm.Scatter([samples, MPI.DOUBLE], [my_samples, MPI.DOUBLE])
- my_data = self.lb_model(my_samples)
- data = np.empty((self.num_samples, my_data.shape[1]),
- dtype=np.float64)
- comm.Allgather([my_data, MPI.DOUBLE], [data, MPI.DOUBLE])
elif parallel:
- my_index = range(0+rank, self.num_samples, size)
- my_samples = samples[my_index, :]
+ my_len = self.num_samples/size
+ if rank != size-1:
+ my_index = range(0+rank*my_len, (rank+1)*my_len)
+ else:
+ my_index = range(0+rank*my_len, self.num_samples)
+ if len(samples.shape) == 1:
+ my_samples = samples[my_index]
+ else:
+ my_samples = samples[my_index, :]
my_data = self.lb_model(my_samples)
data = util.get_global_values(my_data)
- samples = util.get_global_values(my_data)
+ samples = util.get_global_values(my_samples)
+
+ # if data or samples are of shape (num_samples,) expand dimensions
+ if len(samples.shape) == 1:
+ samples = np.expand_dims(samples, axis=1)
+ if len(data.shape) == 1:
+ data = np.expand_dims(data, axis=1)
+
mdat = dict()
self.update_mdict(mdat)
diff --git a/bet/util.py b/bet/util.py
index 1a10a57b..4f3a0258 100644
--- a/bet/util.py
+++ b/bet/util.py
@@ -1,10 +1,14 @@
-import numpy as np
-from bet.Comm import *
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
"""
The module contains general tools for BET.
"""
+import numpy as np
+from bet.Comm import *
+import collections
+
+possible_types = {int:MPI.INT, float:MPI.DOUBLE}
def meshgrid_ndim(X):
"""
@@ -14,7 +18,7 @@ def meshgrid_ndim(X):
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays (x1, x2,..., xn).
-
+
:param X: A tuple containing the 1d coordinate arrays
:type X: tuple
@@ -22,37 +26,160 @@ def meshgrid_ndim(X):
:returns: X_new
"""
n = len(X)
- alist=[]
+ alist = []
for i in range(n):
alist.append(X[i])
- for i in range(n,10):
+ for i in range(n, 10):
alist.append(np.array([0]))
- a,b,c,d,e,f,g,h,i,j = np.meshgrid(alist[0],
- alist[1],
- alist[2],
- alist[3],
- alist[4],
- alist[5],
- alist[6],
- alist[7],
- alist[8],
- alist[9],
- indexing='ij')
-
- X_new = np.vstack((a.flat[:],b.flat[:],c.flat[:],d.flat[:],e.flat[:],f.flat[:],g.flat[:],h.flat[:],i.flat[:],j.flat[:])).transpose()
- X_new = X_new[:,0:n]
-
+
+ a, b, c, d, e, f, g, h, i, j = np.meshgrid(alist[0],
+ alist[1],
+ alist[2],
+ alist[3],
+ alist[4],
+ alist[5],
+ alist[6],
+ alist[7],
+ alist[8],
+ alist[9],
+ indexing='ij')
+
+ X_new = np.vstack(
+ (a.flat[:],
+ b.flat[:],
+ c.flat[:],
+ d.flat[:],
+ e.flat[:],
+ f.flat[:],
+ g.flat[:],
+ h.flat[:],
+ i.flat[:],
+ j.flat[:])).transpose()
+ X_new = X_new[:, 0:n]
+
return X_new
-
-def get_global_values(array):
+
+def get_global_values(array, shape=None):
"""
- Concatenates local arrays into global array.
+ Concatenates local arrays into a global array using :meth:`np.vstack`
+ (or :meth:`np.hstack` for 1-d arrays).
:param array: Array.
- :type P_samples: :class:'~numpy.ndarray'
- :rtype: :class:'~numpy.ndarray'
+ :type array: :class:`~numpy.ndarray`
+ :param shape: the global shape of the array (optional); when given and
+ the dtype maps to an MPI type, an uppercase ``Allgather`` is used
+ :rtype: :class:`~numpy.ndarray`
:returns: array
"""
- array = comm.allgather(array, array)
- return np.vstack(array)
+ # Figure out the subtype of the elements of the array
+ dtype = array.dtype
+ mpi_dtype = False
+ for ptype in possible_types.iterkeys():
+ if np.issubdtype(dtype, ptype):
+ mpi_dtype = True
+ dtype = ptype
+
+ if shape is None or not mpi_dtype:
+ # do a lowercase allgather
+ a_shape = len(array.shape)
+ array = comm.allgather(array)
+ if a_shape == 1:
+ return np.hstack(array)
+ else:
+ return np.vstack(array)
+ else:
+ # do an uppercase Allgather
+ whole_a = np.empty(shape, dtype=dtype)
+ comm.Allgather([array, possible_types[dtype]], [whole_a,
+ possible_types[dtype]])
+ return whole_a
+
+def fix_dimensions_vector(vector):
+ """
+ Fix the dimensions of an input so that it is a :class:`numpy.ndarray` of
+ shape (N,).
+
+ :param vector: numerical object
+ :rtype: :class:`numpy.ndarray`
+ :returns: array of shape (N,)
+
+ """
+ if not isinstance(vector, collections.Iterable):
+ vector = np.array([vector])
+ elif not isinstance(vector, np.ndarray):
+ vector = np.array(vector)
+ return vector
+
+def fix_dimensions_vector_2darray(vector):
+ """
+ Fix the dimensions of an input so that it is a :class:`numpy.ndarray` of
+ shape (N,1).
+
+ :param vector: numerical object
+ :rtype: :class:`numpy.ndarray`
+ :returns: array of shape (N,1)
+
+ """
+ vector = fix_dimensions_vector(vector)
+ if len(vector.shape) == 1:
+ vector = np.expand_dims(vector, axis=1)
+ return vector
+
+def fix_dimensions_domain(domain):
+ """
+ Fix the dimensions of an input so that it is a :class:`numpy.ndarray` of
+ shape (dim, 2).
+
+ :param domain: numerical object of at least length 2
+ :type domain: :class:`collections.Iterable`
+ :rtype: :class:`numpy.ndarray`
+ :returns: array of shape (dim, 2)
+
+ """
+ if not isinstance(domain, np.ndarray):
+ if len(domain) == 2:
+ domain = np.expand_dims(domain, axis=0)
+ else:
+ raise TypeError("The length must be at least 2.")
+ elif len(domain.shape) == 1 and domain.shape[0] == 2:
+ domain = np.expand_dims(domain, axis=0)
+ elif len(domain.shape) == 2 and domain.shape[1] == 2:
+ pass # The shape is already correct!
+ elif len(domain.shape) == 2 and domain.shape[0] == 2:
+ domain = domain.transpose()
+ else:
+ raise TypeError("At least one dimension must have a length of 2.")
+ return domain
+
+def fix_dimensions_data(data, dim=None):
+ """
+ Fix the dimensions of an input so that it is a :class:`numpy.ndarray` of
+ shape (N, dim).
+
+ If ``dim`` is not specified:
+ If ``data`` is a non-iterable number, assumes that ``dim == 1``.
+ If ``data`` is a numpy array with ``len(shape) == 1``, assumes that
+ ``dim == 1``.
+ If ``data`` is a numpy array with ``len(shape) == 2``, assumes that
+ ``dim == shape[1]``.
+
+
+ :param data: numerical object
+ :param int dim: The dimension of the "data" space.
+ :rtype: :class:`numpy.ndarray`
+ :returns: array of shape (N, dim)
+
+ """
+ if dim is None:
+ if not isinstance(data, np.ndarray):
+ return fix_dimensions_vector_2darray(data)
+ elif len(data.shape) == 1:
+ return fix_dimensions_vector_2darray(data)
+ else:
+ return data
+
+ data = fix_dimensions_vector_2darray(data)
+ if data.shape[1] != dim:
+ return data.transpose()
+ else:
+ return data
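+
+# Illustrative, doctest-style sketch of the fix_dimensions_* helpers above
+# (the inputs are hypothetical, not part of the API):
+#
+# >>> fix_dimensions_vector(5).shape
+# (1,)
+# >>> fix_dimensions_vector_2darray([1, 2, 3]).shape
+# (3, 1)
+# >>> fix_dimensions_domain([0.0, 1.0]).shape
+# (1, 2)
+# >>> fix_dimensions_data([1.0, 2.0, 3.0]).shape
+# (3, 1)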
+
+
+
diff --git a/doc/bet.loadBalance.rst b/doc/bet.loadBalance.rst
deleted file mode 100644
index 9d0888c6..00000000
--- a/doc/bet.loadBalance.rst
+++ /dev/null
@@ -1,30 +0,0 @@
-bet.loadBalance package
-=======================
-
-Submodules
-----------
-
-bet.loadBalance.lb_PADCIRC module
----------------------------------
-
-.. automodule:: bet.loadBalance.lb_PADCIRC
- :members:
- :undoc-members:
- :show-inheritance:
-
-bet.loadBalance.load_balance module
------------------------------------
-
-.. automodule:: bet.loadBalance.load_balance
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: bet.loadBalance
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/bet.rst b/doc/bet.rst
index cd1d45fe..971c45d9 100644
--- a/doc/bet.rst
+++ b/doc/bet.rst
@@ -7,7 +7,6 @@ Subpackages
.. toctree::
bet.calculateP
- bet.loadBalance
bet.postProcess
bet.sampling
diff --git a/doc/conf.py b/doc/conf.py
index 90bee7db..64b3bcd9 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -1,3 +1,5 @@
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
# -*- coding: utf-8 -*-
#
# BET documentation build configuration file, created by
@@ -32,7 +34,9 @@
intersphinx_mapping = {'python': ('https://docs.python.org/2.7', None),
'polyadcirc' : ('http://ut-chg.github.io/PolyADCIRC', None),
'matplotlib':('http://matplotlib.sourceforge.net', None),
- 'numpy':('http://docs.scipy.org/doc/numpy',None)
+ 'numpy':('http://docs.scipy.org/doc/numpy',None),
+ 'np':('http://docs.scipy.org/doc/numpy',None),
+ 'scipy':('http://docs.scipy.org/doc/scipy',None)
}
todo_include_todos = True
@@ -58,9 +62,9 @@
# built documents.
#
# The short X.Y version.
-version = '0.1'
+version = '0.2'
# The full version, including alpha/beta/rc tags.
-release = '0.1.0'
+release = '0.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
diff --git a/doc/examples/fromADCIRCMap/Q_1D_serial.rst b/doc/examples/fromADCIRCMap/Q_1D_serial.rst
index 79a212f1..1a3c4c25 100644
--- a/doc/examples/fromADCIRCMap/Q_1D_serial.rst
+++ b/doc/examples/fromADCIRCMap/Q_1D_serial.rst
@@ -91,7 +91,7 @@ Calculate :math:`\hat{\rho}_{\Lambda, j}` where :math:`\mathcal{V}_j` are the
voronoi cells defined by :math:`\lambda_{emulate}`::
(P0, lem0, io_ptr0, emulate_ptr0) = calcP.prob_emulated(samples, data,
- rho_D_M, d_distr_samples, lam_domain, lambda_emulate, d_Tree)
+ rho_D_M, d_distr_samples, lambda_emulate, d_Tree)
mdict['P0'] = P0
mdict['lem0'] = lem0
mdict['io_ptr0'] = io_ptr0
@@ -101,13 +101,12 @@ Calculate :math:`\hat{\rho}_{\Lambda, j}` where :math:`\mathcal{V}_j` are the
voronoi cells defined by :math:`\lambda_{samples}` assume that :math:`\lambda_{samples}`
are uniformly distributed and therefore have approximately the same volume::
- (P1, lam_vol1, lem1, io_ptr1, emulate_ptr1) = calcP.prob(samples, data,
- rho_D_M, d_distr_samples, lam_domain, d_Tree)
+ (P1, lam_vol1, lem1, io_ptr1) = calcP.prob(samples, data,
+ rho_D_M, d_distr_samples, d_Tree)
mdict['P1'] = P1
mdict['lam_vol1'] = lam_vol1
mdict['lem1'] = lem1
mdict['io_ptr1'] = io_ptr1
- mdict['emulate_ptr1'] = emulate_ptr1
Calculate :math:`\hat{\rho}_{\Lambda, j}` where :math:`\mathcal{V}_j` are the
voronoi cells defined by :math:`\lambda_{samples}` and we approximate the volume of
@@ -115,7 +114,7 @@ voronoi cells defined by :math:`\lambda_{samples}` and we approximate the volume
:math:`\lambda_{emulate}` to estimate the volume of :math:`\mathcal{V}_j` ::
(P3, lam_vol3, lambda_emulate3, io_ptr3, emulate_ptr3) = calcP.prob_mc(samples,
- data, rho_D_M, d_distr_samples, lam_domain, lambda_emulate, d_Tree)
+ data, rho_D_M, d_distr_samples, lambda_emulate, d_Tree)
mdict['P3'] = P3
mdict['lam_vol3'] = lam_vol3
mdict['io_ptr3'] = io_ptr3
diff --git a/doc/examples/fromADCIRCMap/Q_2D.rst b/doc/examples/fromADCIRCMap/Q_2D.rst
index fc219853..5f4ae1f5 100644
--- a/doc/examples/fromADCIRCMap/Q_2D.rst
+++ b/doc/examples/fromADCIRCMap/Q_2D.rst
@@ -15,8 +15,8 @@ This example demostrates three different methods to estimate
These methods are distinguished primarily by the way :math:`\mathcal{V}_j` are
defined and the approximation of the volume of :math:`\mathcal{V}_j`. See
-:download:`Q_2D.py
-<../../../examples/fromADCIRCMap/Q_2D.py>` for the example source code. Since
+:download:`Q_2D_serial.py
+<../../../examples/fromADCIRCMap/Q_2D_serial.py>` for the example source code. Since
this example is essentially the same as :ref:`q1D` we will only highlight the
differences between the two.
@@ -44,7 +44,7 @@ Define the refernce solution. We define a region of interest, :math:`R_{ref} \su
center_pts_per_edge=np.ones((data.shape[1],)))
-Finally, we calculate :math:`\hat{\rho}_{\Lambda, j}` for three reference solutions and the QoI :math:( (q_1,q_2), (q_1, q_5), and (q_1, q_6)) ::
+Finally, we calculate :math:`\hat{\rho}_{\Lambda, j}` for three reference solutions and the QoI :math:`( (q_1,q_2), (q_1, q_5)`, and :math:`(q_1, q_6))` ::
ref_nums = [6, 11, 15] # 7, 12, 16
stations = [1, 4, 5] # 2, 5, 6
diff --git a/doc/examples/fromADCIRCMap/Q_3D.rst b/doc/examples/fromADCIRCMap/Q_3D.rst
index 6d5c19df..2d15f65d 100644
--- a/doc/examples/fromADCIRCMap/Q_3D.rst
+++ b/doc/examples/fromADCIRCMap/Q_3D.rst
@@ -35,6 +35,8 @@ Also the ``postprocess(station_nums, ref_num)`` function in this case only uses
Parallel Example
~~~~~~~~~~~~~~~~
+.. note:: The parallel version of this example has been moved to the development branch.
+
This example demostrates how to estimate :math:`\hat{\rho}_{\Lambda, j}` using
:meth:`~bet.calculateP.calculateP.prob_mc` where
@@ -71,11 +73,10 @@ Within the ``postprocess(station_nums, ref_num)`` function in this case we only
need to use :meth:`~bet.postProcess.plotP.get_global_values` to concatenate the arrays
spread out across the processors into a single array::
- mdict['lambda_emulate'] = util.get_global_values(lambda_emulate)
- mdict['num_l_emulate'] = mdict['lambda_emulate'].shape[1]
+ mdict['num_l_emulate'] = num_l_emulate
mdict['P3'] = util.get_global_values(P3)
- mdict['lam_vol3'] = util.get_global_values(lam_vol3)
- mdict['io_ptr3'] = util.get_global_values(io_ptr3)
+ mdict['lam_vol3'] = lam_vol3
+ mdict['io_ptr3'] = io_ptr3
Furthermore, we only want to write out the solution using a single processor::
diff --git a/doc/index.rst b/doc/index.rst
index 82bd345c..60d29f84 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -13,14 +13,14 @@ Contents:
:glob:
overview
+ parallel
examples/*
examples/linearMap/*
examples/fromFileMap/*
examples/fromADCIRCMap/*
+ modules
todo_list
-.. todo:: Flesh out documentation.
-
diff --git a/doc/overview.rst b/doc/overview.rst
index 579a324e..68c37e9f 100644
--- a/doc/overview.rst
+++ b/doc/overview.rst
@@ -8,9 +8,9 @@ Installation
------------
The code currently resides at `GitHub
-<https://github.com/smattis/BET>`_.
+<https://github.com/UT-CHG/BET>`_.
If you have a
-`zip file `_ you can install
+`zip file `_ you can install
BET using::
python setup.py install
@@ -28,26 +28,22 @@ The package layout is as follows::
bet/
calculateP/
- calculateP.py
- simpleFunP.py
- voronoiHistogram.py
+ calculateP
+ simpleFunP
+ voronoiHistogram
sampling/
- basicSampling.py
- adaptiveSampling.py
+ basicSampling
+ adaptiveSampling
postProcess/
- plotP.py
- plotDomains.py
+ plotP
+ plotDomains
postTools
- loadBalance/ # !!! IN ACTIVE DEVELOPMENT !!!
- lb_PADCIRC.py
- load_balance.py
- lb_DIAMOND.py
Code Overview
--------------
:mod:`calculateP` Package
-~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: bet.calculateP
@@ -61,10 +57,6 @@ Code Overview
.. automodule:: bet.postProcess
-:mod:`loadBalance` Package
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: bet.loadBalance
.. seealso:: :ref:`modindex` for detailed documentation of modules, classes, etc.
diff --git a/doc/parallel.rst b/doc/parallel.rst
new file mode 100644
index 00000000..0a5d5b73
--- /dev/null
+++ b/doc/parallel.rst
@@ -0,0 +1,94 @@
+.. _parallel:
+
+========
+Parallel
+========
+
+Installation
+------------
+
+Running this code in parallel requires the installation of `MPI for Python
+`_, which requires that your system has MPI
+installed.
+
+Running in parallel
+-------------------
+
+Depending on which modules, methods, and/or functions from BET your script
+uses, few or no changes are needed to run it in parallel. To run your script
+in parallel use::
+
+ $ mpirun -np NUM_PROCS python YOUR_SCRIPT.py
+
+instead of ::
+
+ $ python YOUR_SCRIPT.py
+
+You might need to globalize your arrays or set the ``parallel`` flag on some
+functions to ensure correct execution.
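+
+For example, a minimal sketch of rank-aware output (``rank`` is importable
+from :mod:`bet.Comm`)::
+
+ from bet.Comm import rank
+
+ if rank == 0:
+     print "only the rank 0 processor prints"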
+
+Affected Packages
+-----------------
+
+The modules that have parallel capabilities are as follows::
+
+ bet/
+ util
+ calculateP/
+ calculateP
+ simpleFunP
+ sampling/
+ basicSampling
+ adaptiveSampling
+ postProcess/
+ plotP
+ postTools
+
+util
+~~~~
+The module :mod:`~bet.util` provides the method
+:meth:`~bet.util.get_global_values` to globalize local arrays into an array of
+global values on all processors.
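+
+A minimal sketch of globalizing a local array (the array contents here are
+hypothetical)::
+
+ import numpy as np
+ import bet.util as util
+ from bet.Comm import rank
+
+ local_array = np.array([[rank, rank]]) # shape (1, 2) on each processor
+ global_array = util.get_global_values(local_array) # shape (size, 2)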
+
+calculateP
+~~~~~~~~~~
+All methods in the module :mod:`~bet.calculateP.calculateP` benefit from
+parallel execution. Only local arrays are returned for ``P``, use
+:meth:`~bet.util.get_global_values` to globalize local arrays.
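+
+For instance, a minimal sketch (assuming ``calcP`` and ``util`` are the usual
+import aliases for :mod:`~bet.calculateP.calculateP` and :mod:`~bet.util`)::
+
+ (P, lam_vol, io_ptr) = calcP.prob(samples, data, rho_D_M,
+ d_distr_samples, d_Tree)
+ P = util.get_global_values(P)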
+
+In the module :mod:`~bet.calculateP.simpleFunP` the methods
+:meth:`~bet.calculateP.simpleFunP.unif_unif`,
+:meth:`~bet.calculateP.simpleFunP.normal_normal`, and
+:meth:`~bet.calculateP.simpleFunP.unif_normal` benefit from parallel
+execution.
+
+sampling
+~~~~~~~~
+If you are using a model with parallel capabilities, we recommend that you
+write your own Python interface to handle running multiple parallel copies of
+your model simultaneously. If your model is serial, you might benefit from
+parallel execution of scripts that use
+:class:`bet.sampling.basicSampling.sampler` or
+:class:`bet.sampling.adaptiveSampling.sampler`. The method
+:meth:`~bet.sampling.basicSampling.sampler.user_samples` has a parallel option
+(must be specified in the method call) which will partition the samples over
+several processors and return a globalized set of results. The method
+:meth:`~bet.sampling.adaptiveSampling.sampler.generalized_chains` divides up
+the chains among the available processors and returns a globalized result.
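+
+A minimal sketch of the parallel option (``my_model`` and ``my_samples`` are
+placeholders for your model interface and sample array)::
+
+ import bet.sampling.basicSampling as bsam
+
+ my_sampler = bsam.sampler(my_model)
+ (samples, data) = my_sampler.user_samples(my_samples, 'my_save_file',
+ parallel=True)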
+
+postProcess
+~~~~~~~~~~~
+In :mod:`~bet.postProcess.plotP` the methods
+:meth:`~bet.postProcess.plotP.calculate_1D_marginal_probs` and
+:meth:`~bet.postProcess.plotP.calculate_2D_marginal_probs` benefit from
+parallel execution. The methods :meth:`~bet.postProcess.plotP.plot_1D_marginal_probs` and
+:meth:`~bet.postProcess.plotP.plot_2D_marginal_probs` will only execute on the
+rank 0 processor.
+
+In :mod:`~bet.postProcess.postTools` the methods
+:meth:`~bet.postProcess.postTools.save_parallel_probs_csv`,
+:meth:`~bet.postProcess.postTools.collect_parallel_probs_csv`,
+:meth:`~bet.postProcess.postTools.save_parallel_probs_mat`, and
+:meth:`~bet.postProcess.postTools.collect_parallel_probs_mat` provide tools to
+save and collect probabilities from separate processors as appropriately
+named files.
+
diff --git a/examples/fromADCIRCMap/Q_1D_serial.py b/examples/fromADCIRCMap/Q_1D_serial.py
index 5afbf2a8..c0c14ec9 100644
--- a/examples/fromADCIRCMap/Q_1D_serial.py
+++ b/examples/fromADCIRCMap/Q_1D_serial.py
@@ -1,3 +1,5 @@
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
import bet.calculateP.calculateP as calcP
import bet.calculateP.simpleFunP as sfun
import numpy as np
@@ -19,7 +21,7 @@ def postprocess(station_nums, ref_num):
filename = 'P_q'+str(station_nums[0]+1)+'_q'
if len(station_nums) == 3:
filename += '_q'+str(station_nums[2]+1)
- filename += '_truth_'+str(ref_num+1)
+ filename += '_ref_'+str(ref_num+1)
data = Q[:, station_nums]
q_ref = Q_ref[ref_num, station_nums]
@@ -43,7 +45,7 @@ def postprocess(station_nums, ref_num):
# Calculate P on lambda emulate
(P0, lem0, io_ptr0, emulate_ptr0) = calcP.prob_emulated(samples, data,
- rho_D_M, d_distr_samples, lam_domain, lambda_emulate, d_Tree)
+ rho_D_M, d_distr_samples, lambda_emulate, d_Tree)
print "Calculating prob_emulated"
mdict['P0'] = P0
mdict['lem0'] = lem0
@@ -52,25 +54,24 @@ def postprocess(station_nums, ref_num):
# Calclate P on the actual samples with assumption that voronoi cells have
# equal size
- (P1, lam_vol1, io_ptr1, emulate_ptr1) = calcP.prob(samples, data,
- rho_D_M, d_distr_samples, lam_domain, d_Tree)
+ (P1, lam_vol1, io_ptr1) = calcP.prob(samples, data,
+ rho_D_M, d_distr_samples, d_Tree)
print "Calculating prob"
mdict['P1'] = P1
mdict['lam_vol1'] = lam_vol1
mdict['lem1'] = samples
mdict['io_ptr1'] = io_ptr1
- mdict['emulate_ptr1'] = emulate_ptr1
# Calculate P on the actual samples estimating voronoi cell volume with MC
# integration
(P3, lam_vol3, lambda_emulate3, io_ptr3, emulate_ptr3) = calcP.prob_mc(samples,
- data, rho_D_M, d_distr_samples, lam_domain, lambda_emulate, d_Tree)
+ data, rho_D_M, d_distr_samples, lambda_emulate, d_Tree)
print "Calculating prob_mc"
mdict['P3'] = P3
mdict['lam_vol3'] = lam_vol3
mdict['io_ptr3'] = io_ptr3
mdict['emulate_ptr3'] = emulate_ptr3
- # Export P and compare to MATLAB solution visually
+ # Export P
sio.savemat(filename, mdict, do_compression=True)
# Post-process and save P and emulated points
diff --git a/examples/fromADCIRCMap/Q_2D_parallel.py b/examples/fromADCIRCMap/Q_2D_parallel.py
new file mode 100644
index 00000000..79a00ef2
--- /dev/null
+++ b/examples/fromADCIRCMap/Q_2D_parallel.py
@@ -0,0 +1,92 @@
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
+import bet.calculateP.calculateP as calcP
+import bet.calculateP.simpleFunP as sfun
+import numpy as np
+import scipy.io as sio
+import bet.util as util
+from bet.Comm import rank
+
+# Import "Truth"
+mdat = sio.loadmat('Q_2D')
+Q = mdat['Q']
+Q_ref = mdat['Q_true']
+
+# Import Data
+samples = mdat['points'].transpose()
+lam_domain = np.array([[0.07, .15], [0.1, 0.2]])
+
+print "Finished loading data"
+
+def postprocess(station_nums, ref_num):
+
+ filename = 'P_q'+str(station_nums[0]+1)+'_q'+str(station_nums[1]+1)
+ if len(station_nums) == 3:
+ filename += '_q'+str(station_nums[2]+1)
+ filename += '_ref_'+str(ref_num+1)
+
+ data = Q[:, station_nums]
+ q_ref = Q_ref[ref_num, station_nums]
+
+ # Create Simple function approximation
+ # Save points used to partition D for simple function approximation and
+ # the approximation itself (this can be used to make close comparisons...)
+ (rho_D_M, d_distr_samples, d_Tree) = sfun.uniform_hyperrectangle(data,
+ q_ref, bin_ratio=0.15,
+ center_pts_per_edge=np.ones((data.shape[1],)))
+
+ num_l_emulate = 1e6
+ lambda_emulate = calcP.emulate_iid_lebesgue(lam_domain, num_l_emulate)
+
+ if rank == 0:
+ print "Finished emulating lambda samples"
+ mdict = dict()
+ mdict['rho_D_M'] = rho_D_M
+ mdict['d_distr_samples'] = d_distr_samples
+ mdict['num_l_emulate'] = num_l_emulate
+
+ # Calculate P on lambda emulate
+ (P0, lem0, io_ptr0, emulate_ptr0) = calcP.prob_emulated(samples, data,
+ rho_D_M, d_distr_samples, lambda_emulate, d_Tree)
+ if rank == 0:
+ print "Calculating prob_emulated"
+ mdict['P0'] = P0
+ mdict['lem0'] = lem0
+ mdict['io_ptr0'] = io_ptr0
+ mdict['emulate_ptr0'] = emulate_ptr0
+
+ # Calculate P on the actual samples with the assumption that voronoi
+ # cells have equal size
+ (P1, lam_vol1, io_ptr1) = calcP.prob(samples, data,
+ rho_D_M, d_distr_samples, d_Tree)
+ if rank == 0:
+ print "Calculating prob"
+ mdict['P1'] = P1
+ mdict['lam_vol1'] = lam_vol1
+ mdict['lem1'] = samples
+ mdict['io_ptr1'] = io_ptr1
+
+ # Calculate P on the actual samples estimating voronoi cell volume with MC
+ # integration
+ (P3, lam_vol3, lambda_emulate3, io_ptr3, emulate_ptr3) = calcP.prob_mc(samples,
+ data, rho_D_M, d_distr_samples, lambda_emulate, d_Tree)
+ if rank == 0:
+ print "Calculating prob_mc"
+ mdict['P3'] = P3
+ mdict['lam_vol3'] = lam_vol3
+ mdict['io_ptr3'] = io_ptr3
+ mdict['emulate_ptr3'] = emulate_ptr3
+ # Export P
+ sio.savemat(filename, mdict, do_compression=True)
+
+# Post-process and save P and emulated points
+ref_nums = [6, 11, 15] # 7, 12, 16
+stations = [1, 4, 5] # 2, 5, 6
+
+ref_nums, stations = np.meshgrid(ref_nums, stations)
+ref_nums = ref_nums.ravel()
+stations = stations.ravel()
+
+for tnum, stat in zip(ref_nums, stations):
+ postprocess([0, stat], tnum)
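+
+# Run this example in parallel with:
+# $ mpirun -np NUM_PROCS python Q_2D_parallel.py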
+
diff --git a/examples/fromADCIRCMap/Q_2D.py b/examples/fromADCIRCMap/Q_2D_serial.py
similarity index 85%
rename from examples/fromADCIRCMap/Q_2D.py
rename to examples/fromADCIRCMap/Q_2D_serial.py
index d701e618..56be87dc 100644
--- a/examples/fromADCIRCMap/Q_2D.py
+++ b/examples/fromADCIRCMap/Q_2D_serial.py
@@ -1,3 +1,5 @@
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
import bet.calculateP.calculateP as calcP
import bet.calculateP.simpleFunP as sfun
import numpy as np
@@ -19,7 +21,7 @@ def postprocess(station_nums, ref_num):
filename = 'P_q'+str(station_nums[0]+1)+'_q'+str(station_nums[1]+1)
if len(station_nums) == 3:
filename += '_q'+str(station_nums[2]+1)
- filename += '_truth_'+str(ref_num+1)
+ filename += '_ref_'+str(ref_num+1)
data = Q[:, station_nums]
q_ref = Q_ref[ref_num, station_nums]
@@ -43,7 +45,7 @@ def postprocess(station_nums, ref_num):
# Calculate P on lambda emulate
(P0, lem0, io_ptr0, emulate_ptr0) = calcP.prob_emulated(samples, data,
- rho_D_M, d_distr_samples, lam_domain, lambda_emulate, d_Tree)
+ rho_D_M, d_distr_samples, lambda_emulate, d_Tree)
print "Calculating prob_emulated"
mdict['P0'] = P0
mdict['lem0'] = lem0
@@ -52,25 +54,24 @@ def postprocess(station_nums, ref_num):
# Calclate P on the actual samples with assumption that voronoi cells have
# equal size
- (P1, lam_vol1, io_ptr1, emulate_ptr1) = calcP.prob(samples, data,
- rho_D_M, d_distr_samples, lam_domain, d_Tree)
+ (P1, lam_vol1, io_ptr1) = calcP.prob(samples, data,
+ rho_D_M, d_distr_samples, d_Tree)
print "Calculating prob"
mdict['P1'] = P1
mdict['lam_vol1'] = lam_vol1
mdict['lem1'] = samples
mdict['io_ptr1'] = io_ptr1
- mdict['emulate_ptr1'] = emulate_ptr1
# Calculate P on the actual samples estimating voronoi cell volume with MC
# integration
(P3, lam_vol3, lambda_emulate3, io_ptr3, emulate_ptr3) = calcP.prob_mc(samples,
- data, rho_D_M, d_distr_samples, lam_domain, lambda_emulate, d_Tree)
+ data, rho_D_M, d_distr_samples, lambda_emulate, d_Tree)
print "Calculating prob_mc"
mdict['P3'] = P3
mdict['lam_vol3'] = lam_vol3
mdict['io_ptr3'] = io_ptr3
mdict['emulate_ptr3'] = emulate_ptr3
- # Export P and compare to MATLAB solution visually
+ # Export P
sio.savemat(filename, mdict, do_compression=True)
# Post-process and save P and emulated points
diff --git a/examples/fromADCIRCMap/Q_3D_parallel.py b/examples/fromADCIRCMap/Q_3D_parallel.py
deleted file mode 100644
index a90659fc..00000000
--- a/examples/fromADCIRCMap/Q_3D_parallel.py
+++ /dev/null
@@ -1,85 +0,0 @@
-import bet.calculateP.calculateP as calcP
-import bet.calculateP.simpleFunP as sfun
-import numpy as np
-import scipy.io as sio
-from mpi4py import MPI
-from bet import util
-
-comm = MPI.COMM_WORLD
-rank = comm.Get_rank()
-
-# Import "Truth"
-mdat = sio.loadmat('Q_3D')
-Q = mdat['Q']
-Q_ref = mdat['Q_true']
-
-# Import Data
-samples = mdat['points'].transpose()
-lam_domain = np.array([[-900, 1200], [0.07, .15], [0.1, 0.2]])
-
-print "Finished loading data"
-
-def postprocess(station_nums, ref_num):
-
- filename = 'P_q'+str(station_nums[0]+1)+'_q'+str(station_nums[1]+1)
- if len(station_nums) == 3:
- filename += '_q'+str(station_nums[2]+1)
- filename += '_truth_'+str(ref_num+1)
-
- data = Q[:, station_nums]
- q_ref = Q_ref[ref_num, station_nums]
-
- # Create Simple function approximation
- # Save points used to parition D for simple function approximation and the
- # approximation itself (this can be used to make close comparisions...)
- (rho_D_M, d_distr_samples, d_Tree) = sfun.uniform_hyperrectangle(data,
- q_ref, bin_ratio=0.15,
- center_pts_per_edge=np.ones((data.shape[1],)))
-
- num_l_emulate = 1e6
- lambda_emulate = calcP.emulate_iid_lebesgue(lam_domain, num_l_emulate)
- print "Finished emulating lambda samples"
-
- # Calculate P on the actual samples estimating voronoi cell volume with MC
- # integration
- (P3, lam_vol3, lambda_emulate3, io_ptr3, emulate_ptr3) = calcP.prob_mc(samples,
- data, rho_D_M, d_distr_samples, lam_domain, lambda_emulate, d_Tree)
- print "Calculating prob_mc"
- mdict = dict()
- mdict['rho_D_M'] = rho_D_M
- mdict['d_distr_samples'] = d_distr_samples
- mdict['lambda_emulate'] = util.get_global_values(lambda_emulate)
- mdict['num_l_emulate'] = mdict['lambda_emulate'].shape[1]
- mdict['P3'] = util.get_global_values(P3)
- mdict['lam_vol3'] = util.get_global_values(lam_vol3)
- mdict['io_ptr3'] = util.get_global_values(io_ptr3)
- mdict['emulate_ptr3'] = emulate_ptr3
-
- if rank == 0:
- # Export P and compare to MATLAB solution visually
- sio.savemat(filename, mdict, do_compression=True)
-
-# Post-process and save P and emulated points
-ref_num = 14
-
-# q1, q5, q2 ref 15
-station_nums = [0, 4, 1] # 1, 5, 2
-postprocess(station_nums, ref_num)
-
-# q1, q5 ref 15
-station_nums = [0, 4] # 1, 5
-postprocess(station_nums, ref_num)
-
-# q1, q5, q12 ref 16
-station_nums = [0, 4, 11] # 1, 5, 12
-postprocess(station_nums, ref_num)
-
-
-station_nums = [0, 8, 6] # 1, 5, 12
-postprocess(station_nums, ref_num)
-
-
-station_nums = [0, 8, 11] # 1, 5, 12
-postprocess(station_nums, ref_num)
-
-
diff --git a/examples/fromADCIRCMap/Q_3D_serial.py b/examples/fromADCIRCMap/Q_3D_serial.py
index 4efdeba4..f0a1f0e3 100644
--- a/examples/fromADCIRCMap/Q_3D_serial.py
+++ b/examples/fromADCIRCMap/Q_3D_serial.py
@@ -1,3 +1,5 @@
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
import bet.calculateP.calculateP as calcP
import bet.calculateP.simpleFunP as sfun
import numpy as np
@@ -19,7 +21,7 @@ def postprocess(station_nums, ref_num):
filename = 'P_q'+str(station_nums[0]+1)+'_q'+str(station_nums[1]+1)
if len(station_nums) == 3:
filename += '_q'+str(station_nums[2]+1)
- filename += '_truth_'+str(ref_num+1)
+ filename += '_ref_'+str(ref_num+1)
data = Q[:, station_nums]
q_ref = Q_ref[ref_num, station_nums]
@@ -36,14 +38,13 @@ def postprocess(station_nums, ref_num):
# Calclate P on the actual samples with assumption that voronoi cells have
# equal size
- (P1, lam_vol1, io_ptr1, emulate_ptr1) = calcP.prob(samples, data,
- rho_D_M, d_distr_samples, lam_domain, d_Tree)
+ (P1, lam_vol1, io_ptr1) = calcP.prob(samples, data, rho_D_M,
+ d_distr_samples, d_Tree)
print "Calculating prob"
mdict['P1'] = P1
mdict['lam_vol1'] = lam_vol1
mdict['lem1'] = samples
mdict['io_ptr1'] = io_ptr1
- mdict['emulate_ptr1'] = emulate_ptr1
# Export P and compare to MATLAB solution visually
sio.savemat(filename, mdict, do_compression=True)
@@ -55,6 +56,7 @@ def postprocess(station_nums, ref_num):
station_nums = [0, 4, 1] # 1, 5, 2
postprocess(station_nums, ref_num)
+"""
# q1, q5 ref 15
station_nums = [0, 4] # 1, 5
postprocess(station_nums, ref_num)
@@ -71,5 +73,5 @@ def postprocess(station_nums, ref_num):
station_nums = [0, 8, 11] # 1, 5, 12
postprocess(station_nums, ref_num)
-
+"""
diff --git a/examples/fromADCIRCMap/adaptive_2D.py b/examples/fromADCIRCMap/adaptive_2D.py
old mode 100755
new mode 100644
index 9cc5d255..1922df6d
--- a/examples/fromADCIRCMap/adaptive_2D.py
+++ b/examples/fromADCIRCMap/adaptive_2D.py
@@ -1,4 +1,7 @@
#! /usr/bin/env python
+
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
# import necessary modules
import polyadcirc.run_framework.domain as dom
import polyadcirc.run_framework.random_wall_Q as rmw
@@ -7,6 +10,7 @@
import bet.sampling.adaptiveSampling as asam
import bet.sampling.basicSampling as bsam
import scipy.io as sio
+import bet.postProcess.postTools as ptools
adcirc_dir = '/work/01837/lcgraham/v50_subdomain/work'
grid_dir = adcirc_dir + '/ADCIRC_landuse/Inlet_b2/inputs/poly_walls'
@@ -114,7 +118,7 @@ def rho_D(outputs):
param_max, transition_set, kernel, sample_save_file,
initial_sample_type)
-bsam.in_high_prob(data, rho_D, maximum)
+ptools.in_high_prob(data, rho_D, maximum)
print np.mean(all_step_ratios)
diff --git a/examples/fromADCIRCMap/adaptive_3D.py b/examples/fromADCIRCMap/adaptive_3D.py
old mode 100755
new mode 100644
index 8766974f..e52e3fdd
--- a/examples/fromADCIRCMap/adaptive_3D.py
+++ b/examples/fromADCIRCMap/adaptive_3D.py
@@ -1,4 +1,7 @@
#! /usr/bin/env python
+
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
# import necessary modules
import polyadcirc.run_framework.domain as dom
import polyadcirc.run_framework.random_wall_Q as rmw
@@ -7,6 +10,7 @@
import bet.sampling.adaptiveSampling as asam
import bet.sampling.basicSampling as bsam
import scipy.io as sio
+import bet.postProcess.postTools as ptools
adcirc_dir = '/work/01837/lcgraham/v50_subdomain/work'
grid_dir = adcirc_dir + '/ADCIRC_landuse/Inlet_b2/inputs/poly_walls'
@@ -112,7 +116,7 @@ def rho_D(outputs):
(samples, data, step_sizes) = sampler.generalized_chains(param_min, param_max,
transition_set, kernel_rD, sample_save_file,
initial_sample_type)
-bsam.in_high_prob(data, rho_D, maximum)
+ptools.in_high_prob(data, rho_D, maximum)
diff --git a/examples/fromFileMap/Q_2D.mat b/examples/fromFileMap/Q_2D.mat
new file mode 100644
index 00000000..575f3615
Binary files /dev/null and b/examples/fromFileMap/Q_2D.mat differ
diff --git a/examples/fromFileMap/Q_3D.mat b/examples/fromFileMap/Q_3D.mat
new file mode 100644
index 00000000..6f091664
Binary files /dev/null and b/examples/fromFileMap/Q_3D.mat differ
diff --git a/examples/fromFileMap/fromFile2D.py b/examples/fromFileMap/fromFile2D.py
old mode 100755
new mode 100644
index 4cb46e57..7c78bf88
--- a/examples/fromFileMap/fromFile2D.py
+++ b/examples/fromFileMap/fromFile2D.py
@@ -1,4 +1,7 @@
#! /usr/bin/env python
+
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
# import necessary modules
import numpy as np
import bet.sampling.adaptiveSampling as asam
diff --git a/examples/fromFileMap/sandbox_test_2D.py b/examples/fromFileMap/sandbox_test_2D.py
old mode 100755
new mode 100644
index 86d02132..b9fcb0b3
--- a/examples/fromFileMap/sandbox_test_2D.py
+++ b/examples/fromFileMap/sandbox_test_2D.py
@@ -1,9 +1,13 @@
#! /usr/bin/env python
+
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
# import necessary modules
import numpy as np
import polyadcirc.pyADCIRC.basic as basic
import bet.sampling.adaptiveSampling as asam
import bet.sampling.basicSampling as bsam
+import bet.postProcess.postTools as ptools
import scipy.io as sio
from scipy.interpolate import griddata
@@ -71,8 +75,7 @@ def rho_D(outputs):
kernel_mm = asam.maxima_mean_kernel(np.array([Q_ref]), rho_D)
kernel_rD = asam.rhoD_kernel(maximum, rho_D)
kernel_m = asam.maxima_kernel(np.array([Q_ref]), rho_D)
-kernel_md = asam.multi_dist_kernel()
-kern_list = [kernel_mm, kernel_rD, kernel_m, kernel_md]
+kern_list = [kernel_mm, kernel_rD, kernel_m]
# Create sampler
chain_length = 125
@@ -104,11 +107,11 @@ def rho_D(outputs):
# Compare the quality of several sets of samples
print "Compare yield of sample sets with various kernels"
-bsam.compare_yield(gen_results[3], gen_results[2], gen_results[4])
+ptools.compare_yield(gen_results[3], gen_results[2], gen_results[4])
print "Compare yield of sample sets with various transition sets bounds"
-bsam.compare_yield(tk_results[3], tk_results[2], tk_results[4])
+ptools.compare_yield(tk_results[3], tk_results[2], tk_results[4])
print "Compare yield of sample sets with variouos increase/decrease ratios"
-bsam.compare_yield(incdec_results[3], incdec_results[2], incdec_results[4])
+ptools.compare_yield(incdec_results[3], incdec_results[2], incdec_results[4])
# Read in points_ref and plot results
p_ref = mdat['points_true']
diff --git a/examples/fromFileMap/sandbox_test_3D.py b/examples/fromFileMap/sandbox_test_3D.py
old mode 100755
new mode 100644
index 3137fd9a..db176206
--- a/examples/fromFileMap/sandbox_test_3D.py
+++ b/examples/fromFileMap/sandbox_test_3D.py
@@ -1,10 +1,14 @@
#! /usr/bin/env python
+
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
# -*- coding: utf-8 -*-
# import necessary modules
import numpy as np
import polyadcirc.pyADCIRC.basic as basic
import bet.sampling.adaptiveSampling as asam
import bet.sampling.basicSampling as bsam
+import bet.postProcess.postTools as ptools
import scipy.io as sio
from scipy.interpolate import griddata
@@ -71,8 +75,7 @@ def rho_D(outputs):
kernel_mm = asam.maxima_mean_kernel(np.array([Q_ref]), rho_D)
kernel_rD = asam.rhoD_kernel(maximum, rho_D)
kernel_m = asam.maxima_kernel(np.array([Q_ref]), rho_D)
-kernel_md = asam.multi_dist_kernel()
-heur_list = [kernel_mm, kernel_rD, kernel_m, kernel_md]
+heur_list = [kernel_mm, kernel_rD, kernel_m]
# Create sampler
chain_length = 125
@@ -106,11 +109,11 @@ def rho_D(outputs):
result_list = [gen_results, tk_results, incdec_results]
print "Compare yield of sample sets with various kernels"
-bsam.compare_yield(gen_results[3], gen_results[2], gen_results[4])
+ptools.compare_yield(gen_results[3], gen_results[2], gen_results[4])
print "Compare yield of sample sets with various transition sets bounds"
-bsam.compare_yield(tk_results[3], tk_results[2], tk_results[4])
+ptools.compare_yield(tk_results[3], tk_results[2], tk_results[4])
print "Compare yield of sample sets with variouos increase/decrease ratios"
-bsam.compare_yield(incdec_results[3], incdec_results[2], incdec_results[4])
+ptools.compare_yield(incdec_results[3], incdec_results[2], incdec_results[4])
# Read in points_ref and plot results
p_ref = mdat['points_true']
diff --git a/examples/linearMap/linearMapUniformSampling.py b/examples/linearMap/linearMapUniformSampling.py
index 553704c4..d8b28930 100644
--- a/examples/linearMap/linearMapUniformSampling.py
+++ b/examples/linearMap/linearMapUniformSampling.py
@@ -1,4 +1,7 @@
#! /usr/bin/env python
+
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
"""
This example generates uniform samples on a 3D grid
and evaluates a linear map to a 2d space. Probabilities
@@ -69,6 +72,8 @@
# calc data
data= np.dot(samples,Q_map)
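+# save the samples and data; these files also serve as the reference data in
+# test/test_calculateP/datafiles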
+np.savetxt('3to2_samples.txt.gz', samples)
+np.savetxt('3to2_data.txt.gz', data)
'''
Suggested changes for user:
@@ -122,13 +127,16 @@
Monte Carlo (it converges like 1/sqrt(n_samples)).
'''
#lambda_emulate = samples
-lambda_emulate = calculateP.emulate_iid_lebesgue(lam_domain=lam_domain, num_l_emulate = 1E5)
+lambda_emulate = calculateP.emulate_iid_lebesgue(lam_domain=lam_domain, num_l_emulate = 1E4)
# calculate probablities
(P, lambda_emulate, io_ptr, emulate_ptr) = calculateP.prob_emulated(samples=samples,
- data=data, rho_D_M = d_distr_prob, d_distr_samples = d_distr_samples,
- lam_domain=lam_domain, lambda_emulate=lambda_emulate, d_Tree=d_Tree)
+ data=data,
+ rho_D_M=d_distr_prob,
+ d_distr_samples=d_distr_samples,
+ lambda_emulate=lambda_emulate,
+ d_Tree=d_Tree)
# calculate 2d marginal probs
'''
Suggested changes for user:
diff --git a/examples/matfiles/Q_2D.mat b/examples/matfiles/Q_2D.mat
new file mode 100644
index 00000000..575f3615
Binary files /dev/null and b/examples/matfiles/Q_2D.mat differ
diff --git a/examples/matfiles/Q_3D.mat b/examples/matfiles/Q_3D.mat
new file mode 100644
index 00000000..6f091664
Binary files /dev/null and b/examples/matfiles/Q_3D.mat differ
diff --git a/examples/matfiles/sandbox2d.mat b/examples/matfiles/sandbox2d.mat
new file mode 100644
index 00000000..60ab5679
Binary files /dev/null and b/examples/matfiles/sandbox2d.mat differ
diff --git a/examples/matfiles/sandbox3d.mat b/examples/matfiles/sandbox3d.mat
new file mode 100644
index 00000000..5c9f3d1d
Binary files /dev/null and b/examples/matfiles/sandbox3d.mat differ
diff --git a/setup.py b/setup.py
index 9040d399..340beff2 100644
--- a/setup.py
+++ b/setup.py
@@ -1,13 +1,19 @@
#!/usr/bin/env python
+
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
'''
The python script for building the BET package and subpackages.
'''
from distutils.core import setup
setup(name='bet',
- version='0.1.0',
+ version='0.2.0',
description='Butler, Estep, Tavener method',
- author = 'Steven Mattis',
- author_email ='steve.a.mattis@gmail.com',
- url= 'https://github.com/smattis/BET',
- packages =['bet', 'bet.sampling', 'bet.calculateP', 'bet.postProcess', 'bet.loadBalance'])
+ author='Steven Mattis',
+ author_email='steve.a.mattis@gmail.com',
+ license='GNU LGPL',
+ url='https://github.com/UT-CHG/BET',
+ packages=['bet', 'bet.sampling', 'bet.calculateP', 'bet.postProcess'],
+ install_requires=['matplotlib', 'mpl_toolkits', 'pyDOE', 'scipy',
+ 'numpy'])
diff --git a/test/__init__.py b/test/__init__.py
index 1ff8b48a..3c8b769e 100644
--- a/test/__init__.py
+++ b/test/__init__.py
@@ -1,6 +1,7 @@
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
"""
This package contains all of the tests for :program:`BET`. The package
structure mirrors the ``bet`` package structure.
"""
-__all__ = ['test_calculateP', 'test_loadBalance', 'test_sampling',
- 'test_visualize']
+__all__ = ['test_calculateP', 'test_postProcess', 'test_sampling']
diff --git a/test/test_Comm.py b/test/test_Comm.py
new file mode 100644
index 00000000..f2c9f629
--- /dev/null
+++ b/test/test_Comm.py
@@ -0,0 +1,67 @@
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
+# Lindley Graham 04/07/2015
+"""
+This module contains unittests for :mod:`~bet.Comm`
+"""
+
+import unittest
+import bet.Comm as Comm
+from pkgutil import iter_modules
+
+class Test_comm_for_no_mpi4py(unittest.TestCase):
+ """
+ Test :class:`bet.Comm.comm_for_no_mpi4py`.
+ """
+ def setUp(self):
+ self.comm = Comm.comm_for_no_mpi4py()
+ def test_Get_size(self):
+ self.assertEqual(self.comm.Get_size(), 1)
+ def test_Get_rank(self):
+ self.assertEqual(self.comm.Get_rank(), 0)
+ def test_allgather(self):
+ thing = range(4)
+ self.assertEqual(self.comm.allgather(thing), thing)
+ def test_allreduce(self):
+ thing = 4
+ self.assertEqual(self.comm.allreduce(thing, op=None), thing)
+ def test_bcast(self):
+ thing = range(4)
+ self.assertEqual(self.comm.bcast(thing, root=0), thing)
+ def test_Allgather(self):
+ thing = range(4)
+ self.assertEqual(self.comm.Allgather(thing), thing)
+ def test_Allreduce(self):
+ thing1 = range(4)
+ thing2 = range(4)
+ self.assertEqual(self.comm.Allreduce(thing1, thing2,
+ op=None), thing1)
+ def test_Bcast(self):
+ thing = range(4)
+ self.assertEqual(self.comm.Bcast(thing, root=0), thing)
+ def test_Scatter(self):
+ thing1 = range(4)
+ thing2 = range(4)
+ self.assertEqual(self.comm.Scatter(thing1, thing2,
+ root=0), thing1)
+
+class Test_Comm(unittest.TestCase):
+ """
+ Test :mod:`bet.Comm`
+ """
+ def test(self):
+ if 'mpi4py' in (name for loader, name, ispkg in iter_modules()):
+ pass
+ else:
+ self.assertEqual(Comm.comm.size, 1)
+ self.assertEqual(Comm.comm.rank, 0)
+
+class Test_MPI_for_no_mpi4py(unittest.TestCase):
+ """
+ Test :class:`bet.Comm.MPI_for_no_mpi4py`.
+ """
+ def test(self):
+ MPI_no = Comm.MPI_for_no_mpi4py()
+ self.assertEqual(MPI_no.SUM, None)
+ self.assertEqual(MPI_no.DOUBLE, float)
+ self.assertEqual(MPI_no.INT, int)
diff --git a/test/test_calculateP/__init__.py b/test/test_calculateP/__init__.py
index 19a745be..9b590ba6 100644
--- a/test/test_calculateP/__init__.py
+++ b/test/test_calculateP/__init__.py
@@ -1,4 +1,7 @@
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
"""
-This subpackage contains tests for the ``bet.calculateP`` subpackage.
+This subpackage contains all of the tests for the ``bet.calculateP``
+subpackage.
"""
-__all__ = ['test_calulcateP', 'test_simpleFunP']
+__all__ = ['test_voronoiHistogram', 'test_calculateP', 'test_simpleFunP']
diff --git a/test/test_calculateP/datafiles/3to1_prob.txt.gz b/test/test_calculateP/datafiles/3to1_prob.txt.gz
new file mode 100644
index 00000000..503f6a98
Binary files /dev/null and b/test/test_calculateP/datafiles/3to1_prob.txt.gz differ
diff --git a/test/test_calculateP/datafiles/3to1_prob_emulated.txt.gz b/test/test_calculateP/datafiles/3to1_prob_emulated.txt.gz
new file mode 100644
index 00000000..1b01dc2e
Binary files /dev/null and b/test/test_calculateP/datafiles/3to1_prob_emulated.txt.gz differ
diff --git a/test/test_calculateP/datafiles/3to1_prob_mc.txt.gz b/test/test_calculateP/datafiles/3to1_prob_mc.txt.gz
new file mode 100644
index 00000000..3c129b33
Binary files /dev/null and b/test/test_calculateP/datafiles/3to1_prob_mc.txt.gz differ
diff --git a/test/test_calculateP/datafiles/3to2_data.txt.gz b/test/test_calculateP/datafiles/3to2_data.txt.gz
new file mode 100644
index 00000000..1b8110b1
Binary files /dev/null and b/test/test_calculateP/datafiles/3to2_data.txt.gz differ
diff --git a/test/test_calculateP/datafiles/3to2_prob.txt.gz b/test/test_calculateP/datafiles/3to2_prob.txt.gz
new file mode 100644
index 00000000..10572d27
Binary files /dev/null and b/test/test_calculateP/datafiles/3to2_prob.txt.gz differ
diff --git a/test/test_calculateP/datafiles/3to2_prob_emulated.txt.gz b/test/test_calculateP/datafiles/3to2_prob_emulated.txt.gz
new file mode 100644
index 00000000..4edc56fc
Binary files /dev/null and b/test/test_calculateP/datafiles/3to2_prob_emulated.txt.gz differ
diff --git a/test/test_calculateP/datafiles/3to2_prob_mc.txt.gz b/test/test_calculateP/datafiles/3to2_prob_mc.txt.gz
new file mode 100644
index 00000000..9227af6b
Binary files /dev/null and b/test/test_calculateP/datafiles/3to2_prob_mc.txt.gz differ
diff --git a/test/test_calculateP/datafiles/3to2_samples.txt.gz b/test/test_calculateP/datafiles/3to2_samples.txt.gz
new file mode 100644
index 00000000..d1b25817
Binary files /dev/null and b/test/test_calculateP/datafiles/3to2_samples.txt.gz differ
diff --git a/test/test_calculateP/test_calculateP.py b/test/test_calculateP/test_calculateP.py
index 721b18e6..a70d04b0 100644
--- a/test/test_calculateP/test_calculateP.py
+++ b/test/test_calculateP/test_calculateP.py
@@ -1,27 +1,32 @@
-# Lindley Graham 05/22/2014
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+# Steven Mattis and Lindley Graham 04/06/2015
"""
-This module contains tests for :module:`bet.caculcateP.calculateP`.
+This module contains tests for :mod:`bet.calculateP.calculateP`.
- * compare using same lambda_emulated (iid and regular grid)
- * compare using different lambda_emulated
Most of these tests should make sure certain values are within a tolerance
rather than exact due to the stocastic nature of the algorithms being tested.
"""
-
+import os
import unittest
+import bet
import bet.calculateP.calculateP as calcP
-import numpy sa np
+import bet.calculateP.simpleFunP as simpleFunP
+import numpy as np
import scipy.spatial as spatial
import numpy.testing as nptest
+import bet.util as util
+from bet.Comm import *
+
+data_path = os.path.dirname(bet.__file__) + "/../test/test_calculateP/datafiles"
class TestEmulateIIDLebesgue(unittest.TestCase):
"""
Test :meth:`bet.calculateP.calculateP.emulate_iid_lebesgue`.
"""
- def runTest(self):
+ def setUp(self):
"""
Test dimension, number of samples, and that all the samples are within
lambda_domain.
@@ -30,529 +35,398 @@ def runTest(self):
lam_left = np.array([0.0, .25, .4])
lam_right = np.array([1.0, 4.0, .5])
- lam_domain = np.zeros((3,3))
- lam_domain[:,0] = lam_left
- lam_domain[:,1] = lam_right
-
- num_l_emulate = 1e6
-
- lambda_emulate = calcP.emulate_iid_lebesgue(lam_domain, num_l_emulate)
-
- # check the dimension
- np.assert_array_equal(lambda_emulate.shape, (3, num_l_emulate))
-
- # check that the samples are all within the correct bounds
- np.assertGreaterEqual(0.0, np.min(lambda_emulate[0, :]))
- np.assertGreaterEqual(.25, np.min(lambda_emulate[1, :]))
- np.assertGreaterEqual(.4, np.min(lambda_emulate[2, :]))
- np.assertLessEqual(1.0, np.max(lambda_emulate[0, :]))
- np.assertLessEqual(4.0, np.max(lambda_emulate[1, :]))
- np.assertLessEqual(.5, np.max(lambda_emulate[2, :]))
-
-# make sure probabilties and lam_vol follow the MC assumption (i.e. for uniform
-# over the entire space they should all be the same, the hyperrectangle case
-# will be different)
-
-# compare the P calculated by the different methods on the same samples
-
-# add a skip thing to the tests involving qhull so that it only runs if that
-# package is installed
-
-# test where P is uniform over the entire domain
-# test where P is uniform over a hyperrectangle subdomain
-# test with and without optional arguments, together and separatly
-# compare using same lambda_emulated (iid and regular grid)
-# compare using different lambda_emulated
-# some will need to be within a tolerance and some will need to be exact
-# also tolerances will need to be depend on if the lambda_emulated are the same
-# and if there voronoi cells are the same, etc.
-# test on a linear model and maybe a non-linear, read from file model
-
-class TestProb(object):
- """
- Tests ``prob*`` methods in :mod:`bet.calculateP.calculateP` with a linear
- model.
- """
- @classmethod
- def setUpClass(cls):
- """
- Create inputs for ``prob*`` methods. This should run only once per
- grouping of tests from this class. But, for isolated tests it doesn't
- make any sense to do it this way. Since many of these tests are
- comparision tests it should be fine to structure things in this manner
- rather using inheritance everywhere.
- """
- # Create model associated inputs (samples, data, lam_domain)
- data_domain = np.array([[0.0, 1.0], [-1.0, 1.0], [-1.0, 0.0]])
- self.lam_domain = np.array([[.1, .2], [3, 4], [50, 60]])
- # iid samples
- self.u_samples = None #calcP.emulate_iid_lebesgue(lam_domain,
- #20**lam_domain.shape[0])
- # regular grid samples
- lam1 = np.linspace(lam_domain[0, 0], lam_domain[0, 1], 20)
- lam2 = np.linspace(lam_domain[1, 0], lam_domain[1, 1], 20)
- lam3 = np.linspace(lam_domain[2, 0], lam_domain[2, 1], 20)
- lam1, lam2, lam3 = np.meshgrid(lam1, lam2, lam3)
- self.r_samples = np.column_stack((lam1.ravel(), lam2.ravel(), lam3.ravel()))
-
- # Create lambda_emulate
- # iid
- self.u_lambda_emulate = None #calcP.emulate_iid_lebesgue(lam_domain,
- 1e6)
- # regular grid
- lam1 = np.linspace(lam_domain[0, 0], lam_domain[0, 1], 100)
- lam2 = np.linspace(lam_domain[1, 0], lam_domain[1, 1], 100)
- lam3 = np.linspace(lam_domain[2, 0], lam_domain[2, 1], 100)
- lam1, lam2, lam3 = np.meshgrid(lam1, lam2, lam3)
- self.r_lambda_emulate = np.column_stack((lam1.ravel(), lam2.ravel(),
- lam3.ravel()))
-
- def compare_to_vol(result_vol, lambda_domain):
- """
- Compare lambda_vol from the algorithm to an analytic solution.
-
- :param result_vol: lambda_vol from any of the methods in
- :mod:`~bet.calculatevol.calculatevol`
- :type result_vol: :class:`numpy.ndarray`
-
- """
- lambda_vol = np.product(lambda_domain[:,1]-lambda_domain[:,0])
- lambda_vol = lambda_vol / float(len(result_vol))
- nptest.assert_array_equal(result_vol,
- np.ones(result_vol.shape)*lambda_vol)
-
- def compare_to_vol_ae(result_vol, lambda_domain):
- """
- Compare ``lambda_vol`` from the algorithm to an analytic solution.
+ self.lam_domain = np.zeros((3, 2))
+ self.lam_domain[:,0] = lam_left
+ self.lam_domain[:,1] = lam_right
- :param result_vol: lambda_vol from any of the methods in
- :mod:`~bet.calculatevol.calculatevol`
- :type result_vol: :class:`numpy.ndarray`
+ self.num_l_emulate = 1000000
+ self.lambda_emulate = calcP.emulate_iid_lebesgue(self.lam_domain, self.num_l_emulate)
+
+ def test_dimension(self):
"""
- lambda_vol = np.product(lambda_domain[:,1]-lambda_domain[:,0])
- lambda_vol = lambda_vol / float(len(result_vol))
- nptest.assert_array_almost_equal_nulp(result_vol,
- np.ones(result_vol.shape)*lambda_vol)
-
- def compare_prob_dtree(result_wtree, result_wotree):
+ Check the dimension.
"""
+ nptest.assert_array_equal(self.lambda_emulate.shape, (int(self.num_l_emulate/size)+1,3))
- Make sure the output from
- :meth:`bet.calcuateP.calculateP.prob_emulated` matches with and without
- option arguments.
-
+ def test_bounds(self):
"""
- # calculate with d_tree
- (P, lem, io_ptr, emulate_ptr) = result_wtree
- # calculate without d_tree
- (Pt, lemt, io_ptrt, emulate_ptrt) = result_wotree
- # Compare results
- nptest.assert_array_equal(P,Pt)
- nptest.assert_array_equal(lem, lemt)
- nptest.assert_array_equal(lem, self.r_lambda_emulate)
- nptest.assert_array_equal(io_ptrt, ioptr)
- nptest.assert_array_equal(emulate_ptr, emulate_ptrt)
-
- def compare_prob_emulate(result_wsamples, result_wosamples):
+ Check that the samples are all within the correct bounds
"""
+ self.assertGreaterEqual(np.min(self.lambda_emulate[:, 0]), 0.0)
+ self.assertGreaterEqual(np.min(self.lambda_emulate[:, 1]), 0.25)
+ self.assertGreaterEqual(np.min(self.lambda_emulate[:, 2]), 0.4)
+ self.assertLessEqual(np.max(self.lambda_emulate[:, 0]), 1.0)
+ self.assertLessEqual(np.max(self.lambda_emulate[:, 1]), 4.0)
+ self.assertLessEqual(np.max(self.lambda_emulate[:, 2]), 0.5)
- Make sure the output from
- :meth:`bet.calcuateP.calculateP.prob_emulated` matches with and without
- ``lambda_emulate`` when ``lambda_emulate == samples``.
+class prob:
+ def test_prob_sum_to_1(self):
+ """
+ Test to see if the prob. sums to 1.
+ """
+ nptest.assert_almost_equal(np.sum(self.P),1.0)
+ #@unittest.skipIf(size > 1, 'Only run in serial')
+ def test_P_matches_true(self):
+ """
+ Test against reference probs. (Only in serial)
+ """
+ if size == 1:
+ nptest.assert_almost_equal(self.P_ref, self.P)
+ def test_vol_sum_to_1(self):
+ """
+ Test that volume ratios sum to 1.
+ """
+ nptest.assert_almost_equal(np.sum(self.lam_vol), 1.0)
+ def test_prob_pos(self):
+ """
+ Test that all probs are non-negative.
+ """
+ self.assertEqual(np.sum(np.less(self.P,0)),0)
+class prob_emulated:
+ def test_P_sum_to_1(self):
+ """
+ Test that prob. sums to 1.
"""
- # calculate with samples
- (P, lem, io_ptr, emulate_ptr) = result_wsamples
- # calculate without samples
- (Pt, lemt, io_ptrt, emulate_ptrt) = result_wosamples
- # Compare results
- nptest.assert_array_equal(P,Pt)
- nptest.assert_array_equal(lem, lemt)
- nptest.assert_array_equal(lem, self.r_lambda_emulate)
- nptest.assert_array_equal(io_ptrt,ioptr)
- nptest.assert_array_equal(emulate_ptr, emulate_ptrt)
+ nptest.assert_almost_equal(np.sum(self.P_emulate),1.0)
+ def test_P_matches_true(self):
+ """
+ Test that probabilites match reference values.
+ """
+ if size == 1:
+ nptest.assert_almost_equal(self.P_emulate_ref,self.P_emulate)
+ def test_prob_pos(self):
+ """
+ Test that all probabilites are non-negative.
+ """
+ self.assertEqual(np.sum(np.less(self.P_emulate,0)),0)
- def compare_prob(result_emulated, result_prob, result_mc):
+class prob_mc(object):
+ def test_P_sum_to_1(self):
+ """
+ Test that the probabilities sum to 1.
+ """
+ nptest.assert_almost_equal(np.sum(self.P), 1.0)
+ def test_P_matches_true(self):
"""
+ Test that the probabilities match reference values (serial only).
+ """
+ if size == 1:
+ nptest.assert_almost_equal(self.P_ref, self.P)
+ def test_vol_sum_to_1(self):
+ """
+ Test that the volume ratios sum to 1.
+ """
+ nptest.assert_almost_equal(np.sum(self.lam_vol), 1.0)
+ def test_prob_pos(self):
+ """
+ Test that all probabilities are non-negative.
+ """
+ self.assertEqual(np.sum(np.less(self.P, 0)), 0)
+
+
- Make sure that the output from
- :meth:`~bet.calculateP.calculateP.prob_emulated`,
- :meth:`~bet.calculateP.calculateP.prob`,
- :meth:`~bet.calculateP.calculateP.prob_mc` matches when ``lambda_emulate == samples``.
+class TestProbMethod_3to2(unittest.TestCase):
+ """
+ Sets up 3 to 2 map problem.
+ """
+ def setUp(self):
+ self.samples = np.loadtxt(data_path + "/3to2_samples.txt.gz")
+ self.data = np.loadtxt(data_path + "/3to2_data.txt.gz")
+ Q_ref = np.array([0.422, 0.9385])
+ (self.d_distr_prob, self.d_distr_samples, self.d_Tree) = simpleFunP.uniform_hyperrectangle(data=self.data, Q_ref=Q_ref, bin_ratio=0.2, center_pts_per_edge=1)
+ self.lam_domain= np.array([[0.0, 1.0],
+ [0.0, 1.0],
+ [0.0, 1.0]])
+ import numpy.random as rnd
+ rnd.seed(1)
+ self.lambda_emulate = calcP.emulate_iid_lebesgue(lam_domain=self.lam_domain,
+ num_l_emulate=1000)
- .. note::
- This method also needs to include
- :meth:`~bet.calculateP.calculateP.prob_qhull` if and only if the
- user has the Python `pyhull `_
- package installed.
+
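+# Each Test_* case below pairs the TestProbMethod_* fixture above with one of
+# the checker mixins (prob, prob_emulated, prob_mc): the fixture's setUp()
+# builds samples, data, the simple function approximation, and reference
+# values, while the mixin contributes the assertions. A schematic of the
+# pattern (hypothetical class name, for illustration only):
+#
+#     class Test_prob_example(TestProbMethod_3to2, prob):
+#         def setUp(self):
+#             super(Test_prob_example, self).setUp()  # build the fixture
+#             (self.P, self.lam_vol, _) = calcP.prob(samples=self.samples,
+#                 data=self.data, rho_D_M=self.d_distr_prob,
+#                 d_distr_samples=self.d_distr_samples, d_Tree=self.d_Tree)
+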
+class Test_prob_3to2(TestProbMethod_3to2, prob):
+ """
+ Test :meth:`bet.calculateP.calculateP.prob` on 3 to 2 map.
+ """
+ def setUp(self):
"""
- # Calculate prob
- (P, lem, io_ptr, emulate_ptr) = result_emulated
- (P1, lam_vol1, lem1, io_ptr1, emulate_ptr1) = result_prob
- (P3, lam_vol3, lem3, io_ptr3, emulate_ptr3) = result_mc
-
- # Compare results
- nptest.assert_array_equal(P,P1)
- nptest.assert_array_equal(P,P3)
- nptest.assert_array_equal(P1,P3)
-
- nptest.assert_array_equal(lam_vol1,lam_vol3)
+ Set up problem.
+ """
+ super(Test_prob_3to2, self).setUp()
+ (self.P, self.lam_vol, _) = calcP.prob(samples=self.samples,
+ data=self.data,
+ rho_D_M=self.d_distr_prob,
+ d_distr_samples=self.d_distr_samples,
+ d_Tree=self.d_Tree)
+ self.P_ref = np.loadtxt(data_path + "/3to2_prob.txt.gz")
- nptest.assert_array_equal(lem,lem1)
- nptest.assert_array_equal(lem,lem3)
- nptest.assert_array_equal(lem1,lem3)
- nptest.assert_array_equal(io_ptr,io_ptr1)
- nptest.assert_array_equal(io_ptr,io_ptr3)
- nptest.assert_array_equal(io_ptr1,io_ptr3)
-
- nptest.assert_array_equal(emulate_ptr,emulate_ptr1)
- nptest.assert_array_equal(emulate_ptr,emulate_ptr3)
- nptest.assert_array_equal(emulate_ptr1,emulate_ptr3)
- def compare_volume_rg(result_prob, result_mc):
+class Test_prob_emulated_3to2(TestProbMethod_3to2, prob_emulated):
+ """
+ Test :meth:`bet.calculateP.calculateP.prob_emulated` on a 3 to 2 map.
+ """
+ def setUp(self):
+ """
+ Set up 3 to 2 map.
"""
+ super(Test_prob_emulated_3to2, self).setUp()
+ (self.P_emulate, self.lambda_emulate, _, _) = calcP.prob_emulated(samples=self.samples,
+ data=self.data,
+ rho_D_M=self.d_distr_prob,
+ d_distr_samples=self.d_distr_samples,
+ lambda_emulate=self.lambda_emulate,
+ d_Tree=self.d_Tree)
+ self.P_emulate_ref = np.loadtxt(data_path + "/3to2_prob_emulated.txt.gz")
+ self.P_emulate = util.get_global_values(self.P_emulate)
- Make sure that the voronoi cell volumes from
- :meth:`~bet.calculateP.calculateP.prob`,
- :meth:`~bet.calculateP.calculateP.prob_mc` matches when the samples are
- all on a regular grid and ``lambda_emulate == samples``.
- .. note::
- This method also needs to include
- :meth:`~bet.calculateP.calculateP.prob_qhull` if and only if the
- user has the Python `pyhull `_
- package installed.
+class Test_prob_mc_3to2(TestProbMethod_3to2, prob_mc):
+ """
+ Test :meth:`bet.calculateP.calculateP.prob_mc` on a 3 to 2 map.
+ """
+ def setUp(self):
"""
- # Calculate prob
- (P1, lam_vol1, lem1, io_ptr1, emulate_ptr1) = result_prob
- (P3, lam_vol3, lem3, io_ptr3, emulate_ptr3) = result_mc
-
- nptest.assert_array_equal(lam_vol1,lam_vol3)
- self.compare_to_mean(lam_vol1)
- self.compare_to_vol_linear(lam_vol1)
-
- def compare_volume_ae(result_prob, result_mc):
+ Set up 3 to 2 problem.
"""
+ super(Test_prob_mc_3to2, self).setUp()
+ (self.P, self.lam_vol, _, _, _) = calcP.prob_mc(samples=self.samples,
+ data=self.data,
+ rho_D_M=self.d_distr_prob,
+ d_distr_samples=self.d_distr_samples,
+ lambda_emulate=self.lambda_emulate,
+ d_Tree=self.d_Tree)
+ self.P_ref = np.loadtxt(data_path + "/3to2_prob_mc.txt.gz")
- Make sure that the voronoi cell volumes from
- :meth:`~bet.calculateP.calculateP.prob`,
- :meth:`~bet.calculateP.calculateP.prob_mc` matches when the samples are
- i.i.d and ``lambda_emulate == samples``.
-
- .. note::
- This method also needs to include
- :meth:`~bet.calculateP.calculateP.prob_qhull` if and only if the
- user has the Python `pyhull `_
- package installed.
+
+class TestProbMethod_3to1(unittest.TestCase):
+ """
+ Set up 3 to 1 map problem.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ self.samples = np.loadtxt(data_path + "/3to2_samples.txt.gz")
+ self.data = np.loadtxt(data_path + "/3to2_data.txt.gz")[:,0]
+ Q_ref = np.array([0.422])
+ (self.d_distr_prob, self.d_distr_samples, self.d_Tree) = simpleFunP.uniform_hyperrectangle(data=self.data,Q_ref=Q_ref, bin_ratio=0.2, center_pts_per_edge = 1)
+ self.lam_domain= np.array([[0.0, 1.0],
+ [0.0, 1.0],
+ [0.0, 1.0]])
+ import numpy.random as rnd
+ rnd.seed(1)
+ self.lambda_emulate = calcP.emulate_iid_lebesgue(lam_domain=self.lam_domain,
+ num_l_emulate=1000)
+
+
+class Test_prob_3to1(TestProbMethod_3to1, prob):
+ """
+ Test :meth:`bet.calculateP.calculateP.prob` on a 3 to 1 map.
+ """
+ def setUp(self):
+ """
+ Set up problem.
"""
- # Calculate prob
- (P1, lam_vol1, lem1, io_ptr1, emulate_ptr1) = result_prob
- (P3, lam_vol3, lem3, io_ptr3, emulate_ptr3) = result_mc
+ super(Test_prob_3to1, self).setUp()
+ (self.P, self.lam_vol, _) = calcP.prob(samples=self.samples,
+ data=self.data,
+ rho_D_M=self.d_distr_prob,
+ d_distr_samples=self.d_distr_samples,
+ d_Tree=self.d_Tree)
+ self.P_ref = np.loadtxt(data_path + "/3to1_prob.txt.gz")
+
- nptest.assert_array_equal(lam_vol1, lam_vol3)
- self.compare_to_mean_ae(lam_vol1)
- self.compare_to_vol_linear_ae(lam_vol1)
-
- def compare_lambda_emulate(result_prob, result_rg, result_iid):
+class Test_prob_emulated_3to1(TestProbMethod_3to1, prob_emulated):
+ """
+ Test :meth:`bet.calculateP.calculateP.prob_emulated` on a 3 to 1 map.
+ """
+ def setUp(self):
"""
- Compare results when lambda_emulate != samples
-
- * lambda_emulate is i.i.d. or on a regular grid
-
- .. note::
- This method also needs to include
- :meth:`~bet.calculateP.calculateP.prob_qhull` if and only if the
- user has the Python `pyhull `_
- package installed.
-
+ Set up problem.
"""
- # Calculate prob (has no lambda_emulate)
- (P1, lam_vol1, lem1, io_ptr1, emulate_ptr1) = result_prob
- # Calculate prob_mc (has lambda_emulate), regular grid
- (P3, lam_vol3, lem3, io_ptr3, emulate_ptr3) = result_rg
- # Calculate prob_mc (has lambda_emulate), iid samples
- (P4, lam_vol4, lem4, io_ptr4, emulate_ptr4) = result_iid
-
- # Compare results
- nptest.assert_array_almost_equal_nulp(P1,P3)
- nptest.assert_array_almost_equal_nulp(lam_vol1, lam_vol3)
-
- nptest.assert_array_almost_equal_nulp(P4,P3)
- nptest.assert_array_almost_equal_nulp(lam_vol4, lam_vol3)
-
- nptest.assert_array_almost_equal_nulp(P1,P4)
- nptest.assert_array_almost_equal_nulp(lam_vol1, lam_vol4)
-
- def generate_results(self):
- """
- Generate the mix of results from
- :meth:~`bet.calculateP.calculateP.prob_emulated`,
- :meth:~`bet.calculateP.calculateP.prob`, and
- :meth:~`bet.calculateP.calculateP.prob_mc` to be used in test
- subclasses
- """
-
- # RESULTS WHERE SAMPLES = LAMBDA_EMULATE
- # samples are on a regular grid
- # result_wtree, result_wsamples, result_emulated_rg
- self.result_emulated_rg = calcP.prob_emulated(self.r_samples, self.r_data,
- self.rho_D_M, self.d_distr_samples, self.lam_domain,
- self.r_samples, self.d_Tree)
- self.result_wtree = result_emulated_rg
- self.result_wsamples = result_emulated_rg
- self.result_wotree = calcP.prob_emulated(self.r_samples, self.r_data,
- self.rho_D_M, self.d_distr_samples, self.lam_domain,
- self.r_samples)
- self.result_wosamples = calcP.prob_emulated(self.r_samples, self.r_data,
- self.rho_D_M, self.d_distr_samples, self.lam_domain)
-
- self.result_prob_rg = calcP.prob(self.r_samples, self.r_data,
- self.rho_D_M, self.d_distr_samples, self.lam_domain,
- self.d_Tree)
- self.result_mc_rg = calcP.prob_mc(self.r_samples, self.r_data,
- self.rho_D_M, self.d_distr_samples, self.lam_domain,
- self.r_samples, self.d_Tree)
-
- # samples are iid
- self.result_emulated_iid = calcP.prob_emulated(self.u_samples, self.u_data,
- self.rho_D_M, self.d_distr_samples, self.lam_domain,
- self.u_samples, self.d_Tree)
- self.result_prob_iid = calcP.prob(self.u_samples, self.u_data,
- self.rho_D_M, self.d_distr_samples, self.lam_domain,
- self.d_Tree)
- self.result_mc_iid = calcP.prob_mc(self.u_samples, self.u_data,
- self.rho_D_M, self.d_distr_samples, self.lam_domain,
- self.u_samples, self.d_Tree)
-
- # RESULTS WHERE SAMPLES != LAMBDA_EMULATE
- # result_emu_samples_emulatedsamples
- self.result_emu_rg_rg = calcP.prob_mc(self.r_samples, self.r_data,
- self.rho_D_M, self.d_distr_samples, self.lam_domain,
- self.r_lambda_emulate, self.d_Tree)
- self.result_emu_rg_iid = calcP.prob_mc(self.r_samples, self.r_data,
- self.rho_D_M, self.d_distr_samples, self.lam_domain,
- self.u_lambda_emulate, self.d_Tree)
- self.result_emu_iid_rg = calcP.prob_mc(self.u_samples, self.u_data,
- self.rho_D_M, self.d_distr_samples, self.lam_domain,
- self.r_lambda_emulate, self.d_Tree)
- self.result_emu_iid_iid = calcP.prob_mc(self.u_samples, self.u_data,
- self.rho_D_M, self.d_distr_samples, self.lam_domain,
- self.u_lambda_emulate, self.d_Tree)
-
-
-class LinearModel(CompareProb):
- """
- Tests ``prob*`` methods in :mod:`bet.calculateP.calculateP` with a linear
- model.
- """
- @classmethod
- def setUpClass(cls):
- """
- Create inputs for ``prob*`` methods. This should run only once per
- grouping of tests from this class. But, for isolated tests it doesn't
- make any sense to do it this way. Since many of these tests are
- comparision tests it should be fine to structure things in this manner
- rather using inheritance everywhere.
- """
- # This might not be the right invocation.
- super(TestLinearModel, self).setUpClass(TestLinearModel)
- # linear model
- self.rl_data = np.dot(r_samples, data_domain)
- self.ul_data = np.dot(u_samples, data_domain)
-
-class NonLinearModel(CompareProb):
- """
- Tests ``prob*`` methods in :mod:`bet.calculateP.calculateP` with a linear
- model.
- """
- @classmethod
- def setUpClass(cls):
- """
- Create inputs for ``prob*`` methods. This should run only once per
- grouping of tests from this class. But, for isolated tests it doesn't
- make any sense to do it this way. Since many of these tests are
- comparision tests it should be fine to structure things in this manner
- rather using inheritance everywhere.
- """
- # This might not be the right invocation.
- super(NonLinearModel, self).setUpClass(NonLinearModel)
- # non-linear model
- def nonlinear_model(l_data):
- n_data = l_data
- n_data[:,0] = l_data[:,0] + l_data[:,2]
- n_data[:,1] = np.square(l_data[:,1])
- n_data[:,2] = l_data[:,0] - n_data[:,1]
- return n_data
- self.rn_data = nonlinear_model(rl_data)
- self.un_data = nonlinear_model(ul_data)
-
-class TestProbUnifLinear(LinearModel, unittest.TestCase):
- """
- Tests ``prob*`` methods in :mod:`bet.calculateP.calculateP`.
- """
- @classmethod
- def setUpClass(cls):
- """
- Create inputs for ``prob*`` methods. This should run only once per
- grouping of tests from this class. But, for isolated tests it doesn't
- make any sense to do it this way. Since many of these tests are
- comparision tests it should be fine to structure things in this manner
- rather using inheritance everywhere.
- """
- # This might not be the right invocation.
- super(TestProbUnifLinear, self).setUpClass(TestProbUnifLinear)
-
- # Create rho_D_M associated inputs (rho_D_M, d_distr_samples, d_tree)
- # UNIFORM
- self.d_distr_dsamples = np.mean(self.data_domain, 1)
- self.rho_D_M = 1
- self.d_Tree = spatial.KDTree(self.d_distr_samples)
+ super(Test_prob_emulated_3to1, self).setUp()
+ (self.P_emulate, self.lambda_emulate, _, _) = calcP.prob_emulated(samples=self.samples,
+ data=self.data,
+ rho_D_M=self.d_distr_prob,
+ d_distr_samples=self.d_distr_samples,
+ lambda_emulate=self.lambda_emulate,
+ d_Tree=self.d_Tree)
+ self.P_emulate_ref = np.loadtxt(data_path + "/3to1_prob_emulated.txt.gz")
+ self.P_emulate = util.get_global_values(self.P_emulate)
+
- # Generate results
- super(TestProbUnifLinear, self).generate_results(self)
- def compare_to_unif_linear(result_P):
+class Test_prob_mc_3to1(TestProbMethod_3to1, prob_mc):
+ """
+ Test :meth:`bet.calculateP.calculateP.prob_mc` on a 3 to 1 map.
+ """
+ def setUp(self):
+ """
+ Set up problem.
"""
- Compare P from the algorithm to an analytic solution where $P_\mathcal{D}$ is uniform
- over $\mathbf{D}$ (linear model only).
+ super(Test_prob_mc_3to1, self).setUp()
+ (self.P, self.lam_vol, _, _, _) = calcP.prob_mc(samples=self.samples,
+ data=self.data,
+ rho_D_M=self.d_distr_prob,
+ d_distr_samples=self.d_distr_samples,
+ lambda_emulate=self.lambda_emulate,
+ d_Tree=self.d_Tree)
+ self.P_ref = np.loadtxt(data_path + "/3to1_prob_mc.txt.gz")
- :param result_P: P from any of the methods in
- :mod:`~bet.calculateP.calculateP`
- :type result_P: :class:`numpy.ndarray`
+
+class TestProbMethod_10to4(unittest.TestCase):
+ """
+ Sets up 10 to 4 map problem.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ import numpy.random as rnd
+ rnd.seed(1)
+ self.lam_domain = np.zeros((10, 2))
+ self.lam_domain[:, 0] = 0.0
+ self.lam_domain[:, 1] = 1.0
+ self.num_l_emulate = 1000
+ self.lambda_emulate = calcP.emulate_iid_lebesgue(self.lam_domain, self.num_l_emulate)
+ self.samples = calcP.emulate_iid_lebesgue(self.lam_domain, 100)
+ self.data = np.dot(self.samples, rnd.rand(10, 4))
+ Q_ref = np.mean(self.data, axis=0)
+ (self.d_distr_prob, self.d_distr_samples, self.d_Tree) = simpleFunP.uniform_hyperrectangle(data=self.data, Q_ref=Q_ref, bin_ratio=0.2, center_pts_per_edge=1)
+
+ @unittest.skip("No reference data")
+ def test_P_matches_true(self):
+ pass
+
+class Test_prob_10to4(TestProbMethod_10to4, prob):
+ """
+ Test :meth:`bet.calculateP.calculateP.prob` on a 10 to 4 map.
+ """
+ def setUp(self):
"""
- nptest.assert_array_equal(result_P,
- np.ones(result_P.shape)/float(len(result_P)))
-
- def compare_to_unif_linear_ae(result_P):
+ Set up problem.
"""
- Compare P from the algorithm to an analytic solution where $P_\mathcal{D}$ is uniform
- over $\mathbf{D}$ (linear model only).
+ super(Test_prob_10to4, self).setUp()
+ (self.P, self.lam_vol, _) = calcP.prob(samples=self.samples,
+ data=self.data,
+ rho_D_M=self.d_distr_prob,
+ d_distr_samples=self.d_distr_samples,
+ d_Tree=self.d_Tree)
- :param result_P: P from any of the methods in
- :mod:`~bet.calculateP.calculateP`
- :type result_P: :class:`numpy.ndarray`
+
+class Test_prob_emulated_10to4(TestProbMethod_10to4, prob_emulated):
+ """
+ Test :meth:`bet.calculateP.calculateP.prob_emulated` on a 10 to 4 map.
+ """
+ def setUp(self):
"""
- nptest.assert_array_almost_equal_nulp(result_P,
- np.ones(result_P.shape)/float(len(result_P)))
-
- def test_prob_dtree(self):
+ Set up problem.
"""
+ super(Test_prob_emulated_10to4, self).setUp()
- Make sure the output from
- :meth:`bet.calcuateP.calculateP.prob_emulated` matches with and without
- option arguments.
+ (self.P_emulate, self.lambda_emulate, _, _) = calcP.prob_emulated(samples=self.samples,
+ data=self.data,
+ rho_D_M=self.d_distr_prob,
+ d_distr_samples=self.d_distr_samples,
+ lambda_emulate=self.lambda_emulate,
+ d_Tree=self.d_Tree)
+ self.P_emulate = util.get_global_values(self.P_emulate)
- """
- compare_prob_dtree(self.result_wtree, self.result_wotree)
- def test_prob_emulate(self):
- """
- Make sure the output from
- :meth:`bet.calcuateP.calculateP.prob_emulated` matches with and without
- ``lambda_emulate`` when ``lambda_emulate == samples``.
+class Test_prob_mc_10to4(TestProbMethod_10to4, prob_mc):
+ """
+ Test :meth:`bet.calculateP.calculateP.prob_mc` on a 10 to 4 map.
+ """
+ def setUp(self):
"""
- compare_prob_emulate(self.result_wsamples, self.result_wosamples)
-
- def test_prob_rg(self):
+ Set up problem.
"""
+ super(Test_prob_mc_10to4, self).setUp()
+ (self.P, self.lam_vol, _, _, _) = calcP.prob_mc(samples=self.samples,
+ data=self.data,
+ rho_D_M=self.d_distr_prob,
+ d_distr_samples=self.d_distr_samples,
+ lambda_emulate=self.lambda_emulate,
+ d_Tree=self.d_Tree)
- Make sure that the output from
- :meth:`~bet.calculateP.calculateP.prob_emulated`,
- :meth:`~bet.calculateP.calculateP.prob`,
- :meth:`~bet.calculateP.calculateP.prob_mc` matches when the samples are
- all on a regular grid when ``lambda_emulate == samples``.
- .. note::
- This method also needs to include
- :meth:`~bet.calculateP.calculateP.prob_qhull` if and only if the
- user has the Python `pyhull `_
- package installed.
+class TestProbMethod_1to1(unittest.TestCase):
+ """
+ Sets up 1 to 1 map problem. Uses vectors instead of 2D arrays.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ import numpy.random as rnd
+ rnd.seed(1)
+ self.lam_domain = np.zeros((1, 2))
+ self.lam_domain[0, 0] = 0.0
+ self.lam_domain[0, 1] = 1.0
+ self.num_l_emulate = 1000
+ self.lambda_emulate = calcP.emulate_iid_lebesgue(self.lam_domain, self.num_l_emulate)
+ self.samples = rnd.rand(100,)
+ self.data = 2.0*self.samples
+ Q_ref = np.mean(self.data, axis=0)
+ (self.d_distr_prob, self.d_distr_samples, self.d_Tree) = simpleFunP.uniform_hyperrectangle(data=self.data, Q_ref=Q_ref, bin_ratio=0.2, center_pts_per_edge=1)
+ @unittest.skip("No reference data")
+ def test_P_matches_true(self):
+ pass
+class Test_prob_1to1(TestProbMethod_1to1, prob):
+ """
+ Test :meth:`bet.calculateP.calculateP.prob` on a 1 to 1 map.
+ """
+ def setUp(self):
+ """
+ Set up problem.
"""
- compare_prob(self.result_emulated_rg, self.result_prob_rg,self.result_mc_rg)
- compare_volume_rg(self.results_prob_rg, self.result_mc_rg)
+ super(Test_prob_1to1, self).setUp()
+ (self.P, self.lam_vol, _) = calcP.prob(samples=self.samples,
+ data=self.data,
+ rho_D_M=self.d_distr_prob,
+ d_distr_samples=self.d_distr_samples,
+ d_Tree=self.d_Tree)
+
- def test_prob_iid(self):
+class Test_prob_emulated_1to1(TestProbMethod_1to1, prob_emulated):
+ """
+ Test :meth:`bet.calculateP.calculateP.prob_emulated` on a 1 to 1 map.
+ """
+ def setUp(self):
+ """
+ Set up problem.
"""
+ super(Test_prob_emulated_1to1, self).setUp()
- Make sure that the output from
- :meth:`~bet.calculateP.calculateP.prob_emulated`,
- :meth:`~bet.calculateP.calculateP.prob`,
- :meth:`~bet.calculateP.calculateP.prob_mc` matches when the samples are
- i.i.d. with respect to the Lebesgue measure when ``lambda_emulate == samples``.
+ (self.P_emulate, self.lambda_emulate, _, _) = calcP.prob_emulated(samples=self.samples,
+ data=self.data,
+ rho_D_M=self.d_distr_prob,
+ d_distr_samples=self.d_distr_samples,
+ lambda_emulate=self.lambda_emulate,
+ d_Tree=self.d_Tree)
+ self.P_emulate = util.get_global_values(self.P_emulate)
- .. note::
- This method also needs to include
- :meth:`~bet.calculateP.calculateP.prob_qhull` if and only if the
- user has the Python `pyhull `_
- package installed.
+class Test_prob_mc_1to1(TestProbMethod_1to1, prob_mc):
+ """
+ Test :meth:`bet.calculateP.calculateP.prob_mc` on a 1 to 1 map.
+ """
+ def setUp(self):
"""
- compare_prob(self.result_emulated_iid, self.result_prob_iid,
- self.result_mc_iid)
- compare_volume_ae(self.results_prob_iid, self.result_mc_iid)
-
- def test_l_emulate_rg(self):
- """
- Compare results when lambda_emulate != samples
-
- * samples are on a regular grid
- * lambda_emulate is i.i.d. or on a regular grid
-
- .. note::
- This method also needs to include
- :meth:`~bet.calculateP.calculateP.prob_qhull` if and only if the
- user has the Python `pyhull `_
- package installed.
-
- """
- compare_lambda_emulate(self.result_prob_rg, self.result_emu_rg_rg,
- self.result_emu_rg_iid)
- compare_volume_ae(
- compare_volume_ae(
- compare_volume_ae(
-
- # Calculate prob (has no lambda_emulate)
- (P1, lam_vol1, lem1, io_ptr1, emulate_ptr1) = calc.prob(self.u_samples,
- self.ul_data, self.u_rho, self.u_dsamples, self.lam_domain,
- self.ud_tree)
-
- # Calculate prob_mc (has lambda_emulate)
- (P3, lam_vol3, lem3, io_ptr3, emulate_ptr3) = calc.prob_mc(self.u_samples,
- self.ul_data, self.u_rho, self.u_dsamples, self.lam_domain,
- self.u_lambda_emulate, self.ud_tree)
-
- # Compare to mean
- self.compare_to_mean_ae(P3)
- self.compare_to_mean_ae(lam_vol3)
-
- (P4, lam_vol4, lem4, io_ptr4, emulate_ptr4) = calc.prob_mc(self.u_samples,
- self.ul_data, self.u_rho, self.u_dsamples, self.lam_domain,
- self.u_lambda_emulate, self.ud_tree)
-
- # Compare to mean
- self.compare_to_mean_ae(P4)
- self.compare_to_mean_ae(lam_vol4)
-
- # Compare results
- nptest.assert_array_almost_equal_nulp(P1,P3)
- nptest.assert_array_almost_equal_nulp(lam_vol1,lam_vol3)
-
- nptest.assert_array_almost_equal_nulp(P4,P3)
- nptest.assert_array_almost_equal_nulp(lam_vol4,lam_vol3)
-
- nptest.assert_array_almost_equal_nulp(P1,P4)
- nptest.assert_array_almost_equal_nulp(lam_vol1,lam_vol4
- )
+ Set up problem.
+ """
+ super(Test_prob_mc_1to1, self).setUp()
+ (self.P, self.lam_vol, _, _, _) = calcP.prob_mc(samples=self.samples,
+ data=self.data,
+ rho_D_M=self.d_distr_prob,
+ d_distr_samples=self.d_distr_samples,
+ lambda_emulate=self.lambda_emulate,
+ d_Tree=self.d_Tree)
+
- def test_compare_to_analytic_solution(self):
- pass
diff --git a/test/test_calculateP/test_simpleFunP.py b/test/test_calculateP/test_simpleFunP.py
index e69de29b..d20ef4f1 100644
--- a/test/test_calculateP/test_simpleFunP.py
+++ b/test/test_calculateP/test_simpleFunP.py
@@ -0,0 +1,864 @@
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
+# Lindley Graham 04/09/2015
+
+"""
+This module contains tests for :mod:`bet.calculateP.simpleFunP`
+
+Some of these tests make sure certain values are within a tolerance rather than
+exact due to the stochastic nature of the algorithms being tested.
+
+The output of all the methods being tested is of the form (rho_D_M,
+d_distr_samples, d_Tree), where ``rho_D_M`` is (M,), ``d_distr_samples`` is
+(M, mdim), both :class:`~numpy.ndarray`, and ``d_Tree`` is the
+:class:`~scipy.spatial.KDTree` for d_distr_samples.
+
+"""
+
+import os, bet, unittest, collections
+import bet.calculateP.simpleFunP as sFun
+import numpy as np
+import numpy.testing as nptest
+
+local_path = os.path.join(os.path.dirname(bet.__file__),
+ '../test/test_calculateP')
+
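+# For orientation, every method under test returns a triple
+# (rho_D_M, d_distr_samples, d_Tree); a minimal sketch of the contract the
+# tests below rely on (M=67 and a 2D data space are illustrative choices):
+#
+#     rho_D_M, d_distr_samples, d_Tree = sFun.unif_unif(data, Q_ref, M=67,
+#         bin_ratio=0.1, num_d_emulate=1E3)
+#     rho_D_M.shape          # (67,), non-negative, sums to 1
+#     d_distr_samples.shape  # (67, 2), M points in the mdim data space
+#     (d_Tree.n, d_Tree.m)   # (67, 2), KDTree built on d_distr_samples
+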
+class prob(object):
+ """
+ Test that the probabilities sum to 1, are non-negative, and have the correct
+ dimensions.
+ """
+ def test_rho_D_M_sum_to_1(self):
+ """
+ Test that probabilities sum to 1.
+ """
+ nptest.assert_almost_equal(np.sum(self.rho_D_M), 1.0)
+ def test_rho_D_M_pos(self):
+ """
+ Test that probabilities are non-negative.
+ """
+ assert True == np.all(self.rho_D_M >= 0.0)
+ def test_dimensions(self):
+ """
+ Test that the dimensions of the outputs are correct.
+ """
+ assert self.rho_D_M.shape[0] == self.d_distr_samples.shape[0]
+ assert self.mdim == self.d_distr_samples.shape[1]
+ assert (self.d_Tree.n, self.d_Tree.m) == self.d_distr_samples.shape
+
+
+class prob_uniform(prob):
+ """
+ Test that the probabilities within the prescribed domain are non-zero and
+ that the probabilities outside of the prescribed domain are zero.
+ """
+
+ def test_domain(self):
+ """
+ Test that the probabilities within the prescribed domain are non-zero
+ and that the probabilities outside of the prescribed domain are zero.
+ """
+ # d_distr_samples is (M, mdim)
+ # rect_domain is (mdim, 2)
+ inside = np.logical_and(np.all(np.greater_equal(self.d_distr_samples,
+ self.rect_domain[:, 0]), axis=1),
+ np.all(np.less_equal(self.d_distr_samples,
+ self.rect_domain[:, 1]), axis=1))
+ assert np.all(self.rho_D_M[inside] >= 0.0)
+ assert np.all(self.rho_D_M[np.logical_not(inside)] == 0.0)
+
+
+class data_01D(object):
+ """
+ Sets up 01D data domain problem.
+ """
+ def createData(self):
+ """
+ Set up data.
+ """
+ self.data = np.random.random((100,))*10.0
+ self.Q_ref = 5.0
+ self.data_domain = np.array([0.0, 10.0])
+ self.mdim = 1
+
+
+class data_1D(object):
+ """
+ Sets up 1D data domain problem.
+ """
+ def createData(self):
+ """
+ Set up data.
+ """
+ self.data = np.random.random((100, 1))*10.0
+ self.Q_ref = np.array([5.0])
+ self.data_domain = np.expand_dims(np.array([0.0, 10.0]), axis=0)
+ self.mdim = 1
+
+
+class data_2D(object):
+ """
+ Sets up 2D data domain problem.
+ """
+ def createData(self):
+ """
+ Set up data.
+ """
+ self.data = np.random.random((100, 2))*10.0
+ self.Q_ref = np.array([5.0, 5.0])
+ self.data_domain = np.array([[0.0, 10.0], [0.0, 10.0]])
+ self.mdim = 2
+
+
+class data_3D(object):
+ """
+ Sets up 3D data domain problem.
+ """
+ def createData(self):
+ """
+ Set up data.
+ """
+ self.data = np.random.random((100, 3))*10.0
+ self.Q_ref = np.array([5.0, 5.0, 5.0])
+ self.data_domain = np.array([[0.0, 10.0], [0.0, 10.0], [0.0, 10.0]])
+ self.mdim = 3
+
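+# The concrete test cases below compose a data_* fixture with a behavior
+# class via multiple inheritance; each case's setUp() first builds the data
+# and then runs the behavior's setUp(). A schematic of the pattern
+# (hypothetical class name, for illustration only):
+#
+#     class test_example_2D(data_2D, unif_unif):
+#         def setUp(self):
+#             super(test_example_2D, self).createData()  # from data_2D
+#             super(test_example_2D, self).setUp()       # from unif_unif
+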
+class unif_unif(prob_uniform):
+ """
+ Set up :meth:`bet.calculateP.simpleFunP.unif_unif` on data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ self.rho_D_M, self.d_distr_samples, self.d_Tree = sFun.unif_unif(self.data,
+ self.Q_ref, M=67, bin_ratio=0.1, num_d_emulate=1E3)
+
+ if not isinstance(self.Q_ref, np.ndarray):
+ self.Q_ref = np.array([self.Q_ref])
+ if len(self.data_domain.shape) == 1:
+ self.data_domain = np.expand_dims(self.data_domain, axis=0)
+
+ self.rect_domain = np.zeros((self.data_domain.shape[0], 2))
+ r_width = 0.1*self.data_domain[:, 1]
+
+ self.rect_domain[:, 0] = self.Q_ref - .5*r_width
+ self.rect_domain[:, 1] = self.Q_ref + .5*r_width
+
+ def test_M(self):
+ """
+ Test that the right number of d_distr_samples are used to create
+ rho_D_M.
+ """
+ assert len(self.rho_D_M) == 67
+
+ def test_domain(self):
+ """
+ Test that the probabilities within the prescribed domain are non-zero
+ and that the probabilities outside of the prescribed domain are zero.
+ """
+ # d_distr_samples is (M, mdim)
+ # rect_domain is (mdim, 2)
+ inside = np.logical_and(np.all(np.greater_equal(self.d_distr_samples,
+ self.rect_domain[:, 0]), axis=1),
+ np.all(np.less_equal(self.d_distr_samples,
+ self.rect_domain[:, 1]), axis=1))
+ msg = "Due to the inherent randomness of this method, this may fail."
+ print msg
+ print np.sum(self.rho_D_M[inside] >= 0.0)
+ assert np.sum(self.rho_D_M[inside] >= 0.0)<100
+ print np.sum(self.rho_D_M[np.logical_not(inside)] == 0.0)
+ assert np.sum(self.rho_D_M[np.logical_not(inside)] == 0.0)<100
+
+class test_unif_unif_01D(data_01D, unif_unif):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.unif_unif` on 01D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_unif_unif_01D, self).createData()
+ super(test_unif_unif_01D, self).setUp()
+
+class test_unif_unif_1D(data_1D, unif_unif):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.unif_unif` on 1D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_unif_unif_1D, self).createData()
+ super(test_unif_unif_1D, self).setUp()
+
+
+class test_unif_unif_2D(data_2D, unif_unif):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.unif_unif` on 2D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_unif_unif_2D, self).createData()
+ super(test_unif_unif_2D, self).setUp()
+
+
+class test_unif_unif_3D(data_3D, unif_unif):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.unif_unif` on 3D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_unif_unif_3D, self).createData()
+ super(test_unif_unif_3D, self).setUp()
+
+class normal_normal(prob):
+ """
+ Set up :meth:`bet.calculateP.simpleFunP.normal_normal` on data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ if not isinstance(self.Q_ref, np.ndarray):
+ std = 1.0
+ else:
+ std = np.ones(self.Q_ref.shape)
+ self.rho_D_M, self.d_distr_samples, self.d_Tree = sFun.normal_normal(self.Q_ref,
+ M=67, std=std, num_d_emulate=1E3)
+
+ def test_M(self):
+ """
+ Test that the right number of d_distr_samples are used to create
+ rho_D_M.
+ """
+ assert len(self.rho_D_M) == 67
+
+class test_normal_normal_01D(data_01D, normal_normal):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.normal_normal` on 01D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_normal_normal_01D, self).createData()
+ super(test_normal_normal_01D, self).setUp()
+
+class test_normal_normal_1D(data_1D, normal_normal):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.normal_normal` on 1D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_normal_normal_1D, self).createData()
+ super(test_normal_normal_1D, self).setUp()
+
+
+class test_normal_normal_2D(data_2D, normal_normal):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.normal_normal` on 2D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_normal_normal_2D, self).createData()
+ super(test_normal_normal_2D, self).setUp()
+
+
+class test_normal_normal_3D(data_3D, normal_normal):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.normal_normal` on 3D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_normal_normal_3D, self).createData()
+ super(test_normal_normal_3D, self).setUp()
+
+
+class uniform_hyperrectangle_base(prob_uniform):
+ """
+ Provides set up and a test to check the number of ``d_distr_samples`` for
+ an exact simple function approximation of a hyperrectangle.
+ """
+ def test_M(self):
+ """
+ Test that the right number of d_distr_samples are used to create
+ rho_D_M.
+ """
+ if not isinstance(self.center_pts_per_edge, collections.Iterable):
+ assert len(self.rho_D_M) == (self.center_pts_per_edge+2)**self.mdim
+ else:
+ assert len(self.rho_D_M) == np.prod(self.center_pts_per_edge+2)
+
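+# For example (illustrative values): with center_pts_per_edge = 2 and
+# mdim = 3, the exact approximation above carries (2 + 2)**3 = 64
+# d_distr_samples.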
+
+class uniform_hyperrectangle_int(uniform_hyperrectangle_base):
+ """
+ Set up :meth:`bet.calculateP.simpleFunP.uniform_hyperrectangle_*` with an
+ int type value for ``center_pts_per_edge``.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ self.center_pts_per_edge = 2
+
+
+class uniform_hyperrectangle_list(uniform_hyperrectangle_base):
+ """
+ Set up :meth:`bet.calculateP.simpleFunP.uniform_hyperrectangle_*` with an
+ iterable type value for ``center_pts_per_edge``.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ self.center_pts_per_edge = 2*np.ones((self.mdim,), dtype=np.int)
+
+class uniform_hyperrectangle_user_int(uniform_hyperrectangle_int):
+ """
+ Set up :meth:`bet.calculateP.simpleFunP.uniform_hyperrectangle_user` with an
+ int value for ``center_pts_per_edge``.
+
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(uniform_hyperrectangle_user_int, self).setUp()
+ if not isinstance(self.Q_ref, np.ndarray):
+ Q_ref = np.array([self.Q_ref])
+ else:
+ Q_ref = self.Q_ref
+ if len(self.data_domain.shape) == 1:
+ data_domain = np.expand_dims(self.data_domain, axis=0)
+ else:
+ data_domain = self.data_domain
+
+ self.rect_domain = np.zeros((data_domain.shape[0], 2))
+ r_width = 0.1*data_domain[:, 1]
+
+ self.rect_domain[:, 0] = Q_ref - .5*r_width
+ self.rect_domain[:, 1] = Q_ref + .5*r_width
+
+ self.rho_D_M, self.d_distr_samples, self.d_Tree = sFun.uniform_hyperrectangle_user(self.data,
+ self.rect_domain.transpose(), self.center_pts_per_edge)
+
+class uniform_hyperrectangle_user_list(uniform_hyperrectangle_list):
+ """
+ Set up :meth:`bet.calculateP.simpleFunP.uniform_hyperrectangle_user` with an
+ iterable value for ``center_pts_per_edge``.
+
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(uniform_hyperrectangle_user_list, self).setUp()
+ if not isinstance(self.Q_ref, np.ndarray):
+ Q_ref = np.array([self.Q_ref])
+ else:
+ Q_ref = self.Q_ref
+ if len(self.data_domain.shape) == 1:
+ data_domain = np.expand_dims(self.data_domain, axis=0)
+ else:
+ data_domain = self.data_domain
+
+ self.rect_domain = np.zeros((data_domain.shape[0], 2))
+ r_width = 0.1*data_domain[:, 1]
+
+ self.rect_domain[:, 0] = Q_ref - .5*r_width
+ self.rect_domain[:, 1] = Q_ref + .5*r_width
+
+ self.rho_D_M, self.d_distr_samples, self.d_Tree = sFun.uniform_hyperrectangle_user(self.data,
+ self.rect_domain.transpose(), self.center_pts_per_edge)
+
+
+class test_uniform_hyperrectangle_user_int_01D(data_01D, uniform_hyperrectangle_user_int):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.uniform_hyperrectangle_user_int` on 01D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_uniform_hyperrectangle_user_int_01D, self).createData()
+ super(test_uniform_hyperrectangle_user_int_01D, self).setUp()
+
+class test_uniform_hyperrectangle_user_int_1D(data_1D, uniform_hyperrectangle_user_int):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.uniform_hyperrectangle_user_int` on 1D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_uniform_hyperrectangle_user_int_1D, self).createData()
+ super(test_uniform_hyperrectangle_user_int_1D, self).setUp()
+
+
+class test_uniform_hyperrectangle_user_int_2D(data_2D, uniform_hyperrectangle_user_int):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.uniform_hyperrectangle_user_int` on 2D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_uniform_hyperrectangle_user_int_2D, self).createData()
+ super(test_uniform_hyperrectangle_user_int_2D, self).setUp()
+
+
+class test_uniform_hyperrectangle_user_int_3D(data_3D, uniform_hyperrectangle_user_int):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.uniform_hyperrectangle_user_int` on 3D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_uniform_hyperrectangle_user_int_3D, self).createData()
+ super(test_uniform_hyperrectangle_user_int_3D, self).setUp()
+
+
+class test_uniform_hyperrectangle_user_list_01D(data_01D, uniform_hyperrectangle_user_list):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.uniform_hyperrectangle_user_list` on 01D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_uniform_hyperrectangle_user_list_01D, self).createData()
+ super(test_uniform_hyperrectangle_user_list_01D, self).setUp()
+
+class test_uniform_hyperrectangle_user_list_1D(data_1D, uniform_hyperrectangle_user_list):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.uniform_hyperrectangle_user_list` on 1D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_uniform_hyperrectangle_user_list_1D, self).createData()
+ super(test_uniform_hyperrectangle_user_list_1D, self).setUp()
+
+
+class test_uniform_hyperrectangle_user_list_2D(data_2D, uniform_hyperrectangle_user_list):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.uniform_hyperrectangle_user_list` on 2D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_uniform_hyperrectangle_user_list_2D, self).createData()
+ super(test_uniform_hyperrectangle_user_list_2D, self).setUp()
+
+
+class test_uniform_hyperrectangle_user_list_3D(data_3D, uniform_hyperrectangle_user_list):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.uniform_hyperrectangle_user_list` on 3D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_uniform_hyperrectangle_user_list_3D, self).createData()
+ super(test_uniform_hyperrectangle_user_list_3D, self).setUp()
+
+
+class uniform_hyperrectangle_size_int(uniform_hyperrectangle_int):
+ """
+ Set up :meth:`bet.calculateP.simpleFunP.uniform_hyperrectangle_binsize` with
+ an int value for ``center_pts_per_edge``.
+
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(uniform_hyperrectangle_size_int, self).setUp()
+ if not isinstance(self.Q_ref, np.ndarray):
+ Q_ref = np.array([self.Q_ref])
+ else:
+ Q_ref = self.Q_ref
+ if len(self.data_domain.shape) == 1:
+ data_domain = np.expand_dims(self.data_domain, axis=0)
+ else:
+ data_domain = self.data_domain
+
+ self.rect_domain = np.zeros((data_domain.shape[0], 2))
+ binsize = 1.0
+ r_width = binsize*np.ones(data_domain[:, 1].shape)
+
+ self.rect_domain[:, 0] = Q_ref - .5*r_width
+ self.rect_domain[:, 1] = Q_ref + .5*r_width
+
+ self.rho_D_M, self.d_distr_samples, self.d_Tree = sFun.uniform_hyperrectangle_binsize(self.data,
+ self.Q_ref, binsize, self.center_pts_per_edge)
+
+class uniform_hyperrectangle_size_list(uniform_hyperrectangle_list):
+ """
+ Set up :meth:`bet.calculateP.simpleFunP.uniform_hyperrectangle_binsize` with
+ an iterable value for ``center_pts_per_edge``.
+
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(uniform_hyperrectangle_size_list, self).setUp()
+ if not isinstance(self.Q_ref, np.ndarray):
+ Q_ref = np.array([self.Q_ref])
+ else:
+ Q_ref = self.Q_ref
+ if len(self.data_domain.shape) == 1:
+ data_domain = np.expand_dims(self.data_domain, axis=0)
+ else:
+ data_domain = self.data_domain
+
+ self.rect_domain = np.zeros((data_domain.shape[0], 2))
+ binsize = 1.0*np.ones((data_domain.shape[0],))
+ r_width = binsize
+
+ self.rect_domain[:, 0] = Q_ref - .5*r_width
+ self.rect_domain[:, 1] = Q_ref + .5*r_width
+
+ self.rho_D_M, self.d_distr_samples, self.d_Tree = sFun.uniform_hyperrectangle_binsize(self.data,
+ self.Q_ref, binsize, self.center_pts_per_edge)
+
+
+class test_uniform_hyperrectangle_size_int_01D(data_01D, uniform_hyperrectangle_size_int):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.uniform_hyperrectangle_size_int` on 01D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_uniform_hyperrectangle_size_int_01D, self).createData()
+ super(test_uniform_hyperrectangle_size_int_01D, self).setUp()
+
+class test_uniform_hyperrectangle_size_int_1D(data_1D, uniform_hyperrectangle_size_int):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.uniform_hyperrectangle_size_int` on 1D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_uniform_hyperrectangle_size_int_1D, self).createData()
+ super(test_uniform_hyperrectangle_size_int_1D, self).setUp()
+
+
+class test_uniform_hyperrectangle_size_int_2D(data_2D, uniform_hyperrectangle_size_int):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.uniform_hyperrectangle_size_int` on 2D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_uniform_hyperrectangle_size_int_2D, self).createData()
+ super(test_uniform_hyperrectangle_size_int_2D, self).setUp()
+
+
+class test_uniform_hyperrectangle_size_int_3D(data_3D, uniform_hyperrectangle_size_int):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.uniform_hyperrectangle_size_int` on 3D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_uniform_hyperrectangle_size_int_3D, self).createData()
+ super(test_uniform_hyperrectangle_size_int_3D, self).setUp()
+
+
+class test_uniform_hyperrectangle_size_list_01D(data_01D, uniform_hyperrectangle_size_list):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.uniform_hyperrectangle_size_list` on 01D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_uniform_hyperrectangle_size_list_01D, self).createData()
+ super(test_uniform_hyperrectangle_size_list_01D, self).setUp()
+
+class test_uniform_hyperrectangle_size_list_1D(data_1D, uniform_hyperrectangle_size_list):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.uniform_hyperrectangle_size_list` on 1D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_uniform_hyperrectangle_size_list_1D, self).createData()
+ super(test_uniform_hyperrectangle_size_list_1D, self).setUp()
+
+
+class test_uniform_hyperrectangle_size_list_2D(data_2D, uniform_hyperrectangle_size_list):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.uniform_hyperrectangle_size_list` on 2D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_uniform_hyperrectangle_size_list_2D, self).createData()
+ super(test_uniform_hyperrectangle_size_list_2D, self).setUp()
+
+
+class test_uniform_hyperrectangle_size_list_3D(data_3D, uniform_hyperrectangle_size_list):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.uniform_hyperrectangle_size_list` on 3D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_uniform_hyperrectangle_size_list_3D, self).createData()
+ super(test_uniform_hyperrectangle_size_list_3D, self).setUp()
+
+class uniform_hyperrectangle_ratio_int(uniform_hyperrectangle_int):
+ """
+ Set up :meth:`bet.calculateP.simpleFunP.uniform_hyperrectangle` with an
+ int value for ``center_pts_per_edge``.
+
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(uniform_hyperrectangle_ratio_int, self).setUp()
+ if not isinstance(self.Q_ref, np.ndarray):
+ Q_ref = np.array([self.Q_ref])
+ else:
+ Q_ref = self.Q_ref
+ if len(self.data_domain.shape) == 1:
+ data_domain = np.expand_dims(self.data_domain, axis=0)
+ else:
+ data_domain = self.data_domain
+
+ self.rect_domain = np.zeros((data_domain.shape[0], 2))
+ binratio = 0.1
+ r_width = binratio*data_domain[:, 1]
+
+ self.rect_domain[:, 0] = Q_ref - .5*r_width
+ self.rect_domain[:, 1] = Q_ref + .5*r_width
+
+ self.rho_D_M, self.d_distr_samples, self.d_Tree = sFun.uniform_hyperrectangle(self.data,
+ self.Q_ref, binratio, self.center_pts_per_edge)
+
+class uniform_hyperrectangle_ratio_list(uniform_hyperrectangle_list):
+ """
+ Set up :meth:`bet.calculateP.simpleFunP.uniform_hyperrectangle` with an
+ iterable value for ``center_pts_per_edge``.
+
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(uniform_hyperrectangle_ratio_list, self).setUp()
+ if not isinstance(self.Q_ref, np.ndarray):
+ Q_ref = np.array([self.Q_ref])
+ else:
+ Q_ref = self.Q_ref
+ if len(self.data_domain.shape) == 1:
+ data_domain = np.expand_dims(self.data_domain, axis=0)
+ else:
+ data_domain = self.data_domain
+
+ self.rect_domain = np.zeros((data_domain.shape[0], 2))
+ binratio = 0.1*np.ones((data_domain.shape[0],))
+ r_width = binratio*data_domain[:, 1]
+
+ self.rect_domain[:, 0] = Q_ref - .5*r_width
+ self.rect_domain[:, 1] = Q_ref + .5*r_width
+
+ self.rho_D_M, self.d_distr_samples, self.d_Tree = sFun.uniform_hyperrectangle(self.data,
+ self.Q_ref, binratio, self.center_pts_per_edge)
+
+
+class test_uniform_hyperrectangle_ratio_int_01D(data_01D, uniform_hyperrectangle_ratio_int):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.uniform_hyperrectangle_ratio_int` on 01D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_uniform_hyperrectangle_ratio_int_01D, self).createData()
+ super(test_uniform_hyperrectangle_ratio_int_01D, self).setUp()
+
+class test_uniform_hyperrectangle_ratio_int_1D(data_1D, uniform_hyperrectangle_ratio_int):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.uniform_hyperrectangle_ratio_int` on 1D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_uniform_hyperrectangle_ratio_int_1D, self).createData()
+ super(test_uniform_hyperrectangle_ratio_int_1D, self).setUp()
+
+
+class test_uniform_hyperrectangle_ratio_int_2D(data_2D, uniform_hyperrectangle_ratio_int):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.uniform_hyperrectangle_ratio_int` on 2D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_uniform_hyperrectangle_ratio_int_2D, self).createData()
+ super(test_uniform_hyperrectangle_ratio_int_2D, self).setUp()
+
+
+class test_uniform_hyperrectangle_ratio_int_3D(data_3D, uniform_hyperrectangle_ratio_int):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.uniform_hyperrectangle_ratio_int` on 3D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_uniform_hyperrectangle_ratio_int_3D, self).createData()
+ super(test_uniform_hyperrectangle_ratio_int_3D, self).setUp()
+
+
+class test_uniform_hyperrectangle_ratio_list_01D(data_01D, uniform_hyperrectangle_ratio_list):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.uniform_hyperrectangle_ratio_list` on 01D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_uniform_hyperrectangle_ratio_list_01D, self).createData()
+ super(test_uniform_hyperrectangle_ratio_list_01D, self).setUp()
+
+class test_uniform_hyperrectangle_ratio_list_1D(data_1D, uniform_hyperrectangle_ratio_list):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.uniform_hyperrectangle_ratio_list` on 1D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_uniform_hyperrectangle_ratio_list_1D, self).createData()
+ super(test_uniform_hyperrectangle_ratio_list_1D, self).setUp()
+
+
+class test_uniform_hyperrectangle_ratio_list_2D(data_2D, uniform_hyperrectangle_ratio_list):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.uniform_hyperrectangle_ratio_list` on 2D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_uniform_hyperrectangle_ratio_list_2D, self).createData()
+ super(test_uniform_hyperrectangle_ratio_list_2D, self).setUp()
+
+
+class test_uniform_hyperrectangle_ratio_list_3D(data_3D, uniform_hyperrectangle_ratio_list):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.uniform_hyperrectangle_ratio_list` on 3D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_uniform_hyperrectangle_ratio_list_3D, self).createData()
+ super(test_uniform_hyperrectangle_ratio_list_3D, self).setUp()
+
+class uniform_data(prob_uniform):
+ """
+ Set up :meth:`bet.calculateP.simpleFunP.uniform_data` on data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ self.rho_D_M, self.d_distr_samples, self.d_Tree = sFun.uniform_data(self.data)
+
+ if not isinstance(self.Q_ref, np.ndarray):
+ self.Q_ref = np.array([self.Q_ref])
+ if len(self.data_domain.shape) == 1:
+ self.data_domain = np.expand_dims(self.data_domain, axis=0)
+ self.rect_domain = self.data_domain
+
+ def test_M(self):
+ """
+ Test that the right number of d_distr_samples are used to create
+ rho_D_M.
+ """
+ assert len(self.rho_D_M) == self.data.shape[0]
+
+class test_uniform_data_01D(data_01D, uniform_data):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.uniform_data` on 01D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_uniform_data_01D, self).createData()
+ super(test_uniform_data_01D, self).setUp()
+
+class test_uniform_data_1D(data_1D, uniform_data):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.uniform_data` on 1D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_uniform_data_1D, self).createData()
+ super(test_uniform_data_1D, self).setUp()
+
+
+class test_uniform_data_2D(data_2D, uniform_data):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.uniform_data` on 2D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_uniform_data_2D, self).createData()
+ super(test_uniform_data_2D, self).setUp()
+
+
+class test_uniform_data_3D(data_3D, uniform_data):
+ """
+ Tests :meth:`bet.calculateP.simpleFunP.uniform_data` on 3D data domain.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ super(test_uniform_data_3D, self).createData()
+ super(test_uniform_data_3D, self).setUp()
+
+
diff --git a/test/test_calculateP/test_voronoiHistogram.py b/test/test_calculateP/test_voronoiHistogram.py
new file mode 100644
index 00000000..20795ee3
--- /dev/null
+++ b/test/test_calculateP/test_voronoiHistogram.py
@@ -0,0 +1,604 @@
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
+# -*- coding: utf-8 -*-
+# Lindley Graham 04/12/2015
+"""
+Test methods in :mod:`bet.calculateP.voronoiHistogram`. Since this module is
+not meant to be directly accessed by the user, we only test for dimensions 1,
+2, and 3. We also assume that ``center_points_per_edge`` is a list, as
+specified in the docstrings for the methods in
+:mod:`bet.calculateP.voronoiHistogram`. In other words, all the dimensions of
+any arrays must be correct before calling these methods.
+"""
+
+import os, unittest, collections
+import bet.calculateP.voronoiHistogram as vHist
+import bet.util as util
+import numpy as np
+import numpy.testing as nptest
+from test.test_calculateP.test_simpleFunP import prob_uniform
+
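+# The central helper under test, center_and_layer1_points, returns a triple
+# (points, interior_and_layer1, rect_domain). Schematically (names as used in
+# the tests below):
+#
+#     rect_width = r_ratio*(sur_domain[:, 1] - sur_domain[:, 0])
+#     rect_domain[:, 0] = center - .5*rect_width
+#     rect_domain[:, 1] = center + .5*rect_width
+#
+# with one extra layer of points offset rect_width/(2*center_pts_per_edge)
+# beyond each face of the hyperrectangle; see create_output() below.
+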
+# Do below for dimensions 1, 2, and 3
+class domain_1D(object):
+ """
+ Sets up 1D domain problem.
+ """
+ def createDomain(self):
+ """
+ Set up data.
+ """
+ self.center = np.array([5.0])
+ self.sur_domain = np.expand_dims(np.array([0.0, 10.0]), axis=0)
+ self.mdim = 1
+ self.center_pts_per_edge = [1]
+
+
+class domain_2D(object):
+ """
+ Sets up 2D domain problem.
+ """
+ def createDomain(self):
+ """
+ Set up data.
+ """
+ self.center = np.array([5.0, 5.0])
+ self.sur_domain = np.array([[0.0, 10.0], [0.0, 10.0]])
+ self.mdim = 2
+ self.center_pts_per_edge = [1, 2]
+
+
+class domain_3D(object):
+ """
+ Sets up 3D domain problem.
+ """
+ def createDomain(self):
+ """
+ Set up data.
+ """
+ self.center = np.array([5.0, 5.0, 5.0])
+ self.sur_domain = np.array([[0.0, 10.0], [0.0, 10.0], [0.0, 10.0]])
+ self.mdim = 3
+ self.center_pts_per_edge = [1, 2, 1]
+
+class center_and_layer1_points(object):
+ """
+ Test :meth:`bet.calculateP.voronoiHistogram.center_and_layer1_points`
+ """
+ def setUp(self):
+ """
+ Set up the problem for
+ :meth:`bet.calculateP.voronoiHistogram.center_and_layer1_points`
+ """
+ self.create_output()
+ output = vHist.center_and_layer1_points(self.center_pts_per_edge,
+ self.center, self.r_ratio, self.sur_domain)
+ self.points, self.interior_and_layer1_VH, self.rect_domain_VH = output
+
+ def create_output(self):
+ """
+ Create output to test the output of
+ :meth:`bet.calculateP.voronoiHistogram.center_and_layer1_points`
+ against.
+
+ :param r_ratio: The ratio of the length of the sides of the
+ hyperrectangle to the surrounding domain
+ :type r_ratio: int or list()
+ """
+ sur_width = self.sur_domain[:, 1] - self.sur_domain[:, 0]
+ rect_width = self.r_ratio*sur_width
+ self.rect_domain = np.empty(self.sur_domain.shape)
+ self.rect_domain[:, 0] = self.center - .5*rect_width
+ self.rect_domain[:, 1] = self.center + .5*rect_width
+ if not isinstance(self.center_pts_per_edge, np.ndarray):
+ self.center_pts_per_edge = np.array(self.center_pts_per_edge)
+ layer1_left = self.rect_domain[:, 0]-rect_width/(2*self.center_pts_per_edge)
+ layer1_right = self.rect_domain[:, 1]+rect_width/(2*self.center_pts_per_edge)
+ self.interior_and_layer1 = list()
+ for dim in xrange(self.mdim):
+ self.interior_and_layer1.append(np.linspace(layer1_left[dim],
+ layer1_right[dim], self.center_pts_per_edge[dim]+2))
+
+ def test_dimensions(self):
+ """
+ Test the dimensions for :meth:`bet.calculateP.voronoiHistogram.center_and_layer1_points`
+ """
+ assert self.points.shape == (np.prod(self.center_pts_per_edge+2),
+ self.mdim)
+ assert len(self.interior_and_layer1_VH) == self.mdim
+ nptest.assert_array_almost_equal(self.center_pts_per_edge+2,
+ [len(dim_layer) for dim_layer in self.interior_and_layer1_VH])
+ assert self.rect_domain.shape == (self.mdim, 2)
+
+ def test_rect_domain(self):
+ """
+ Test that the ``rect_domain`` is correct.
+ """
+ nptest.assert_array_almost_equal(self.rect_domain,
+ self.rect_domain_VH)
+
+ def test_bounding_layer(self):
+ """
+ Test that the interior_and_layer1 is correct.
+ """
+ compare_list = list()
+ for mine, meth in zip(self.interior_and_layer1, self.interior_and_layer1_VH):
+ compare_list.append(np.allclose(mine, meth))
+ assert np.all(compare_list)
+
+ def test_points(self):
+ """
+ Test that the points are correct.
+ """
+ nptest.assert_array_almost_equal(self.points,
+ util.meshgrid_ndim(self.interior_and_layer1))
+
+class center_and_layer1_points_double(center_and_layer1_points):
+ """
+ Provides set up for
+ :meth:`bet.calculateP.voronoiHistogram.center_and_layer1_points` when
+ r_ratio is a double.
+ """
+ def setUp(self):
+ self.r_ratio = 0.2
+ super(center_and_layer1_points_double, self).setUp()
+
+class center_and_layer1_points_list(center_and_layer1_points):
+ """
+ Provides set up for
+ :meth:`bet.calculateP.voronoiHistogram.center_and_layer1_points` when
+ r_ratio is a list.
+ """
+ def setUp(self):
+ self.r_ratio = 0.2*np.ones(self.mdim)
+ super(center_and_layer1_points_list, self).setUp()
+
+
+class center_and_layer1_points_binsize(center_and_layer1_points):
+ """
+ Test
+ :meth:`bet.calculateP.voronoiHistogram.center_and_layer1_points_binsize`
+ """
+ def setUp(self):
+ """
+ Set up the problem for
+ :meth:`bet.calculateP.voronoiHistogram.center_and_layer1_points`
+ """
+ self.r_ratio = self.r_size/self.sur_domain[:, 1]
+ super(center_and_layer1_points_binsize, self).create_output()
+ output = vHist.center_and_layer1_points_binsize(self.center_pts_per_edge,
+ self.center, self.r_size, self.sur_domain)
+ self.points, self.interior_and_layer1_VH, self.rect_domain_VH = output
+
+
+class center_and_layer1_points_binsize_list(center_and_layer1_points_binsize):
+ """
+ Provides set up for
+ :meth:`bet.calculateP.voronoiHistogram.center_and_layer1_points_binsize` when
+ r_size is a list.
+ """
+ def setUp(self):
+ self.r_size = self.sur_domain[:, 1]*0.2
+ super(center_and_layer1_points_binsize_list, self).setUp()
+
+class center_and_layer1_points_binsize_double(center_and_layer1_points_binsize):
+ """
+ Provides set up for
+ :meth:`bet.calculateP.voronoiHistogram.center_and_layer1_points_binsize` when
+ r_size is a double.
+ """
+ def setUp(self):
+ self.r_size = self.sur_domain[0, 1]*0.2
+ super(center_and_layer1_points_binsize_double, self).setUp()
+
+class test_calp_list_1D(domain_1D, center_and_layer1_points_list):
+ """
+ Test
+ :meth:`bet.calculateP.voronoiHistogram.center_and_layer1_points` for a 1D
+ domain with r_ratio as a list.
+ """
+ def setUp(self):
+ super(test_calp_list_1D, self).createDomain()
+ super(test_calp_list_1D, self).setUp()
+class test_calp_list_2D(domain_2D, center_and_layer1_points_list):
+ """
+ Test
+ :meth:`bet.calculateP.voronoiHistogram.center_and_layer1_points` for a 2D
+ domain with r_ratio as a list.
+ """
+ def setUp(self):
+ super(test_calp_list_2D, self).createDomain()
+ super(test_calp_list_2D, self).setUp()
+class test_calp_list_3D(domain_3D, center_and_layer1_points_list):
+ """
+ Test
+ :meth:`bet.calculateP.voronoiHistogram.center_and_layer1_points` for a 3D
+ domain with r_ratio as a list.
+ """
+ def setUp(self):
+ super(test_calp_list_3D, self).createDomain()
+ super(test_calp_list_3D, self).setUp()
+
+class test_calp_double_1D(domain_1D, center_and_layer1_points_double):
+ """
+ Test
+ :meth:`bet.calculateP.voronoiHistogram.center_and_layer1_points` for a 1D
+ domain with r_ratio as a double.
+ """
+ def setUp(self):
+ super(test_calp_double_1D, self).createDomain()
+ super(test_calp_double_1D, self).setUp()
+class test_calp_double_2D(domain_2D, center_and_layer1_points_double):
+ """
+ Test
+ :meth:`bet.calculateP.voronoiHistogram.center_and_layer1_points` for a 2D
+ domain with r_ratio as a double.
+ """
+ def setUp(self):
+ super(test_calp_double_2D, self).createDomain()
+ super(test_calp_double_2D, self).setUp()
+class test_calp_double_3D(domain_3D, center_and_layer1_points_double):
+ """
+ Test
+ :meth:`bet.calculateP.voronoiHistogram.center_and_layer1_points` for a 3D
+ domain with r_ratio as a double.
+ """
+ def setUp(self):
+ super(test_calp_double_3D, self).createDomain()
+ super(test_calp_double_3D, self).setUp()
+
+
+class test_calps_list_1D(domain_1D, center_and_layer1_points_binsize_list):
+ """
+ Test
+ :meth:`bet.calculateP.voronoiHistogram.center_and_layer1_points_binsize`
+ for a 1D domain with r_size as a list.
+ """
+ def setUp(self):
+ super(test_calps_list_1D, self).createDomain()
+ super(test_calps_list_1D, self).setUp()
+class test_calps_list_2D(domain_2D, center_and_layer1_points_binsize_list):
+ """
+ Test
+ :meth:`bet.calculateP.voronoiHistogram.center_and_layer1_points_binsize`
+ for a 2D domain with r_size as a list.
+ """
+ def setUp(self):
+ super(test_calps_list_2D, self).createDomain()
+ super(test_calps_list_2D, self).setUp()
+class test_calps_list_3D(domain_3D, center_and_layer1_points_binsize_list):
+ """
+ Test
+ :meth:`bet.calculateP.voronoiHistogram.center_and_layer1_points_binsize`
+ for a 3D domain with r_size as a list.
+ """
+ def setUp(self):
+ super(test_calps_list_3D, self).createDomain()
+ super(test_calps_list_3D, self).setUp()
+
+class test_calps_double_1D(domain_1D, center_and_layer1_points_binsize_double):
+ """
+ Test
+    :meth:`bet.calculateP.voronoiHistogram.center_and_layer1_points_binsize`
+    for a 1D domain with r_size as a double.
+ """
+ def setUp(self):
+ super(test_calps_double_1D, self).createDomain()
+ super(test_calps_double_1D, self).setUp()
+class test_calps_double_2D(domain_2D, center_and_layer1_points_binsize_double):
+ """
+ Test
+    :meth:`bet.calculateP.voronoiHistogram.center_and_layer1_points_binsize`
+    for a 2D domain with r_size as a double.
+ """
+ def setUp(self):
+ super(test_calps_double_2D, self).createDomain()
+ super(test_calps_double_2D, self).setUp()
+class test_calps_double_3D(domain_3D, center_and_layer1_points_binsize_double):
+ """
+ Test
+    :meth:`bet.calculateP.voronoiHistogram.center_and_layer1_points_binsize`
+    for a 3D domain with r_size as a double.
+ """
+ def setUp(self):
+ super(test_calps_double_3D, self).createDomain()
+ super(test_calps_double_3D, self).setUp()
+
+class edges(object):
+ """
+ Provides a method to test that the dimensions of the output (and the
+ output itself) from methods with the pattern
+ ``bet.calculateP.voronoiHistogram.edges_*`` are correct.
+ """
+ def test_dimensions(self):
+ """
+ Compares the dimensions of the output from
+ ``bet.calculateP.voronoiHistogram.edges_*``.
+ """
+ compare_dim = list()
+ for edge, center_ppe in zip(self.rect_and_sur_edges,
+ self.center_pts_per_edge):
+ compare_dim.append(len(edge) == center_ppe+3)
+ assert np.all(compare_dim)
+
+ def test_output(self):
+ """
+        Compares the output from ``bet.calculateP.voronoiHistogram.edges_*``
+ with a known solution
+ """
+ compare_dim = list()
+ for edge, edgeVH in zip(self.my_edges, self.rect_and_sur_edges):
+ compare_dim.append(np.allclose(edge, edgeVH))
+ assert np.all(compare_dim)
+
+
+class edges_regular(edges):
+ """
+ Test :meth:`bet.calculateP.voronoiHistogram.edges_regular`
+ """
+ def create_output(self):
+ sur_width = self.sur_domain[:,1]-self.sur_domain[:,0]
+ rect_width = 0.2*sur_width
+ rect_domain = np.empty(self.sur_domain.shape)
+ rect_domain[:, 0] = self.center - .5*rect_width
+ rect_domain[:, 1] = self.center + .5*rect_width
+ self.my_edges = list()
+ for dim in xrange(self.sur_domain.shape[0]):
+ int_l1 = np.linspace(rect_domain[dim, 0], rect_domain[dim, 1],
+ self.center_pts_per_edge[dim]+1)
+ int_l2 = np.empty((int_l1.shape[0]+2,))
+ int_l2[1:-1] = int_l1
+ int_l2[0] = self.sur_domain[dim, 0]
+ int_l2[-1] = self.sur_domain[dim, 1]
+ self.my_edges.append(int_l2)
+ self.rect_domain = rect_domain
+
+ def setUp(self):
+ self.create_output()
+ self.rect_and_sur_edges = vHist.edges_regular(self.center_pts_per_edge,
+ self.rect_domain, self.sur_domain)
+
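+# Editor's worked example (hedged, assuming sur_domain = [[0, 1]], center =
+# 0.5, and center_pts_per_edge = [3]): rect_width = 0.2 gives rect_domain =
+# [0.4, 0.6]; the interior edges linspace(0.4, 0.6, 4) plus the surrounding
+# domain bounds yield [0.0, 0.4, 0.4667, 0.5333, 0.6, 1.0], i.e.
+# center_pts_per_edge + 3 edges per dimension, as edges.test_dimensions
+# checks above.
+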
+
+class test_er_1D(domain_1D, edges_regular):
+ """
+ Test
+ :meth:`bet.calculateP.voronoiHistogram.edges_regular` for a 1D
+ domain.
+ """
+ def setUp(self):
+ super(test_er_1D, self).createDomain()
+ super(test_er_1D, self).setUp()
+class test_er_2D(domain_2D, edges_regular):
+ """
+ Test
+ :meth:`bet.calculateP.voronoiHistogram.edges_regular` for a 2D
+    domain.
+ """
+ def setUp(self):
+ super(test_er_2D, self).createDomain()
+ super(test_er_2D, self).setUp()
+class test_er_3D(domain_3D, edges_regular):
+ """
+ Test
+ :meth:`bet.calculateP.voronoiHistogram.edges_regular` for a 3D
+ domain.
+ """
+ def setUp(self):
+ super(test_er_3D, self).createDomain()
+ super(test_er_3D, self).setUp()
+
+class edges_from_points(edges):
+ """
+    Test :meth:`bet.calculateP.voronoiHistogram.edges_from_points`
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ points = list()
+ self.my_edges = list()
+ for dim in xrange(self.mdim):
+ points_dim = np.linspace(self.sur_domain[dim, 0],
+ self.sur_domain[dim, 1], 4)
+ points.append(points_dim)
+ self.my_edges.append((points_dim[1:]+points_dim[:-1])/2)
+ self.rect_and_sur_edges = vHist.edges_from_points(points)
+
+ def test_dimensions(self):
+ """
+        Test dimensions of
+        :meth:`bet.calculateP.voronoiHistogram.edges_from_points`
+ """
+ compare_dim = list()
+ for edge, my_edge in zip(self.rect_and_sur_edges,
+ self.my_edges):
+ compare_dim.append(len(edge) == len(my_edge))
+ assert np.all(compare_dim)
+
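+# Editor's worked example (hedged, assuming sur_domain = [[0, 1]]): setUp
+# passes points_dim = linspace(0, 1, 4) = [0, 1/3, 2/3, 1], so
+# edges_from_points is expected to return the pairwise midpoints
+# (points_dim[1:] + points_dim[:-1])/2 = [1/6, 1/2, 5/6] per dimension.
+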
+class test_efp_1D(domain_1D, edges_from_points):
+ """
+ Test
+ :meth:`bet.calculateP.voronoiHistogram.edges_from_points` for a 1D
+ domain.
+ """
+ def setUp(self):
+ super(test_efp_1D, self).createDomain()
+ super(test_efp_1D, self).setUp()
+class test_efp_2D(domain_2D, edges_from_points):
+ """
+ Test
+ :meth:`bet.calculateP.voronoiHistogram.edges_from_points` for a 2D
+ domain.
+ """
+ def setUp(self):
+ super(test_efp_2D, self).createDomain()
+ super(test_efp_2D, self).setUp()
+class test_efp_3D(domain_3D, edges_from_points):
+ """
+ Test
+ :meth:`bet.calculateP.voronoiHistogram.edges_from_points` for a 3D
+ domain.
+ """
+ def setUp(self):
+ super(test_efp_3D, self).createDomain()
+ super(test_efp_3D, self).setUp()
+
+class histogramdd_volumes(object):
+ """
+ Test :meth:`bet.calculateP.voronoiHistogram.histogramdd_volumes`
+ """
+ def setUp(self):
+ points = list()
+ self.edges = list()
+ for dim in xrange(self.mdim):
+ points_dim = np.linspace(self.sur_domain[dim, 0],
+ self.sur_domain[dim, 1], 4)
+ points.append(points_dim[1:-1])
+ self.edges.append((points_dim[1:]+points_dim[:-1])/2.0)
+ self.points = util.meshgrid_ndim(points)
+ self.H, _ = np.histogramdd(self.points, self.edges, normed=True)
+ volume = 1.0/(self.H*(2.0**self.mdim))
+ self.volume = volume.ravel()
+ output = vHist.histogramdd_volumes(self.edges, self.points)
+ self.o_H, self.o_volume, self.o_edges = output
+
+ def test_dimensions_H(self):
+ """
+ Test the dimensions of H from
+        :meth:`bet.calculateP.voronoiHistogram.histogramdd_volumes`
+ """
+ assert self.H.shape == self.o_H.shape
+
+ def test_dimensions_volume(self):
+ """
+ Test the dimensions of volume from
+        :meth:`bet.calculateP.voronoiHistogram.histogramdd_volumes`
+ """
+ assert self.volume.shape == self.o_volume.shape
+
+ def test_dimensions_edges(self):
+ """
+ Test the dimensions of edges from
+        :meth:`bet.calculateP.voronoiHistogram.histogramdd_volumes`
+ """
+ compare_dim = list()
+ for edge, my_edge in zip(self.o_edges, self.edges):
+ compare_dim.append(len(edge) == len(my_edge))
+ assert np.all(compare_dim)
+
+ def test_H_nonnegative(self):
+ """
+        Test that H from
+        :meth:`bet.calculateP.voronoiHistogram.histogramdd_volumes`
+ is nonnegative.
+ """
+ assert np.all(np.less(0.0, self.o_H))
+
+ def test_volumes_nonnegative(self):
+ """
+        Test that volume from
+        :meth:`bet.calculateP.voronoiHistogram.histogramdd_volumes`
+ is nonnegative.
+ """
+ assert np.all(np.less(0.0, self.o_volume))
+
+ def test_H(self):
+ """
+        Test that H from
+        :meth:`bet.calculateP.voronoiHistogram.histogramdd_volumes`
+ is correct.
+ """
+ assert np.allclose(self.H, self.o_H)
+
+ def test_volume(self):
+ """
+        Test that volume from
+        :meth:`bet.calculateP.voronoiHistogram.histogramdd_volumes`
+ is correct.
+ """
+ assert np.allclose(self.volume, self.o_volume)
+
+ def test_edges(self):
+ """
+        Test that the edges from
+        :meth:`bet.calculateP.voronoiHistogram.histogramdd_volumes`
+ are correct.
+ """
+ compare_dim = list()
+ for edge, my_edge in zip(self.o_edges, self.edges):
+ compare_dim.append(np.allclose(edge, my_edge))
+ assert np.all(compare_dim)
+
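+# Editor's note (hedged) on the volume formula in setUp: the edges give two
+# bins per dimension and the meshgrid puts exactly one of the 2**mdim points
+# in each bin, so with normed=True np.histogramdd returns the density
+# H = 1/(2**mdim * vol_bin) and volume = 1/(H * 2**mdim) recovers each bin's
+# volume, (1/3)**mdim when sur_domain = [0, 1]**mdim as assumed here.
+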
+class test_hddv_1D(domain_1D, histogramdd_volumes):
+ """
+ Test
+ :meth:`bet.calculateP.voronoiHistogram.histogramdd_volumes` for a 1D
+ domain.
+ """
+ def setUp(self):
+ super(test_hddv_1D, self).createDomain()
+ super(test_hddv_1D, self).setUp()
+class test_hddv_2D(domain_2D, histogramdd_volumes):
+ """
+ Test
+ :meth:`bet.calculateP.voronoiHistogram.histogramdd_volumes` for a 2D
+ domain.
+ """
+ def setUp(self):
+ super(test_hddv_2D, self).createDomain()
+ super(test_hddv_2D, self).setUp()
+class test_hddv_3D(domain_3D, histogramdd_volumes):
+ """
+ Test
+ :meth:`bet.calculateP.voronoiHistogram.histogramdd_volumes` for a 3D
+ domain.
+ """
+ def setUp(self):
+ super(test_hddv_3D, self).createDomain()
+ super(test_hddv_3D, self).setUp()
+
+class simple_fun_uniform(prob_uniform):
+ """
+    Test :meth:`bet.calculateP.voronoiHistogram.simple_fun_uniform`
+ """
+ def setUp(self):
+ """
+ Set up the problem
+ """
+ points = list()
+ edges = list()
+ self.rect_domain = np.empty((self.mdim, 2))
+ for dim in xrange(self.mdim):
+ points_dim = np.linspace(self.sur_domain[dim, 0],
+ self.sur_domain[dim, 1], 4)
+ points.append(points_dim[1:-1])
+ edge = (points_dim[1:]+points_dim[:-1])/2.0
+ edges.append(edge)
+ self.rect_domain[dim, :] = edge[[0, -1]]
+ points = util.meshgrid_ndim(points)
+ H, _ = np.histogramdd(points, edges, normed=True)
+ volume = 1.0/(H*(2.0**self.mdim))
+ volumes = volume.ravel()
+ output = vHist.simple_fun_uniform(points, volumes, self.rect_domain)
+ self.rho_D_M, self.d_distr_samples, self.d_Tree = output
+
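+# Editor's note (hedged): simple_fun_uniform is expected to return a simple
+# function approximation of a uniform density on rect_domain: rho_D_M (the
+# probability of each Voronoi cell, summing to 1), d_distr_samples (the cell
+# generators, here ``points``), and d_Tree (a scipy.spatial.KDTree on
+# d_distr_samples for nearest-neighbor lookups); the prob_uniform mix-in is
+# assumed to supply the assertions on these outputs.
+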
+class test_sfu_1D(domain_1D, simple_fun_uniform):
+ """
+ Test
+ :meth:`bet.calculateP.voronoiHistogram.simple_fun_uniform` for a 1D
+ domain.
+ """
+ def setUp(self):
+ super(test_sfu_1D, self).createDomain()
+ super(test_sfu_1D, self).setUp()
+class test_sfu_2D(domain_2D, simple_fun_uniform):
+ """
+ Test
+ :meth:`bet.calculateP.voronoiHistogram.simple_fun_uniform` for a 2D
+ domain.
+ """
+ def setUp(self):
+ super(test_sfu_2D, self).createDomain()
+ super(test_sfu_2D, self).setUp()
+class test_sfu_3D(domain_3D, simple_fun_uniform):
+ """
+ Test
+ :meth:`bet.calculateP.voronoiHistogram.simple_fun_uniform` for a 3D
+ domain.
+ """
+ def setUp(self):
+ super(test_sfu_3D, self).createDomain()
+ super(test_sfu_3D, self).setUp()
diff --git a/test/test_loadBalance/__init__.py b/test/test_loadBalance/__init__.py
deleted file mode 100644
index e373a35c..00000000
--- a/test/test_loadBalance/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-"""
-This subpackage contains the tests for the loadBalance subpackage. The user
-should add tests for custom :class:`bet.loadBalance` classes.
-"""
-__all__ = ['test_load_balance', 'test_lb_DIAMOND', 'test_lb_PADCIRC']
diff --git a/test/test_loadBalance/test_lb_DIAMOND.py b/test/test_loadBalance/test_lb_DIAMOND.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/test/test_loadBalance/test_lb_PADCIRC.py b/test/test_loadBalance/test_lb_PADCIRC.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/test/test_loadBalance/test_load_balance.py b/test/test_loadBalance/test_load_balance.py
deleted file mode 100644
index ea7d0616..00000000
--- a/test/test_loadBalance/test_load_balance.py
+++ /dev/null
@@ -1,8 +0,0 @@
-"""
-This is module provides tests for :module:`bet.loadBalance.load_balance`.
-
- * check the dimension of the output and input
- * output/input file presence (this is model specific...)
- * optimal(?) feeding jobs out to the allocation correctly
-"""
-pass
diff --git a/test/test_postProcess/test_plotDomains.py b/test/test_postProcess/test_plotDomains.py
new file mode 100644
index 00000000..68a00d77
--- /dev/null
+++ b/test/test_postProcess/test_plotDomains.py
@@ -0,0 +1,292 @@
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
+# Lindley Graham 04/07/2015
+"""
+This module contains tests for :mod:`bet.postProcess.plotDomains`.
+
+Tests for the execution of plotting parameter and data domains.
+"""
+
+import unittest, os, glob, bet
+import bet.postProcess.plotDomains as plotDomains
+import bet.util as util
+import matplotlib.tri as tri
+from matplotlib.lines import Line2D
+import numpy as np
+import numpy.testing as nptest
+from bet.Comm import size
+
+local_path = os.path.join(os.path.dirname(bet.__file__),
+ "../test/test_sampling")
+
+@unittest.skipIf(size > 1, 'Only run in serial')
+class test_plotDomains(unittest.TestCase):
+ """
+    Test the plotting methods in :mod:`bet.postProcess.plotDomains` for a 4D
+    parameter space.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+ self.lam_domain = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
+ self.samples = util.meshgrid_ndim((np.linspace(self.lam_domain[0][0],
+ self.lam_domain[0][1], 10), np.linspace(self.lam_domain[1][0],
+ self.lam_domain[1][1], 10), np.linspace(self.lam_domain[1][0],
+ self.lam_domain[1][1], 10), np.linspace(self.lam_domain[1][0],
+ self.lam_domain[1][1], 10)))
+ self.data = self.samples*3.0
+ self.P_samples = (1.0/float(self.samples.shape[0]))*np.ones((self.samples.shape[0],))
+ self.filename = "testfigure"
+
+ QoI_range = np.array([3.0, 3.0, 3.0, 3.0])
+ Q_ref = QoI_range*0.5
+ bin_size = 0.15*QoI_range
+ maximum = 1/np.product(bin_size)
+ def ifun(outputs):
+ """
+ Indicator function.
+ :param outputs: outputs
+ :type outputs: :class:`numpy.ndarray` of shape (N, ndim)
+ :rtype: :class:`numpy.ndarray` of shape (N,)
+ :returns: 0 if outside of set or positive number if inside set
+ """
+ left = np.repeat([Q_ref-.5*bin_size], outputs.shape[0], 0)
+ right = np.repeat([Q_ref+.5*bin_size], outputs.shape[0], 0)
+ left = np.all(np.greater_equal(outputs, left), axis=1)
+ right = np.all(np.less_equal(outputs, right), axis=1)
+ inside = np.logical_and(left, right)
+ max_values = np.repeat(maximum, outputs.shape[0], 0)
+ return inside.astype('float64')*max_values
+ self.rho_D = ifun
+ self.lnums = [1, 2, 3]
+ self.markers = []
+ for m in Line2D.markers:
+ try:
+ if len(m) == 1 and m != ' ':
+ self.markers.append(m)
+ except TypeError:
+ pass
+ self.colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k')
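+        # Editor's sanity check (hedged): the indicator should return
+        # ``maximum`` at the reference value and 0.0 far outside the box:
+        #   ifun(Q_ref[np.newaxis, :]) == maximum
+        #   ifun(np.zeros((1, 4))) == 0.0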
+
+ def tearDown(self):
+ """
+ Tear Down problem
+ """
+        # remove any files we created
+        filenames = glob.glob(self.filename+".*")
+        filenames.extend(glob.glob('param_samples_*cs.eps'))
+        filenames.extend(glob.glob('data_samples_*cs.eps'))
+        filenames.extend(glob.glob(os.path.join(local_path,
+            'data_samples_*cs.eps')))
+
+ filenames.append('domain_q1_q2_cs.eps')
+ filenames.append('domain_q1_q1_cs.eps')
+ filenames.append('q1_q2_domain_Q_cs.eps')
+ filenames.append('q1_q1_domain_Q_cs.eps')
+ figfiles = glob.glob('figs/*')
+ figfiles.extend(glob.glob(os.path.join(local_path, 'figs/*')))
+ filenames.extend(figfiles)
+
+ for f in filenames:
+ if os.path.exists(os.path.join(local_path, f)):
+ os.remove(os.path.join(local_path, f))
+ if os.path.exists(f):
+ os.remove(f)
+ if os.path.exists("figs"):
+ os.rmdir("figs")
+
+ def test_scatter_2D(self):
+ """
+ Test :meth:`bet.postProcess.plotDomains.scatter_2D`
+ """
+ sample_nos = [None, 25]
+ p_ref = [None, self.samples[4, [0, 1]]]
+ for sn, pr in zip(sample_nos, p_ref):
+ self.check_scatter_2D(sn, pr, True)
+
+ def check_scatter_2D(self, sample_nos, p_ref, save):
+        """
+        Check that :meth:`bet.postProcess.plotDomains.scatter_2D` ran
+ without generating an error.
+ """
+ try:
+ plotDomains.scatter_2D(self.samples[:, [0, 1]], sample_nos,
+ self.P_samples, p_ref, save, False, 'XLABEL', 'YLABEL',
+ self.filename)
+ go = True
+ except (RuntimeError, TypeError, NameError):
+ go = False
+ nptest.assert_equal(go, True)
+
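+    # Editor's note: the check_* helpers in this class are smoke tests; they
+    # only assert that plotting completes without RuntimeError, TypeError,
+    # or NameError, and do not validate the rendered figures.
+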
+ def test_scatter_3D(self):
+ """
+ Test :meth:`bet.postProcess.plotDomains.scatter_3D`
+ """
+ sample_nos = [None, 25]
+ p_ref = [None, self.samples[4, :]]
+ for sn, pr in zip(sample_nos, p_ref):
+ self.check_scatter_3D(sn, pr, True)
+
+ def check_scatter_3D(self, sample_nos, p_ref, save):
+ """
+        Check that :meth:`bet.postProcess.plotDomains.scatter_3D` ran
+ without generating an error.
+ """
+ try:
+ plotDomains.scatter_3D(self.samples[:, [0, 1, 2]], sample_nos,
+ self.P_samples, p_ref, save, False, 'XLABEL', 'YLABEL',
+ 'ZLABEL', self.filename)
+ go = True
+ except (RuntimeError, TypeError, NameError):
+ go = False
+ nptest.assert_equal(go, True)
+
+ def test_show_param(self):
+ """
+ Test :meth:`bet.postProcess.plotDomains.show_param`
+ """
+ sample_nos = [None, 25]
+ samples = [self.samples, self.samples[:, [0, 1]],
+ self.samples[:, [0, 1, 2]]]
+ lnums = [None, self.lnums]
+
+ for sample in samples:
+ showdim = [None]
+            if sample.shape[1] > 2:
+                showdim.append(2)
+            if sample.shape[1] > 3:
+                showdim.append(3)
+ for sd in showdim:
+ p_ref = [None, sample[4, :]]
+ for ln, sn, pr in zip(lnums, sample_nos, p_ref):
+ self.check_show_param(sample, sn, pr, True, ln, sd)
+
+ def check_show_param(self, samples, sample_nos, p_ref, save, lnums,
+ showdim):
+ """
+        Check that :meth:`bet.postProcess.plotDomains.show_param` ran
+ without generating an error.
+ """
+ try:
+ plotDomains.show_param(samples, self.data, self.rho_D, p_ref,
+ sample_nos, save, False, lnums, showdim)
+ go = True
+ except (RuntimeError, TypeError, NameError):
+ go = False
+ nptest.assert_equal(go, True)
+
+ def test_show_data(self):
+ """
+ Test :meth:`bet.postProcess.plotDomains.show_data`
+ """
+ sample_nos = [None, 25]
+ data_sets = [self.data, self.data[:, [0, 1]]]
+        qnums = [None, [0, 1, 2]]
+
+ for data, qn, sn in zip(data_sets, qnums, sample_nos):
+ showdim = [None]
+            if data.shape[1] > 2:
+                showdim.append(2)
+            if data.shape[1] > 3:
+                showdim.append(3)
+ Q_ref = [None, data[4, :]]
+ for sd, qr in zip(showdim, Q_ref):
+ self.check_show_data(data, sn, qr, True, qn, sd)
+
+ def check_show_data(self, data, sample_nos, q_ref, save, qnums, showdim):
+ """
+        Check that :meth:`bet.postProcess.plotDomains.show_data` ran
+ without generating an error.
+ """
+ try:
+ if data.shape[1] == 4:
+ plotDomains.show_data(data, self.rho_D, q_ref,
+ sample_nos, save, False, qnums, showdim)
+ else:
+ plotDomains.show_data(data, None, q_ref,
+ sample_nos, save, False, qnums, showdim)
+ go = True
+ except (RuntimeError, TypeError, NameError):
+ print "ERROR"
+ print data.shape
+ print q_ref
+ print sample_nos
+ print save
+ print qnums
+ print showdim
+ go = False
+ nptest.assert_equal(go, True)
+
+ def test_show_data_domain_2D(self):
+ """
+        Test :meth:`bet.postProcess.plotDomains.show_data_domain_2D`
+ """
+ ref_markers = [None, self.markers]
+ ref_colors = [None, self.colors]
+ filenames = [None, ['domain_q1_q1_cs.eps', 'q1_q2_domain_Q_cs.eps']]
+
+ for rm, rc, fn in zip(ref_markers, ref_colors, filenames):
+ self.check_show_data_domain_2D(rm, rc, None, True, fn)
+
+ def check_show_data_domain_2D(self, ref_markers, ref_colors, triangles,
+ save, filenames):
+ """
+ Check to see that the
+        :meth:`bet.postProcess.plotDomains.show_data_domain_2D` ran
+ without generating an error.
+ """
+        Q_ref = self.data[:, [0, 1]]
+        Q_ref = Q_ref[[1, 4], :]
+ data = self.data[:, [0, 1]]
+ try:
+ plotDomains.show_data_domain_2D(self.samples, data, Q_ref,
+ ref_markers, ref_colors, triangles=triangles, save=save,
+ filenames=filenames)
+ go = True
+ except (RuntimeError, TypeError, NameError):
+ go = False
+ nptest.assert_equal(go, True)
+
+ def test_show_data_domain_multi(self):
+ """
+ Test :meth:`bet.postProcess.plotDomains.show_data_domain_multi`
+ """
+ if not os.path.exists('figs/'):
+ os.mkdir('figs/')
+ Q_nums = [None, [1, 2], [1, 2, 3]]
+ ref_markers = [None, self.markers]
+ ref_colors = [None, self.colors]
+ for rm, rc in zip(ref_markers, ref_colors):
+ for qn in Q_nums:
+ showdim = [None, 1]
+ if qn and len(qn) > 2:
+ showdim.extend(['all', 'ALL'])
+ for sd in showdim:
+ self.check_show_data_domain_multi(rm, rc, qn, sd)
+
+ def check_show_data_domain_multi(self, ref_markers, ref_colors, Q_nums,
+ showdim):
+ """
+ Check to see that the
+        :meth:`bet.postProcess.plotDomains.show_data_domain_multi` ran
+ without generating an error.
+ """
+ Q_ref = self.data[[4, 2], :]
+ try:
+ plotDomains.show_data_domain_multi(self.samples, self.data,
+ Q_ref, Q_nums, ref_markers=ref_markers,
+ ref_colors=ref_colors, showdim=showdim)
+ go = True
+ except (RuntimeError, TypeError, NameError):
+ go = False
+ nptest.assert_equal(go, True)
+
diff --git a/test/test_postProcess/test_plotP.py b/test/test_postProcess/test_plotP.py
new file mode 100644
index 00000000..3d244831
--- /dev/null
+++ b/test/test_postProcess/test_plotP.py
@@ -0,0 +1,193 @@
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
+# Steven Mattis 04/07/2015
+"""
+This module contains tests for :mod:`bet.postProcess.plotP`.
+
+Tests for correct computation of marginals and plotting.
+"""
+
+import unittest
+import bet.calculateP.calculateP as calcP
+import bet.calculateP.simpleFunP as simpleFunP
+import bet.postProcess.plotP as plotP
+import numpy as np
+import scipy.spatial as spatial
+import numpy.testing as nptest
+import bet.util as util
+from bet.Comm import *
+import os
+
+
+class Test_calc_marg_1D(unittest.TestCase):
+ """
+ Test :meth:`bet.postProcess.plotP.calculate_1D_marginal_probs`
+ for a 1D parameter space.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+        self.lam_domain = np.array([[0.0, 1.0]])
+        num_samples = 1000
+        self.samples = np.linspace(self.lam_domain[0][0],
+                self.lam_domain[0][1], num_samples+1)
+        self.P_samples = 1.0/float(size)*(1.0/float(
+                self.samples.shape[0]))*np.ones((self.samples.shape[0],))
+
+ def test_1_bin(self):
+ """
+ Test that marginals sum to 1 and have correct shape.
+ """
+ (bins, marginals) = plotP.calculate_1D_marginal_probs(self.P_samples,
+ self.samples,
+ self.lam_domain,
+ nbins = 1)
+ nptest.assert_almost_equal(marginals[0][0], 1.0)
+ nptest.assert_equal(marginals[0].shape, (1,))
+
+ def test_10_bins(self):
+ """
+ Test that marginals sum to 1 and have correct shape.
+ """
+ (bins, marginals) = plotP.calculate_1D_marginal_probs(self.P_samples,
+ self.samples,
+ self.lam_domain,
+ nbins = 10)
+ nptest.assert_almost_equal(np.sum(marginals[0]), 1.0)
+ nptest.assert_equal(marginals[0].shape, (10,))
+
+class Test_calc_marg_2D(unittest.TestCase):
+ """
+    Test :meth:`bet.postProcess.plotP.calculate_1D_marginal_probs` and
+    :meth:`bet.postProcess.plotP.calculate_2D_marginal_probs` for a 2D
+    parameter space.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+        self.lam_domain = np.array([[0.0, 1.0], [0.0, 1.0]])
+        self.samples = util.meshgrid_ndim((np.linspace(self.lam_domain[0][0],
+                self.lam_domain[0][1], 10), np.linspace(self.lam_domain[1][0],
+                self.lam_domain[1][1], 10)))
+        self.P_samples = 1.0/float(size)*(1.0/float(
+                self.samples.shape[0]))*np.ones((self.samples.shape[0],))
+
+ def test_1_bin_1D(self):
+ """
+ Test that 1D marginals sum to 1 and have right shape.
+ """
+ (bins, marginals) = plotP.calculate_1D_marginal_probs(self.P_samples,
+ self.samples,
+ self.lam_domain,
+ nbins = 1)
+
+ nptest.assert_almost_equal(marginals[0][0], 1.0)
+ nptest.assert_almost_equal(marginals[1][0], 1.0)
+ nptest.assert_equal(marginals[0].shape, (1,))
+ nptest.assert_equal(marginals[1].shape, (1,))
+
+ def test_10_bins_1D(self):
+ """
+ Test that 1D marginals sum to 1 and have right shape.
+ """
+ (bins, marginals) = plotP.calculate_1D_marginal_probs(self.P_samples,
+ self.samples,
+ self.lam_domain,
+ nbins = 10)
+ nptest.assert_almost_equal(np.sum(marginals[0]), 1.0)
+ nptest.assert_almost_equal(np.sum(marginals[1]), 1.0)
+ nptest.assert_equal(marginals[0].shape, (10,))
+
+ def test_1_bin_2D(self):
+ """
+ Test that 2D marginals sum to 1 and have right shape.
+ """
+ (bins, marginals) = plotP.calculate_2D_marginal_probs(self.P_samples,
+ self.samples,
+ self.lam_domain,
+ nbins = 1)
+
+ nptest.assert_almost_equal(marginals[(0,1)][0], 1.0)
+ nptest.assert_equal(marginals[(0,1)].shape, (1,1))
+
+ def test_10_bins_2D(self):
+ """
+ Test that 2D marginals sum to 1 and have right shape.
+ """
+ (bins, marginals) = plotP.calculate_2D_marginal_probs(self.P_samples,
+ self.samples,
+ self.lam_domain,
+ nbins = 10)
+ nptest.assert_almost_equal(np.sum(marginals[(0,1)]), 1.0)
+ nptest.assert_equal(marginals[(0,1)].shape, (10,10))
+
+ def test_5_10_bins_2D(self):
+ """
+        Test that 2D marginals sum to 1 and have right shape.
+ """
+ (bins, marginals) = plotP.calculate_2D_marginal_probs(self.P_samples,
+ self.samples,
+ self.lam_domain,
+ nbins = [5,10])
+ nptest.assert_almost_equal(np.sum(marginals[(0,1)]), 1.0)
+ nptest.assert_equal(marginals[(0,1)].shape, (5,10))
+
+
+ def test_1D_smoothing(self):
+ """
+ Test :meth:`bet.postProcess.plotP.smooth_marginals_1D`.
+ """
+ (bins, marginals) = plotP.calculate_1D_marginal_probs(self.P_samples,
+ self.samples,
+ self.lam_domain,
+ nbins = 10)
+ marginals_smooth = plotP.smooth_marginals_1D(marginals, bins, sigma = 10.0)
+ nptest.assert_equal(marginals_smooth[0].shape, marginals[0].shape)
+ nptest.assert_almost_equal(np.sum(marginals_smooth[0]), 1.0)
+
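+    # Editor's note (hedged): smooth_marginals_1D/2D are assumed to convolve
+    # the binned marginals with a Gaussian of width ``sigma`` (in bin units)
+    # and then renormalize, which is why the smoothed marginals in the two
+    # tests below keep their shape and still sum to 1.
+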
+ def test_2D_smoothing(self):
+ """
+ Test :meth:`bet.postProcess.plotP.smooth_marginals_2D`.
+ """
+ (bins, marginals) = plotP.calculate_2D_marginal_probs(self.P_samples,
+ self.samples,
+ self.lam_domain,
+ nbins = 10)
+ marginals_smooth = plotP.smooth_marginals_2D(marginals, bins, sigma = 10.0)
+ nptest.assert_equal(marginals_smooth[(0,1)].shape, marginals[(0,1)].shape)
+ nptest.assert_almost_equal(np.sum(marginals_smooth[(0,1)]), 1.0)
+
+ def test_plot_marginals_1D(self):
+ """
+ Test :meth:`bet.postProcess.plotP.plot_1D_marginal_probs`.
+ """
+ (bins, marginals) = plotP.calculate_1D_marginal_probs(self.P_samples,
+ self.samples,
+ self.lam_domain,
+ nbins = 10)
+ try:
+            plotP.plot_1D_marginal_probs(marginals, bins, self.lam_domain,
+                filename="file", interactive=False)
+ go = True
+ if os.path.exists("file_1D_0.eps"):
+ os.remove("file_1D_0.eps")
+ if os.path.exists("file_1D_1.eps"):
+ os.remove("file_1D_1.eps")
+ except (RuntimeError, TypeError, NameError):
+ go = False
+ nptest.assert_equal(go, True)
+
+ def test_plot_marginals_2D(self):
+ """
+ Test :meth:`bet.postProcess.plotP.plot_2D_marginal_probs`.
+ """
+ (bins, marginals) = plotP.calculate_2D_marginal_probs(self.P_samples,
+ self.samples,
+ self.lam_domain,
+ nbins = 10)
+        marginals[(0, 1)][0][0] = 0.0
+        marginals[(0, 1)][0][1] *= 2.0
+        try:
+            plotP.plot_2D_marginal_probs(marginals, bins, self.lam_domain,
+                filename="file", interactive=False)
+ go = True
+ if os.path.exists("file_2D_0_1.eps"):
+ os.remove("file_2D_0_1.eps")
+ except (RuntimeError, TypeError, NameError):
+ go = False
+ nptest.assert_equal(go, True)
diff --git a/test/test_postProcess/test_postTools.py b/test/test_postProcess/test_postTools.py
new file mode 100644
index 00000000..0adc9a30
--- /dev/null
+++ b/test/test_postProcess/test_postTools.py
@@ -0,0 +1,137 @@
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
+# Steven Mattis 04/07/2015
+"""
+This module contains tests for :mod:`bet.postProcess.postTools`.
+
+Tests for correct post-processing.
+"""
+import unittest
+import bet.calculateP.simpleFunP as simpleFunP
+import bet.postProcess.postTools as postTools
+import numpy as np
+import scipy.spatial as spatial
+import numpy.testing as nptest
+import bet.util as util
+from bet.Comm import *
+
+def test_in_high_prob():
+    """
+    Tests :meth:`bet.postProcess.postTools.in_high_prob`.
+ """
+ def rho_D(my_data):
+ return my_data/4.0
+ data = np.array([0, 1, 0, 1, 1, 1])
+ maximum = np.max(rho_D(data))
+ print "maximum", maximum
+ assert 4 == postTools.in_high_prob(data, rho_D, maximum)
+ assert 3 == postTools.in_high_prob(data, rho_D, maximum, [3, 4, 5])
+ assert 2 == postTools.in_high_prob(data, rho_D, maximum, [0, 1, 2, 3])
+ assert 1 == postTools.in_high_prob(data, rho_D, maximum, [0, 2, 4])
+ assert 0 == postTools.in_high_prob(data, rho_D, maximum, [0, 2])
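+    # Editor's worked check (hedged): rho_D(data) = [0, .25, 0, .25, .25,
+    # .25] and maximum = 0.25; assuming in_high_prob counts samples whose
+    # rho_D value exceeds maximum/2, the counts over the full set and the
+    # given sample_nos subsets are 4, 3, 2, 1, and 0, as asserted above.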
+
+def test_in_high_prob_multi():
+    """
+    Tests :meth:`bet.postProcess.postTools.in_high_prob_multi`.
+    """
+ def rho_D(my_data):
+ return my_data/4.0
+ data1 = np.array([0, 1, 0, 1, 1, 0])
+ data2 = np.ones(data1.shape)-data1
+ maximum = np.max(rho_D(data1))
+
+ print "maximum", maximum
+ results_list = [[None, data1], [None, data2], [None, data1], [None, data2]]
+ sample_nos_list = [[3, 4, 5], [3, 4, 5], [0, 2, 4], [0, 2, 4]]
+
+ nptest.assert_array_equal(np.array([2, 1, 1, 2]),
+ postTools.in_high_prob_multi(results_list, rho_D, maximum,
+ sample_nos_list))
+ nptest.assert_array_equal(np.array([3, 3, 3, 3]),
+ postTools.in_high_prob_multi(results_list, rho_D, maximum))
+
+def test_compare_yield_CH():
+    """
+    Tests :meth:`bet.postProcess.postTools.compare_yield` with column
+    headings.
+    """
+ sample_quality = np.random.random((10,))
+ sort_ind = np.argsort(sample_quality)
+ run_param = []
+ for i in range(10):
+ run_param.append(np.random.random((4,)))
+ column_headings = ['swallow', 'coconut', 'ni', 'shrubbery']
+ try:
+ postTools.compare_yield(sort_ind, sample_quality, run_param,
+ column_headings)
+ go = True
+ except (RuntimeError, TypeError, NameError):
+ go = False
+ nptest.assert_equal(go, True)
+
+def test_compare_yield():
+    """
+    Tests :meth:`bet.postProcess.postTools.compare_yield` without column
+    headings.
+    """
+ sample_quality = np.random.random((10,))
+ sort_ind = np.argsort(sample_quality)
+ run_param = []
+ for i in range(10):
+ run_param.append(np.random.random((4,)))
+ try:
+ postTools.compare_yield(sort_ind, sample_quality, run_param)
+ go = True
+ except (RuntimeError, TypeError, NameError):
+ go = False
+ nptest.assert_equal(go, True)
+
+
+class Test_PostTools(unittest.TestCase):
+ """
+ Test :mod:`bet.postProcess.postTools`.
+ """
+ def setUp(self):
+ """
+ Set up problem.
+ """
+        self.lam_domain = np.array([[0.0, 1.0]])
+        num_samples = 1000
+        self.samples = np.linspace(self.lam_domain[0][0],
+                self.lam_domain[0][1], num_samples+1)
+ self.P_samples = (1.0/float(self.samples.shape[0]))*np.ones((self.samples.shape[0],))
+ self.P_samples[0] = 0.0
+ self.P_samples[-1] *= 2.0
+
+ self.data = self.samples[:]
+
+ def test_sort_by_rho(self):
+ """
+ Test :meth:`bet.postProcess.postTools.sort_by_rho`.
+ """
+        (P_samples, samples, _, data) = postTools.sort_by_rho(
+                self.P_samples, self.samples, lam_vol=None, data=self.data)
+        self.assertGreater(np.min(P_samples), 0.0)
+        nptest.assert_almost_equal(np.sum(P_samples), 1.0)
+
+ def test_sample_highest_prob(self):
+ """
+ Test :meth:`bet.postProcess.postTools.sample_highest_prob`.
+ """
+        (num_samples, P_samples, samples, _, data) = \
+                postTools.sample_highest_prob(1.0, self.P_samples,
+                self.samples, lam_vol=None, data=self.data, sort=True)
+        nptest.assert_almost_equal(np.sum(P_samples), 1.0)
+        nptest.assert_equal(num_samples, 1000)
+
+        (num_samples, P_samples, samples, _, data) = \
+                postTools.sample_highest_prob(0.8, self.P_samples,
+                self.samples, lam_vol=None, data=self.data, sort=True)
+        nptest.assert_allclose(np.sum(P_samples), 0.8, 0.001)
diff --git a/test/test_sampling/__init__.py b/test/test_sampling/__init__.py
index a45e59be..44d50e56 100644
--- a/test/test_sampling/__init__.py
+++ b/test/test_sampling/__init__.py
@@ -1,3 +1,5 @@
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
"""
This subpackage contains the test modules for the sampling subpackage.
"""
diff --git a/test/test_sampling/test_adaptiveSampling.py b/test/test_sampling/test_adaptiveSampling.py
index cb417107..06ca8b14 100644
--- a/test/test_sampling/test_adaptiveSampling.py
+++ b/test/test_sampling/test_adaptiveSampling.py
@@ -1,8 +1,897 @@
-"""
-This module contains tests for the :mod:`bet.sampling.basicSampling` module.
+# -*- coding: utf-8 -*-
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
+# Lindley Graham 04/07/2015
- * check input/output dimensions and format
- * make sure that $\lambda_i \in \Lambda$
- * provide diagnoistic output (?)
- * check that step sizes are within a certain tolerance
"""
+This module contains unittests for :mod:`~bet.sampling.adaptiveSampling`
+"""
+
+import unittest, os, glob
+import numpy.testing as nptest
+import numpy as np
+import bet.sampling.adaptiveSampling as asam
+import scipy.io as sio
+from bet.Comm import *
+import bet
+
+local_path = os.path.join(os.path.dirname(bet.__file__),
+ "../test/test_sampling")
+
+@unittest.skipIf(size > 1, 'Only run in serial')
+def test_loadmat_init():
+ """
+ Tests :meth:`bet.sampling.adaptiveSampling.loadmat` and
+ :meth:`bet.sampling.adaptiveSampling.sampler.init`.
+ """
+ np.random.seed(1)
+ chain_length = 10
+ mdat1 = {'samples':np.random.random((50, 1)),
+ 'data':np.random.random((50, 1)), 'num_samples':50,
+ 'chain_length':chain_length}
+ mdat2 = {'samples':np.random.random((60, 1)),
+ 'num_samples':60, 'chain_length':chain_length}
+ model = "this is not a model"
+
+ num_samples = np.array([50, 60])
+    num_chains_pproc1, num_chains_pproc2 = np.ceil(
+            num_samples/float(chain_length*size)).astype('int')
+ num_chains1, num_chains2 = size * np.array([num_chains_pproc1,
+ num_chains_pproc2])
+ num_samples1, num_samples2 = chain_length * np.array([num_chains1,
+ num_chains2])
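+    # Editor's worked numbers (hedged, serial run with size == 1): 50
+    # requested samples and chain_length 10 give num_chains_pproc1 =
+    # ceil(50/10) = 5, so num_chains1 = 5 and num_samples1 = 10*5 = 50;
+    # likewise num_samples2 = 60.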
+
+ sio.savemat(os.path.join(local_path, 'testfile1'), mdat1)
+ sio.savemat(os.path.join(local_path, 'testfile2'), mdat2)
+
+ (loaded_sampler1, samples1, data1) = asam.loadmat(os.path.join(local_path,
+ 'testfile1'))
+ nptest.assert_array_equal(samples1, mdat1['samples'])
+ nptest.assert_array_equal(data1, mdat1['data'])
+ assert loaded_sampler1.num_samples == num_samples1
+ assert loaded_sampler1.chain_length == chain_length
+ assert loaded_sampler1.num_chains_pproc == num_chains_pproc1
+ assert loaded_sampler1.num_chains == num_chains1
+ nptest.assert_array_equal(np.repeat(range(num_chains1), chain_length, 0),
+ loaded_sampler1.sample_batch_no)
+ assert loaded_sampler1.lb_model == None
+
+ (loaded_sampler2, samples2, data2) = asam.loadmat(os.path.join(local_path,
+ 'testfile2'), model)
+ nptest.assert_array_equal(samples2, mdat2['samples'])
+ nptest.assert_array_equal(data2, None)
+ assert loaded_sampler2.num_samples == num_samples2
+ assert loaded_sampler2.chain_length == chain_length
+ assert loaded_sampler2.num_chains_pproc == num_chains_pproc2
+ assert loaded_sampler2.num_chains == num_chains2
+ nptest.assert_array_equal(np.repeat(range(num_chains2), chain_length, 0),
+ loaded_sampler2.sample_batch_no)
+ if os.path.exists(os.path.join(local_path, 'testfile1.mat')):
+ os.remove(os.path.join(local_path, 'testfile1.mat'))
+ if os.path.exists(os.path.join(local_path, 'testfile2.mat')):
+ os.remove(os.path.join(local_path, 'testfile2.mat'))
+
+def verify_samples(QoI_range, sampler, param_min, param_max,
+ t_set, savefile, initial_sample_type):
+ """
+ Run :meth:`bet.sampling.adaptiveSampling.sampler.generalized_chains` and
+    verify that the samples have the correct dimensions and are contained in
+ the bounded parameter space.
+ """
+
+ # create indicator function
+ Q_ref = QoI_range*0.5
+ bin_size = 0.15*QoI_range
+ maximum = 1/np.product(bin_size)
+ def ifun(outputs):
+ """
+ Indicator function
+ """
+ left = np.repeat([Q_ref-.5*bin_size], outputs.shape[0], 0)
+ right = np.repeat([Q_ref+.5*bin_size], outputs.shape[0], 0)
+ left = np.all(np.greater_equal(outputs, left), axis=1)
+ right = np.all(np.less_equal(outputs, right), axis=1)
+ inside = np.logical_and(left, right)
+ max_values = np.repeat(maximum, outputs.shape[0], 0)
+ return inside.astype('float64')*max_values
+
+ # create rhoD_kernel
+ kernel_rD = asam.rhoD_kernel(maximum, ifun)
+
+ # run generalized chains
+ (samples, data, all_step_ratios) = sampler.generalized_chains(param_min,
+ param_max, t_set, kernel_rD, savefile, initial_sample_type)
+
+ # check dimensions of samples
+ assert samples.shape == (sampler.num_samples, len(param_min))
+
+ # are the samples in bounds?
+ param_left = np.repeat([param_min], sampler.num_samples, 0)
+ param_right = np.repeat([param_max], sampler.num_samples, 0)
+ assert np.all(samples <= param_right)
+ assert np.all(samples >= param_left)
+
+ # check dimensions of data
+ assert data.shape == (sampler.num_samples, len(QoI_range))
+
+ # check dimensions of all_step_ratios
+ assert all_step_ratios.shape == (sampler.num_chains, sampler.chain_length)
+
+ # are all the step ratios of an appropriate size?
+ assert np.all(all_step_ratios >= t_set.min_ratio)
+ assert np.all(all_step_ratios <= t_set.max_ratio)
+
+ # did the savefiles get created? (proper number, contain proper keys)
+ mdat = {}
+ if rank == 0:
+ mdat = sio.loadmat(savefile)
+ nptest.assert_array_equal(samples, mdat['samples'])
+ nptest.assert_array_equal(data, mdat['data'])
+ nptest.assert_array_equal(all_step_ratios, mdat['step_ratios'])
+ assert sampler.chain_length == mdat['chain_length']
+ assert sampler.num_samples == mdat['num_samples']
+ assert sampler.num_chains == mdat['num_chains']
+ nptest.assert_array_equal(sampler.sample_batch_no,
+ np.squeeze(mdat['sample_batch_no']))
+
+class Test_adaptive_sampler(unittest.TestCase):
+ """
+ Test :class:`bet.sampling.adaptiveSampling.sampler`.
+ """
+
+ def setUp(self):
+ """
+ Set up
+ """
+
+ # create 1-1 map
+ self.param_min1 = np.zeros((1, ))
+        self.param_max1 = np.ones((1, ))
+ def map_1t1(x):
+ """
+ 1 to 1 map
+ """
+ return x*2.0
+ # create 3-1 map
+ self.param_min3 = np.zeros((3, ))
+ self.param_max3 = np.ones((3, ))
+ def map_3t1(x):
+ """
+ 3 to 1 map
+ """
+ return np.expand_dims(np.sum(x, 1), axis=1)
+ # create 3-2 map
+ def map_3t2(x):
+ """
+ 3 to 2 map
+ """
+ return np.vstack(([x[:, 0]+x[:, 1], x[:, 2]])).transpose()
+ # create 10-4 map
+ self.param_min10 = np.zeros((10, ))
+ self.param_max10 = np.ones((10, ))
+ def map_10t4(x):
+ """
+ 10 to 4 map
+ """
+ x1 = x[:, 0] + x[:, 1]
+ x2 = x[:, 2] + x[:, 3]
+ x3 = x[:, 4] + x[:, 5]
+ x4 = np.sum(x[:, [6, 7, 8, 9]], 1)
+ return np.vstack([x1, x2, x3, x4]).transpose()
+ self.savefiles = ["11t11", "1t1", "3to1", "3to2", "10to4"]
+ self.models = [map_1t1, map_1t1, map_3t1, map_3t2, map_10t4]
+ self.QoI_range = [np.array([2.0]), np.array([2.0]), np.array([3.0]),
+ np.array([2.0, 1.0]), np.array([2.0, 2.0, 2.0, 4.0])]
+
+ # define parameters for the adaptive sampler
+
+ num_samples = 1000
+ chain_length = 100
+ num_chains_pproc = int(np.ceil(num_samples/float(chain_length*size)))
+ num_chains = size * num_chains_pproc
+ num_samples = chain_length * np.array(num_chains)
+
+ self.samplers = []
+ for model in self.models:
+ self.samplers.append(asam.sampler(num_samples, chain_length,
+ model))
+
+ self.param_min_list = [self.param_min1, self.param_min1,
+ self.param_min3, self.param_min3, self.param_min10]
+ self.param_max_list = [self.param_max1, self.param_max1,
+ self.param_max3, self.param_max3, self.param_max10]
+
+ self.test_list = zip(self.models, self.QoI_range, self.samplers,
+ self.param_min_list, self.param_max_list, self.savefiles)
+
+
+ def tearDown(self):
+ for f in self.savefiles:
+ if rank == 0 and os.path.exists(f+".mat"):
+ os.remove(f+".mat")
+ proc_savefiles = glob.glob("p{}*.mat".format(rank))
+ proc_savefiles.extend(glob.glob("proc{}*.mat".format(rank)))
+ for pf in proc_savefiles:
+ if os.path.exists(pf):
+ os.remove(pf)
+
+ def test_update(self):
+ """
+        Test :meth:`bet.sampling.adaptiveSampling.sampler.update_mdict`
+ """
+ mdict = {"frog":3, "moose":2}
+ self.samplers[0].update_mdict(mdict)
+ assert self.samplers[0].num_samples == mdict["num_samples"]
+ assert self.samplers[0].chain_length == mdict["chain_length"]
+ assert self.samplers[0].num_chains == mdict["num_chains"]
+ nptest.assert_array_equal(self.samplers[0].sample_batch_no,
+ np.repeat(range(self.samplers[0].num_chains),
+ self.samplers[0].chain_length, 0))
+
+ def test_run_gen(self):
+ """
+ Run :meth:`bet.sampling.adaptiveSampling.sampler.run_gen` and verify
+ that the output has the correct dimensions.
+ """
+ # sampler.run_gen(kern_list, rho_D, maximum, param_min, param_max,
+ # t_set, savefile, initial_sample_type)
+ # returns list where each member is a tuple ((samples, data),
+ # all_step_ratios, num_high_prob_samples,
+ # sorted_indices_of_num_high_prob_samples, average_step_ratio)
+ # create indicator function
+ inputs = self.test_list[3]
+ _, QoI_range, sampler, param_min, param_max, savefile = inputs
+
+ Q_ref = QoI_range*0.5
+ bin_size = 0.15*QoI_range
+ maximum = 1/np.product(bin_size)
+ def ifun(outputs):
+ """
+ Indicator function
+ """
+ inside = np.logical_and(np.all(np.greater_equal(outputs,
+ Q_ref-.5*bin_size), axis=1), np.all(np.less_equal(outputs,
+ Q_ref+.5*bin_size), axis=1))
+ max_values = np.repeat(maximum, outputs.shape[0], 0)
+ return inside.astype('float64')*max_values
+
+ # create rhoD_kernel
+ kernel_rD = asam.rhoD_kernel(maximum, ifun)
+ kern_list = [kernel_rD]*2
+
+ # create t_set
+ t_set = asam.transition_set(.5, .5**5, 1.0)
+
+ # run run_gen
+ output = sampler.run_gen(kern_list, ifun, maximum, param_min,
+ param_max, t_set, savefile)
+
+ results, r_step_size, results_rD, sort_ind, mean_ss = output
+
+ for out in output:
+ assert len(out) == 2
+
+ for samples, data in results:
+ assert samples.shape == (sampler.num_samples, len(param_min))
+ assert data.shape == (sampler.num_samples, len(QoI_range))
+ for step_sizes in r_step_size:
+ assert step_sizes.shape == (sampler.num_chains,
+ sampler.chain_length)
+ for num_hps in results_rD:
+ assert type(num_hps) == int
+ for inds in sort_ind:
+ assert np.issubdtype(type(inds), int)
+ for asr in mean_ss:
+ assert asr > t_set.min_ratio
+ assert asr < t_set.max_ratio
+
+ def test_run_tk(self):
+ """
+ Run :meth:`bet.sampling.adaptiveSampling.sampler.run_tk` and verify
+ that the output has the correct dimensions.
+ """
+        # sampler.run_tk(init_ratio, min_ratio, max_ratio, rho_D, maximum,
+        # param_min, param_max, kernel, savefile, initial_sample_type)
+ # returns list where each member is a tuple ((samples, data),
+ # all_step_ratios, num_high_prob_samples,
+ # sorted_indices_of_num_high_prob_samples, average_step_ratio)
+ inputs = self.test_list[3]
+ _, QoI_range, sampler, param_min, param_max, savefile = inputs
+
+ Q_ref = QoI_range*0.5
+ bin_size = 0.15*QoI_range
+ maximum = 1/np.product(bin_size)
+ def ifun(outputs):
+ """
+ Indicator function
+ """
+ inside = np.logical_and(np.all(np.greater_equal(outputs,
+ Q_ref-.5*bin_size), axis=1), np.all(np.less_equal(outputs,
+ Q_ref+.5*bin_size), axis=1))
+ max_values = np.repeat(maximum, outputs.shape[0], 0)
+ return inside.astype('float64')*max_values
+
+ # create rhoD_kernel
+ kernel_rD = asam.rhoD_kernel(maximum, ifun)
+
+        # create lists of step-size ratios
+ init_ratio = [1.0, .5, .25]
+ min_ratio = [.5**2, .5**5, .5**7]
+ max_ratio = [1.0, .75, .5]
+
+        # run run_tk
+        output = sampler.run_tk(init_ratio, min_ratio, max_ratio, ifun,
+                maximum, param_min, param_max, kernel_rD, savefile)
+
+ results, r_step_size, results_rD, sort_ind, mean_ss = output
+
+ for out in output:
+ assert len(out) == 3
+
+ for samples, data in results:
+ assert samples.shape == (sampler.num_samples, len(param_min))
+ assert data.shape == (sampler.num_samples, len(QoI_range))
+ for step_sizes in r_step_size:
+ assert step_sizes.shape == (sampler.num_chains,
+ sampler.chain_length)
+ for num_hps in results_rD:
+ assert type(num_hps) == int
+ for inds in sort_ind:
+ assert np.issubdtype(type(inds), int)
+ for asr, mir, mar in zip(mean_ss, min_ratio, max_ratio):
+ assert asr > mir
+ assert asr < mar
+
+ def test_run_inc_dec(self):
+ """
+ Run :meth:`bet.sampling.adaptiveSampling.sampler.run_inc_dec` and verify
+ that the output has the correct dimensions.
+ """
+ # sampler.run_inc_dec(increase, decrease, tolerance, rho_D, maximum,
+ # param_min, param_max, t_set, savefile, initial_sample_type)
+ # returns list where each member is a tuple ((samples, data),
+ # all_step_ratios, num_high_prob_samples,
+ # sorted_indices_of_num_high_prob_samples, average_step_ratio)
+ inputs = self.test_list[3]
+ _, QoI_range, sampler, param_min, param_max, savefile = inputs
+
+ Q_ref = QoI_range*0.5
+ bin_size = 0.15*QoI_range
+ maximum = 1/np.product(bin_size)
+ def ifun(outputs):
+ """
+ Indicator function
+ """
+ inside = np.logical_and(np.all(np.greater_equal(outputs,
+ Q_ref-.5*bin_size), axis=1), np.all(np.less_equal(outputs,
+ Q_ref+.5*bin_size), axis=1))
+ max_values = np.repeat(maximum, outputs.shape[0], 0)
+ return inside.astype('float64')*max_values
+
+        # create increase/decrease/tolerance lists
+ increase = [2.0, 3.0, 5.0]
+ decrease = [.7, .5, .2]
+ tolerance = [1e-3, 1e-4, 1e-7]
+
+ # create t_set
+ t_set = asam.transition_set(.5, .5**5, 1.0)
+
+        # run run_inc_dec
+        output = sampler.run_inc_dec(increase, decrease, tolerance, ifun,
+                maximum, param_min, param_max, t_set, savefile)
+
+ results, r_step_size, results_rD, sort_ind, mean_ss = output
+
+ for out in output:
+ assert len(out) == 3
+
+ for samples, data in results:
+ assert samples.shape == (sampler.num_samples, len(param_min))
+ assert data.shape == (sampler.num_samples, len(QoI_range))
+ for step_sizes in r_step_size:
+ assert step_sizes.shape == (sampler.num_chains,
+ sampler.chain_length)
+ for num_hps in results_rD:
+ assert type(num_hps) == int
+ for inds in sort_ind:
+ assert np.issubdtype(type(inds), int)
+ for asr in mean_ss:
+ assert asr > t_set.min_ratio
+ assert asr < t_set.max_ratio
+
+ def test_generalized_chains(self):
+ """
+        Test :meth:`bet.sampling.adaptiveSampling.sampler.generalized_chains`
+        for several QoI maps (1 to 1, 3 to 1, 3 to 2, and 10 to 4).
+ """
+ # create a transition set
+ t_set = asam.transition_set(.5, .5**5, 1.0)
+
+ for _, QoI_range, sampler, param_min, param_max, savefile in self.test_list:
+ for initial_sample_type in ["random", "r", "lhs"]:
+ verify_samples(QoI_range, sampler, param_min, param_max, t_set,
+ savefile, initial_sample_type)
+
+class test_kernels(unittest.TestCase):
+ """
+ Tests kernels for a 1d, 2d, 4d data space.
+ """
+ def setUp(self):
+ """
+ Set up
+ """
+ self.QoI_range = [np.array([3.0]),
+ np.array([2.0, 1.0]), np.array([2.0, 2.0, 2.0, 4.0])]
+
+ def test_list(self):
+ """
+ Run test for a 1d, 2d, and 4d data space.
+ """
+ for QoI_range in self.QoI_range:
+ Q_ref = QoI_range*0.5
+ bin_size = 0.15*QoI_range
+ maximum = 1/np.product(bin_size)
+ def ifun(outputs):
+ """
+ Indicator function
+ """
+ inside = np.logical_and(np.all(np.greater_equal(outputs,
+ Q_ref-.5*bin_size), axis=1), np.all(np.less_equal(outputs,
+ Q_ref+.5*bin_size), axis=1))
+ max_values = np.repeat(maximum, outputs.shape[0], 0)
+ return inside.astype('float64')*max_values
+ self.verify_indiv(Q_ref, ifun, maximum)
+
+ def verify_indiv(self, Q_ref, rhoD, maximum):
+ """
+ Test that the list of kernels is correctly created.
+ """
+ kern_list = asam.kernels(Q_ref, rhoD, maximum)
+ assert len(kern_list) == 3
+ assert type(kern_list[0]) == asam.maxima_mean_kernel
+ assert type(kern_list[1]) == asam.rhoD_kernel
+ assert type(kern_list[2]) == asam.maxima_kernel
+
+class data_1D(object):
+ """
+ Sets up 1D data domain problem.
+ """
+ def createData(self):
+ """
+ Set up data.
+ """
+ self.data = np.random.random((100, 1))*10.0
+ self.Q_ref = np.array([5.0])
+ self.data_domain = np.expand_dims(np.array([0.0, 10.0]), axis=0)
+ self.mdim = 1
+ bin_size = 0.15*self.data_domain[:, 1]
+ self.maximum = 1/np.product(bin_size)
+ def ifun(outputs):
+ """
+ Indicator function
+ """
+ inside = np.logical_and(np.all(np.greater_equal(outputs,
+ self.Q_ref-.5*bin_size), axis=1), np.all(np.less_equal(outputs,
+ self.Q_ref+.5*bin_size), axis=1))
+ max_values = np.repeat(self.maximum, outputs.shape[0], 0)
+ return inside.astype('float64')*max_values
+ self.rho_D = ifun
+
+class data_2D(object):
+ """
+ Sets up 2D data domain problem.
+ """
+ def createData(self):
+ """
+ Set up data.
+ """
+ self.data = np.random.random((100, 2))*10.0
+ self.Q_ref = np.array([5.0, 5.0])
+ self.data_domain = np.array([[0.0, 10.0], [0.0, 10.0]])
+ self.mdim = 2
+ bin_size = 0.15*self.data_domain[:, 1]
+ self.maximum = 1/np.product(bin_size)
+ def ifun(outputs):
+ """
+ Indicator function
+ """
+ inside = np.logical_and(np.all(np.greater_equal(outputs,
+ self.Q_ref-.5*bin_size), axis=1), np.all(np.less_equal(outputs,
+ self.Q_ref+.5*bin_size), axis=1))
+ max_values = np.repeat(self.maximum, outputs.shape[0], 0)
+ return inside.astype('float64')*max_values
+ self.rho_D = ifun
+
+
+class data_3D(object):
+ """
+ Sets up 3D data domain problem.
+ """
+ def createData(self):
+ """
+ Set up data.
+ """
+ self.data = np.random.random((100, 3))*10.0
+ self.Q_ref = np.array([5.0, 5.0, 5.0])
+ self.data_domain = np.array([[0.0, 10.0], [0.0, 10.0], [0.0, 10.0]])
+ self.mdim = 3
+ bin_size = 0.15*self.data_domain[:, 1]
+ self.maximum = 1/np.product(bin_size)
+ def ifun(outputs):
+ """
+ Indicator function
+ """
+ inside = np.logical_and(np.all(np.greater_equal(outputs,
+ self.Q_ref-.5*bin_size), axis=1), np.all(np.less_equal(outputs,
+ self.Q_ref+.5*bin_size), axis=1))
+ max_values = np.repeat(self.maximum, outputs.shape[0], 0)
+ return inside.astype('float64')*max_values
+ self.rho_D = ifun
+
+
+class kernel(object):
+ """
+ Test :class:`bet.sampling.adaptiveSampling.kernel`
+ """
+ def setUp(self):
+ """
+ Set up
+ """
+ self.kernel = asam.kernel()
+
+ def test_init(self):
+ """
+        Test the initialization of
+        :class:`bet.sampling.adaptiveSampling.kernel`
+ """
+ assert self.kernel.TOL == 1e-8
+ assert self.kernel.increase == 1.0
+ assert self.kernel.decrease == 1.0
+
+ def test_delta_step(self):
+ """
+ Test the delta_step method of
+ :class:`bet.sampling.adaptiveSampling.kernel`
+ """
+ kern_new, proposal = self.kernel.delta_step(self.data)
+ assert kern_new == None
+ assert proposal.shape == (self.data.shape[0],)
+
+
+class test_kernel_1D(kernel, data_1D):
+ """
+ Test :class:`bet.sampling.adaptiveSampling.kernel` on a 1D data space.
+ """
+ def setUp(self):
+ """
+ Set up
+ """
+ super(test_kernel_1D, self).createData()
+ super(test_kernel_1D, self).setUp()
+
+class test_kernel_2D(kernel, data_2D):
+ """
+ Test :class:`bet.sampling.adaptiveSampling.kernel` on a 2D data space.
+ """
+ def setUp(self):
+ """
+ Set up
+ """
+ super(test_kernel_2D, self).createData()
+ super(test_kernel_2D, self).setUp()
+
+class test_kernel_3D(kernel, data_3D):
+ """
+ Test :class:`bet.sampling.adaptiveSampling.kernel` on a 3D data space.
+ """
+ def setUp(self):
+ """
+ Set up
+ """
+ super(test_kernel_3D, self).createData()
+ super(test_kernel_3D, self).setUp()
+
+
+class rhoD_kernel(kernel):
+ """
+ Test :class:`bet.sampling.adaptiveSampling.rhoD_kernel`
+ """
+ def setUp(self):
+ """
+ Set up
+ """
+ self.kernel = asam.rhoD_kernel(self.maximum, self.rho_D)
+
+ def test_init(self):
+ """
+        Test the initialization of
+        :class:`bet.sampling.adaptiveSampling.rhoD_kernel`
+ """
+ assert self.kernel.TOL == 1e-8
+ assert self.kernel.increase == 2.0
+ assert self.kernel.decrease == 0.5
+ assert self.kernel.MAX == self.maximum
+ assert self.kernel.rho_D == self.rho_D
+ assert self.kernel.sort_ascending == False
+
+ def test_delta_step(self):
+ """
+ Test the delta_step method of
+ :class:`bet.sampling.adaptiveSampling.rhoD_kernel`
+ """
+ kern_new, proposal = self.kernel.delta_step(self.data)
+ nptest.assert_array_equal(kern_new, self.rho_D(self.data))
+ assert proposal == None
+
+ data = np.vstack([self.Q_ref+3.0, self.Q_ref, self.Q_ref-3.0])
+ data_new = np.vstack([self.Q_ref, self.Q_ref+3.0, self.Q_ref-3.0])
+ kern_old = self.rho_D(data)
+ kern_new, proposal = self.kernel.delta_step(data_new, kern_old)
+ nptest.assert_array_equal(proposal, [0.5, 2.0, 1.0])
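+        # Editor's reading (hedged): sample 1 moved into the
+        # high-probability box, so its step shrinks (decrease = 0.5);
+        # sample 2 moved out, so its step grows (increase = 2.0); sample 3
+        # saw no change in rho_D, so its ratio stays 1.0.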
+
+class test_rhoD_kernel_1D(rhoD_kernel, data_1D):
+ """
+ Test :class:`bet.sampling.adaptiveSampling.rhoD_kernel` on a 1D data space.
+ """
+ def setUp(self):
+ """
+ Set up
+ """
+ super(test_rhoD_kernel_1D, self).createData()
+ super(test_rhoD_kernel_1D, self).setUp()
+
+class test_rhoD_kernel_2D(rhoD_kernel, data_2D):
+ """
+ Test :class:`bet.sampling.adaptiveSampling.rhoD_kernel` on a 2D data space.
+ """
+ def setUp(self):
+ """
+ Set up
+ """
+ super(test_rhoD_kernel_2D, self).createData()
+ super(test_rhoD_kernel_2D, self).setUp()
+
+class test_rhoD_kernel_3D(rhoD_kernel, data_3D):
+ """
+ Test :class:`bet.sampling.adaptiveSampling.rhoD_kernel` on a 3D data space.
+ """
+ def setUp(self):
+ """
+ Set up
+ """
+ super(test_rhoD_kernel_3D, self).createData()
+ super(test_rhoD_kernel_3D, self).setUp()
+
+class maxima_kernel(kernel):
+ """
+ Test :class:`bet.sampling.adaptiveSampling.maxima_kernel`
+ """
+ def setUp(self):
+ """
+ Set up
+ """
+ self.kernel = asam.maxima_kernel(np.vstack([self.Q_ref, self.Q_ref+.5]), self.rho_D)
+
+ def test_init(self):
+ """
+        Test the initialization of
+        :class:`bet.sampling.adaptiveSampling.maxima_kernel`
+ """
+ assert self.kernel.TOL == 1e-8
+ assert self.kernel.increase == 2.0
+ assert self.kernel.decrease == 0.5
+ nptest.assert_equal(self.kernel.MAXIMA, np.vstack([self.Q_ref,
+ self.Q_ref+.5]))
+ assert self.kernel.num_maxima == 2
+ nptest.assert_equal(self.kernel.rho_max,
+ self.rho_D(np.vstack([self.Q_ref, self.Q_ref+.5])))
+ assert self.kernel.sort_ascending == True
+
+ def test_delta_step(self):
+ """
+ Test the delta_step method of
+ :class:`bet.sampling.adaptiveSampling.maxima_kernel`
+ """
+ data_old = np.vstack([self.Q_ref+3.0, self.Q_ref, self.Q_ref-3.0])
+ kern_old, proposal = self.kernel.delta_step(data_old)
+
+ # TODO: check kern_old
+ #nptest.assert_array_equal(kern_old, np.zeros((self.data.shape[0],))
+ assert proposal == None
+
+ data_new = np.vstack([self.Q_ref, self.Q_ref+3.0, self.Q_ref-3.0])
+ kern_new, proposal = self.kernel.delta_step(data_new, kern_old)
+
+ #TODO: check kern_new
+ #nptest.assert_array_eqyal(kern_new, something)
+ nptest.assert_array_equal(proposal, [0.5, 2.0, 1.0])
+
+class test_maxima_kernel_1D(maxima_kernel, data_1D):
+ """
+ Test :class:`bet.sampling.adaptiveSampling.maxima_kernel` on a 1D data space.
+ """
+ def setUp(self):
+ """
+ Set up
+ """
+ super(test_maxima_kernel_1D, self).createData()
+ super(test_maxima_kernel_1D, self).setUp()
+
+class test_maxima_kernel_2D(maxima_kernel, data_2D):
+ """
+ Test :class:`bet.sampling.adaptiveSampling.maxima_kernel` on a 2D data space.
+ """
+ def setUp(self):
+ """
+ Set up
+ """
+ super(test_maxima_kernel_2D, self).createData()
+ super(test_maxima_kernel_2D, self).setUp()
+
+class test_maxima_kernel_3D(maxima_kernel, data_3D):
+ """
+ Test :class:`bet.sampling.adaptiveSampling.maxima_kernel` on a 3D data space.
+ """
+ def setUp(self):
+ """
+ Set up
+ """
+ super(test_maxima_kernel_3D, self).createData()
+ super(test_maxima_kernel_3D, self).setUp()
+
+
+class maxima_mean_kernel(maxima_kernel):
+ """
+ Test :class:`bet.sampling.adaptiveSampling.maxima_mean_kernel`
+ """
+ def setUp(self):
+ """
+ Set up
+ """
+ self.kernel = asam.maxima_mean_kernel(np.vstack([self.Q_ref, self.Q_ref+.5]), self.rho_D)
+
+ def test_init(self):
+ """
+        Test the initialization of
+ :class:`bet.sampling.adaptiveSampling.maxima_mean_kernel`
+ """
+ assert self.kernel.radius == None
+ assert self.kernel.mean == None
+ assert self.kernel.current_clength == 0
+ super(maxima_mean_kernel, self).test_init()
+
+ def test_reset(self):
+ """
+ Test the method
+ :meth:`bet.sampling.adaptiveSampling.maxima_mean_kernel.reset`
+ """
+ self.kernel.reset()
+ assert self.kernel.radius == None
+ assert self.kernel.mean == None
+ assert self.kernel.current_clength == 0
+
+ def test_delta_step(self):
+ """
+ Test the delta_step method of
+ :class:`bet.sampling.adaptiveSampling.maxima_mean_kernel`
+ """
+ super(maxima_mean_kernel, self).test_delta_step()
+ # TODO
+ # check self.current_clength
+ # check self.radius
+ # check self.mean
+
+class test_maxima_mean_kernel_1D(maxima_mean_kernel, data_1D):
+ """
+ Test :class:`bet.sampling.adaptiveSampling.maxima_mean_kernel` on a 1D data space.
+ """
+ def setUp(self):
+ """
+ Set up
+ """
+ super(test_maxima_mean_kernel_1D, self).createData()
+ super(test_maxima_mean_kernel_1D, self).setUp()
+
+class test_maxima_mean_kernel_2D(maxima_mean_kernel, data_2D):
+ """
+ Test :class:`bet.sampling.adaptiveSampling.maxima_mean_kernel` on a 2D data space.
+ """
+ def setUp(self):
+ """
+ Set up
+ """
+ super(test_maxima_mean_kernel_2D, self).createData()
+ super(test_maxima_mean_kernel_2D, self).setUp()
+
+class test_maxima_mean_kernel_3D(maxima_mean_kernel, data_3D):
+ """
+ Test :class:`bet.sampling.adaptiveSampling.maxima_mean_kernel` on a 3D data space.
+ """
+ def setUp(self):
+ """
+ Set up
+ """
+ super(test_maxima_mean_kernel_3D, self).createData()
+ super(test_maxima_mean_kernel_3D, self).setUp()
+
+
+class transition_set(object):
+ """
+    Tests :class:`bet.sampling.adaptiveSampling.transition_set`
+ """
+ def setUp(self):
+ """
+ Set Up
+ """
+ self.t_set = asam.transition_set(.5, .5**5, 1.0)
+
+ def test_init(self):
+ """
+        Tests the initialization of
+        :class:`bet.sampling.adaptiveSampling.transition_set`
+ """
+ assert self.t_set.init_ratio == .5
+ assert self.t_set.min_ratio == .5**5
+ assert self.t_set.max_ratio == 1.0
+
+ def test_step(self):
+ """
+ Tests the method
+ :meth:`bet.sampling.adaptiveSampling.transition_set.step`
+ """
+ # define step_ratio, param_width, param_left, param_right, samples_old
+ # from data
+ param_left = np.repeat([self.data_domain[:, 0]], self.data.shape[0], 0)
+ param_right = np.repeat([self.data_domain[:, 1]], self.data.shape[0], 0)
+ param_width = param_right - param_left
+
+ step_ratio = 0.5*np.ones(self.data.shape[0],)
+ step_ratio[self.data.shape[0]/2:] = .1
+ step_size = np.repeat([step_ratio], param_width.shape[1],
+ 0).transpose()*param_width
+ # take a step
+ samples_new = self.t_set.step(step_ratio, param_width, param_left,
+ param_right, self.data)
+
+ # make sure the proposed steps are inside the domain
+ # check dimensions of samples
+ assert samples_new.shape == self.data.shape
+
+ # are the samples in bounds?
+ assert np.all(samples_new <= param_right)
+ assert np.all(samples_new >= param_left)
+
+ # make sure the proposed steps are inside the box defined around their
+ # generating old samples
+ assert np.all(samples_new <= self.data+0.5*step_size)
+ assert np.all(samples_new >= self.data-0.5*step_size)
+
+
+class test_transition_set_1D(transition_set, data_1D):
+ """
+ Test :class:`bet.sampling.adaptiveSampling.transition_set` on a 1D data space.
+ """
+ def setUp(self):
+ """
+ Set up
+ """
+ super(test_transition_set_1D, self).createData()
+ super(test_transition_set_1D, self).setUp()
+
+class test_transition_set_2D(transition_set, data_2D):
+ """
+ Test :class:`bet.sampling.adaptiveSampling.transition_set` on a 2D data space.
+ """
+ def setUp(self):
+ """
+ Set up
+ """
+ super(test_transition_set_2D, self).createData()
+ super(test_transition_set_2D, self).setUp()
+
+class test_transition_set_3D(transition_set, data_3D):
+ """
+ Test :class:`bet.sampling.adaptiveSampling.transition_set` on a 3D data space.
+ """
+ def setUp(self):
+ """
+ Set up
+ """
+ super(test_transition_set_3D, self).createData()
+ super(test_transition_set_3D, self).setUp()
+
diff --git a/test/test_sampling/test_basicSampling.py b/test/test_sampling/test_basicSampling.py
index 171c66eb..53fedf7b 100644
--- a/test/test_sampling/test_basicSampling.py
+++ b/test/test_sampling/test_basicSampling.py
@@ -1,7 +1,235 @@
-"""
-This module contains tests for the :mod:`bet.sampling.basicSampling` module.
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
- * check input/output dimensions and format
- * make sure that $\lambda_i \in \Lambda$
- * provide diagnoistic output (?)
+# Lindley Graham 04/07/2015
+"""
+This module contains unittests for :mod:`~bet.sampling.basicSampling`
"""
+
+import unittest, os, bet, pyDOE
+import numpy.testing as nptest
+import numpy as np
+import bet.sampling.basicSampling as bsam
+import scipy.io as sio
+from bet.Comm import *
+
+local_path = os.path.join(os.path.dirname(bet.__file__), "../test/test_sampling")
+
+
+@unittest.skipIf(size > 1, 'Only run in serial')
+def test_loadmat():
+ """
+ Tests :meth:`bet.sampling.basicSampling.loadmat`
+ """
+ np.random.seed(1)
+ mdat1 = {'samples':np.random.random((5,1)),
+ 'data':np.random.random((5,1)), 'num_samples':5}
+ mdat2 = {'samples':np.random.random((6,1)), 'num_samples':6}
+ model = "this is not a model"
+
+ sio.savemat(os.path.join(local_path, 'testfile1'), mdat1)
+ sio.savemat(os.path.join(local_path, 'testfile2'), mdat2)
+
+ (loaded_sampler1, samples1, data1) = bsam.loadmat(os.path.join(local_path,
+ 'testfile1'))
+ nptest.assert_array_equal(samples1, mdat1['samples'])
+ nptest.assert_array_equal(data1, mdat1['data'])
+ assert loaded_sampler1.num_samples == 5
+    assert loaded_sampler1.lb_model is None
+
+ (loaded_sampler2, samples2, data2) = bsam.loadmat(os.path.join(local_path,
+ 'testfile2'), model)
+ nptest.assert_array_equal(samples2, mdat2['samples'])
+ nptest.assert_array_equal(data2, None)
+ assert loaded_sampler2.num_samples == 6
+ assert loaded_sampler2.lb_model == model
+ if os.path.exists(os.path.join(local_path, 'testfile1.mat')):
+ os.remove(os.path.join(local_path, 'testfile1.mat'))
+ if os.path.exists(os.path.join(local_path, 'testfile2.mat')):
+ os.remove(os.path.join(local_path, 'testfile2.mat'))
+
+def verify_user_samples(model, sampler, samples, savefile, parallel):
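+    """
+    Verify :meth:`bet.sampling.basicSampling.sampler.user_samples` by
+    comparing its output, ``num_samples``, and the saved mat file against a
+    direct evaluation of ``model`` at ``samples``.
+    """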
+    # evaluate the model at the samples directly
+ data = model(samples)
+
+ # evaluate the model at the samples
+ (my_samples, my_data) = sampler.user_samples(samples, savefile,
+ parallel)
+
+ if len(data.shape) == 1:
+ data = np.expand_dims(data, axis=1)
+ if len(samples.shape) == 1:
+ samples = np.expand_dims(samples, axis=1)
+
+ # compare the samples
+ nptest.assert_array_equal(samples, my_samples)
+ # compare the data
+ nptest.assert_array_equal(data, my_data)
+ # did num_samples get updated?
+ assert samples.shape[0] == sampler.num_samples
+ # did the file get correctly saved?
+
+ if rank == 0:
+ mdat = sio.loadmat(savefile)
+ nptest.assert_array_equal(samples, mdat['samples'])
+ nptest.assert_array_equal(data, mdat['data'])
+ comm.Barrier()
+
+def verify_random_samples(model, sampler, sample_type, param_min, param_max,
+ num_samples, savefile, parallel):
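+    """
+    Verify :meth:`bet.sampling.basicSampling.sampler.random_samples` by
+    recreating the expected samples by hand and comparing the output,
+    ``num_samples``, and the saved mat file.
+    """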
+ # recreate the samples
+    if num_samples is None:
+ num_samples = sampler.num_samples
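+    # scale a design on the unit hypercube into the box defined by
+    # param_min and param_max: samples = left + u*(right - left)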
+ param_left = np.repeat([param_min], num_samples, 0)
+ param_right = np.repeat([param_max], num_samples, 0)
+ samples = (param_right-param_left)
+ if sample_type == "lhs":
+ samples = samples * pyDOE.lhs(param_min.shape[-1], num_samples)
+ elif sample_type == "random" or "r":
+ np.random.seed(1)
+ samples = samples * np.random.random(param_left.shape)
+ samples = samples + param_left
+    # evaluate the model at the samples directly
+ data = model(samples)
+
+ # evaluate the model at the samples
+ # reset the random seed
+ if sample_type == "random" or "r":
+ np.random.seed(1)
+    (my_samples, my_data) = sampler.random_samples(sample_type, param_min,
+            param_max, savefile, num_samples=num_samples, parallel=parallel)
+
+ # make sure that the samples are within the boundaries
+ assert np.all(my_samples <= param_right)
+ assert np.all(my_samples >= param_left)
+
+ if len(data.shape) == 1:
+ data = np.expand_dims(data, axis=1)
+ if len(samples.shape) == 1:
+        samples = np.expand_dims(samples, axis=1)
+
+ # compare the samples
+ nptest.assert_array_equal(samples, my_samples)
+ # compare the data
+ nptest.assert_array_equal(data, my_data)
+ # did num_samples get updated?
+ assert samples.shape[0] == sampler.num_samples
+ assert num_samples == sampler.num_samples
+ # did the file get correctly saved?
+
+ if rank == 0:
+ mdat = sio.loadmat(savefile)
+ nptest.assert_array_equal(samples, mdat['samples'])
+ nptest.assert_array_equal(data, mdat['data'])
+ comm.Barrier()
+
+
+class Test_basic_sampler(unittest.TestCase):
+ """
+ Test :class:`bet.sampling.basicSampling.sampler`.
+ """
+
+ def setUp(self):
+ # create 1-1 map
+ self.param_min1 = np.zeros((1, ))
+        self.param_max1 = np.ones((1, ))
+ def map_1t1(x):
+ return np.sin(x)
+ # create 3-1 map
+ self.param_min3 = np.zeros((3, ))
+ self.param_max3 = np.ones((3, ))
+ def map_3t1(x):
+ return np.sum(x, 1)
+ # create 3-2 map
+ def map_3t2(x):
+ return np.vstack(([x[:, 0]+x[:, 1], x[:, 2]])).transpose()
+ # create 10-4 map
+ self.param_min10 = np.zeros((10, ))
+ self.param_max10 = np.ones((10, ))
+ def map_10t4(x):
+ x1 = x[:, 0] + x[:, 1]
+ x2 = x[:, 2] + x[:, 3]
+ x3 = x[:, 4] + x[:, 5]
+ x4 = np.sum(x[:, [6, 7, 8, 9]], 1)
+ return np.vstack([x1, x2, x3, x4]).transpose()
+ num_samples = 100
+ self.savefiles = ["11t11", "1t1", "3to1", "3to2", "10to4"]
+ self.models = [map_1t1, map_1t1, map_3t1, map_3t2, map_10t4]
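+        # create one sampler per QoI map, each with the same num_samples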
+ self.samplers = []
+ for model in self.models:
+ self.samplers.append(bsam.sampler(model, num_samples))
+
+ def tearDown(self):
+ """
+ Clean up extra files
+ """
+ if rank == 0:
+ for f in self.savefiles:
+ if os.path.exists(f+".mat"):
+ os.remove(f+".mat")
+ if size > 1:
+ for f in self.savefiles:
+ proc_savefile = os.path.join(local_path, os.path.dirname(f),
+ "proc{}{}.mat".format(rank, os.path.basename(f)))
+ print proc_savefile
+ if os.path.exists(proc_savefile):
+ os.remove(proc_savefile)
+ proc_savefile = os.path.join(local_path, os.path.dirname(f),
+ "p{}proc{}{}.mat".format(rank, rank, os.path.basename(f)))
+ if os.path.exists(proc_savefile):
+ os.remove(proc_savefile)
+ print proc_savefile
+
+ def test_init(self):
+ """
+ Test initalization of :class:`bet.sampling.basicSampling.sampler`
+ """
+ assert self.samplers[0].num_samples == 100
+ assert self.samplers[0].lb_model == self.models[0]
+        assert bsam.sampler(self.models[0], None).num_samples is None
+
+ def test_update(self):
+ """
+        Test :meth:`bet.sampling.basicSampling.sampler.update_mdict`
+ """
+ mdict = {"frog":3, "moose":2}
+ self.samplers[0].update_mdict(mdict)
+ assert self.samplers[0].num_samples == mdict["num_samples"]
+
+ def test_user_samples(self):
+ """
+ Test :meth:`bet.sampling.basicSampling.sampler.user_samples` for
+        four different QoI maps (1 to 1, 3 to 1, 3 to 2, and 10 to 4).
+ """
+ # create a list of different sets of samples
+ list_of_samples = [np.ones((4, )), np.ones((4, 1)), np.ones((4, 3)),
+ np.ones((4, 3)), np.ones((4, 10))]
+
+ test_list = zip(self.models, self.samplers, list_of_samples,
+ self.savefiles)
+
+ for model, sampler, samples, savefile in test_list:
+ for parallel in [False, True]:
+ verify_user_samples(model, sampler, samples, savefile,
+ parallel)
+
+ def test_random_samples(self):
+ """
+        Test :meth:`bet.sampling.basicSampling.sampler.random_samples` for
+        four different QoI maps (1 to 1, 3 to 1, 3 to 2, and 10 to 4).
+ """
+ param_min_list = [self.param_min1, self.param_min1, self.param_min3,
+ self.param_min3, self.param_min10]
+ param_max_list = [self.param_max1, self.param_max1, self.param_max3,
+ self.param_max3, self.param_max10]
+
+ test_list = zip(self.models, self.samplers, param_min_list,
+ param_max_list, self.savefiles)
+
+ for model, sampler, param_min, param_max, savefile in test_list:
+ for sample_type in ["random", "r", "lhs"]:
+ for num_samples in [None, 25]:
+ for parallel in [False, True]:
+ verify_random_samples(model, sampler, sample_type,
+ param_min, param_max, num_samples, savefile,
+ parallel)
diff --git a/test/test_util.py b/test/test_util.py
new file mode 100644
index 00000000..37f26010
--- /dev/null
+++ b/test/test_util.py
@@ -0,0 +1,146 @@
+# Copyright (C) 2014-2015 Lindley Graham and Steven Mattis
+
+# Lindley Graham 04/07/2015
+"""
+This module contains unittests for :mod:`~bet.util`
+"""
+
+import bet.util as util
+from bet.Comm import *
+import numpy.testing as nptest
+import numpy as np
+
+def get_binary_rep(i, dim):
+ """
+ A ``dim`` bit representation of ``i`` in binary.
+
+ :param int i: number to represent in binary
+ :param int dim: number of bits to use in the representation
+ :rtype: string
+    :returns: the ``dim``-bit binary representation of ``i`` as a string
+
+ """
+ short = bin(i).partition('b')[-1]
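+    # left-pad with zeros so the representation is exactly dim bits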
+ full = '0'*(dim-len(short))+short
+ return full
+
+def compare_to_bin_rep(xnew):
+ """
+    Assert that ``xnew[i] == get_binary_rep(i, dim)`` for every row ``i``.
+ """
+ rep_compare = np.zeros((xnew.shape[0],), np.bool)
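+    # build a bit string from each row and compare it against the binary
+    # representation of the row index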
+ for i, row in enumerate(xnew):
+ row_rep = ''
+ for v in row:
+ row_rep += str(v)
+ rep_compare[i] = (row_rep == get_binary_rep(i, len(row)))
+ print rep_compare[i]
+ assert np.all(rep_compare)
+
+def test_meshgrid_ndim():
+ """
+    Tests :meth:`bet.util.meshgrid_ndim` for up to 10 vectors, where each
+    vector is equal to ``[0, 1]``.
+ """
+ for i in xrange(10):
+ x = [[0, 1] for v in xrange(i+1)]
+ yield compare_to_bin_rep, util.meshgrid_ndim(x)
+
+def test_get_global_values():
+ """
+ Tests :meth:`bet.util.get_global_values`.
+ """
+ for provide_shape in [True, False]:
+ for i in xrange(5):
+ yield compare_get_global_values, i, provide_shape
+
+def compare_get_global_values(i, provide_shape):
+ """
+ Compares the results of get global values for a vector of shape ``(size*2,
+ i)``.
+
+ :param int i: Dimension of the vector of length ``size*2``
+
+ """
+ if rank == 0:
+ if i == 0:
+ original_array = np.array(np.random.random((size*2, )))
+ else:
+ original_array = np.array(np.random.random((size*2, i)))
+ else:
+ original_array = None
+ original_array = comm.bcast(original_array)
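+    # each processor takes a contiguous slice of the broadcast array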
+ my_len = original_array.shape[0]/size
+ my_index = range(0+rank*my_len, (rank+1)*my_len)
+ if i == 0:
+ my_array = original_array[my_index]
+ else:
+ my_array = original_array[my_index, :]
+ if provide_shape:
+ recomposed_array = util.get_global_values(my_array, original_array.shape)
+ else:
+ recomposed_array = util.get_global_values(my_array)
+ nptest.assert_array_equal(original_array, recomposed_array)
+
+
+def test_fix_dimensions_vector():
+ """
+ Tests :meth:`bet.util.fix_dimensions_vector`
+ """
+ values = [1, [1], range(5), np.array(range(5))]
+ shapes = [(1,), (1,), (5,), (5,)]
+ for value, shape in zip(values, shapes):
+ vector = util.fix_dimensions_vector(value)
+ assert vector.shape == shape
+
+def test_fix_dimensions_vector_2darray():
+ """
+ Tests :meth:`bet.util.fix_dimensions_vector_2darray`
+ """
+ values = [1, [1], np.empty((1,1)), range(5), np.array(range(5)),
+ np.empty((5,1))]
+ shapes = [(1,1), (1,1), (1,1), (5,1), (5,1), (5,1)]
+ for value, shape in zip(values, shapes):
+ vector = util.fix_dimensions_vector_2darray(value)
+ assert vector.shape == shape
+
+def test_fix_dimensions_domain():
+ """
+ Tests :meth:`bet.util.fix_dimensions_domain`
+ """
+ values = [range(2), np.empty((2,)), np.empty((2,1)), np.empty((1,2)),
+ np.empty((5,2)), np.empty((2,5))]
+ shapes = [(1,2), (1,2), (1,2), (1,2), (5,2), (5,2)]
+ for value, shape in zip(values, shapes):
+ vector = util.fix_dimensions_domain(value)
+ assert vector.shape == shape
+
+def test_fix_dimensions_data_nodim():
+ """
+    Tests :meth:`bet.util.fix_dimensions_data` when ``dim`` is not specified
+ """
+ values = [1, [1], range(2), np.empty((2,)), np.empty((2,1)), np.empty((1,2)),
+ np.empty((5,2)), np.empty((2,5))]
+ shapes = [(1,1), (1,1), (2,1), (2,1), (2,1), (1,2), (5,2), (2,5)]
+ print len(values), len(shapes)
+ for value, shape in zip(values, shapes):
+ vector = util.fix_dimensions_data(value)
+ print vector, value
+ print vector.shape, shape
+ assert vector.shape == shape
+
+def test_fix_dimensions_data_dim():
+ """
+    Tests :meth:`bet.util.fix_dimensions_data` when ``dim`` is specified
+ """
+ values = [1, [1], range(2), np.empty((2,)), np.empty((2,1)), np.empty((1,2)),
+ np.empty((5,2)), np.empty((2,5)), np.empty((5,2)), np.empty((2,5))]
+ shapes = [(1,1), (1,1), (1,2), (1,2), (1,2), (1,2), (5,2), (5,2), (2,5),
+ (2,5)]
+ dims = [1, 1, 2, 2, 2, 2, 2, 2, 5, 5]
+ for value, shape, dim in zip(values, shapes, dims):
+ vector = util.fix_dimensions_data(value, dim)
+ print vector, value
+ print vector.shape, shape, dim
+ assert vector.shape == shape
+
diff --git a/test/test_visualize/test_plotDomains.py b/test/test_visualize/test_plotDomains.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/test/test_visualize/test_plotP.py b/test/test_visualize/test_plotP.py
deleted file mode 100644
index e69de29b..00000000