diff --git a/docs/source/tutorial.rst b/docs/source/tutorial.rst index c9d034d1..d02e0b30 100644 --- a/docs/source/tutorial.rst +++ b/docs/source/tutorial.rst @@ -756,4 +756,40 @@ Now let's see how to do this a bit more simply, and in a way that provides signf torch.all(torch.isclose(result_vectorized, result)) # True! +Custom Integrators +------------------ + +It is of course possible to extend our provided Integrators, perhaps for a special class of functions or for a new algorithm. + +.. code:: ipython3 + + class GaussHermite(Gaussian): + """Gauss Hermite quadrature rule in torch, for integrals of the form :math:`\\int_{-\\infty}^{+\\infty} e^{-x^{2}} f(x) dx`. It will correctly integrate + polynomials of degree :math:`2n - 1` or less over the interval + :math:`[-\\infty, \\infty]` with weight function :math:`f(x) = e^{-x^2}`. See https://en.wikipedia.org/wiki/Gauss%E2%80%93Hermite_quadrature + """ + + def __init__(self): + super().__init__() + self.name = "Gauss-Hermite" + self.root_fn = scipy.special.roots_hermite + self.default_integration_domain = [[-1 * numpy.inf, numpy.inf]] + self.wrapper_func = None + + @staticmethod + def _apply_composite_rule(cur_dim_areas, dim, hs, domain): + """Apply "composite" rule for gaussian integrals + cur_dim_areas will contain the areas per dimension + """ + # We collapse dimension by dimension + for _ in range(dim): + cur_dim_areas = anp.sum(cur_dim_areas, axis=len(cur_dim_areas.shape) - 1) + return cur_dim_areas + + gh=torchquad.GaussHermite() + integral=gh.integrate(lambda x: 1-x,dim=1,N=200) #integral from -inf to inf of np.exp(-(x**2))*(1-x) + # Computed integral was 1.7724538509055168. + # analytic result = sqrt(pi) + + diff --git a/torchquad/__init__.py b/torchquad/__init__.py index ac6c8df9..c5b66bf7 100644 --- a/torchquad/__init__.py +++ b/torchquad/__init__.py @@ -9,6 +9,9 @@ from .integration.simpson import Simpson from .integration.boole import Boole from .integration.vegas import VEGAS +from .integration.gaussian import GaussLegendre +from .integration.grid_integrator import GridIntegrator +from .integration.base_integrator import BaseIntegrator from .integration.rng import RNG @@ -22,12 +25,15 @@ from .utils.deployment_test import _deployment_test __all__ = [ + "GridIntegrator", + "BaseIntegrator", "IntegrationGrid", "MonteCarlo", "Trapezoid", "Simpson", "Boole", "VEGAS", + "GaussLegendre", "RNG", "plot_convergence", "plot_runtime", diff --git a/torchquad/integration/base_integrator.py b/torchquad/integration/base_integrator.py index b2fb2b1b..c97680d5 100644 --- a/torchquad/integration/base_integrator.py +++ b/torchquad/integration/base_integrator.py @@ -29,30 +29,41 @@ def integrate(self): NotImplementedError("This is an abstract base class. Should not be called.") ) - def _eval(self, points): + def _eval(self, points, weights=None, args=None): """Call evaluate_integrand to evaluate self._fn function at the passed points and update self._nr_of_evals Args: points (backend tensor): Integration points + weights (backend tensor, optional): Integration weights. Defaults to None. + args (list or tuple, optional): Any arguments required by the function. Defaults to None. """ - result, num_points = self.evaluate_integrand(self._fn, points) + result, num_points = self.evaluate_integrand( + self._fn, points, weights=weights, args=args + ) self._nr_of_fevals += num_points return result @staticmethod - def evaluate_integrand(fn, points): + def evaluate_integrand(fn, points, weights=None, args=None): """Evaluate the integrand function at the passed points Args: fn (function): Integrand function points (backend tensor): Integration points + weights (backend tensor, optional): Integration weights. Defaults to None. + args (list or tuple, optional): Any arguments required by the function. Defaults to None. Returns: backend tensor: Integrand function output int: Number of evaluated points """ num_points = points.shape[0] - result = fn(points) + + if args is None: + args = () + + result = fn(points, *args) + if infer_backend(result) != infer_backend(points): warnings.warn( "The passed function's return value has a different numerical backend than the passed points. Will try to convert. Note that this may be slow as it results in memory transfers between CPU and GPU, if torchquad uses the GPU." @@ -67,17 +78,27 @@ def evaluate_integrand(fn, points): f"where first dimension matches length of passed elements. " ) + if weights is not None: + if ( + len(result.shape) > 1 + ): # if the the integrand is multi-dimensional, we need to reshape/repeat weights so they can be broadcast in the *= + integrand_shape = anp.array( + result.shape[1:], like=infer_backend(points) + ) + weights = anp.repeat( + anp.expand_dims(weights, axis=1), anp.prod(integrand_shape) + ).reshape((weights.shape[0], *(integrand_shape))) + result *= weights + return result, num_points @staticmethod def _check_inputs(dim=None, N=None, integration_domain=None): """Used to check input validity - Args: dim (int, optional): Dimensionality of function to integrate. Defaults to None. N (int, optional): Total number of integration points. Defaults to None. integration_domain (list or backend tensor, optional): Integration domain, e.g. [[0,1],[1,2]]. Defaults to None. - Raises: ValueError: if inputs are not compatible with each other. """ diff --git a/torchquad/integration/boole.py b/torchquad/integration/boole.py index c4c66715..99e1b22a 100644 --- a/torchquad/integration/boole.py +++ b/torchquad/integration/boole.py @@ -28,7 +28,7 @@ def integrate(self, fn, dim, N=None, integration_domain=None, backend=None): return super().integrate(fn, dim, N, integration_domain, backend) @staticmethod - def _apply_composite_rule(cur_dim_areas, dim, hs): + def _apply_composite_rule(cur_dim_areas, dim, hs, domain): """Apply composite Boole quadrature. cur_dim_areas will contain the areas per dimension """ diff --git a/torchquad/integration/gaussian.py b/torchquad/integration/gaussian.py new file mode 100644 index 00000000..c76cc982 --- /dev/null +++ b/torchquad/integration/gaussian.py @@ -0,0 +1,152 @@ +import numpy +from autoray import numpy as anp +from .grid_integrator import GridIntegrator + + +class Gaussian(GridIntegrator): + """Gaussian quadrature methods inherit from this. Default behaviour is Gauss-Legendre quadrature on [-1,1].""" + + def __init__(self): + super().__init__() + self.name = "Gauss-Legendre" + self.root_fn = numpy.polynomial.legendre.leggauss + self.root_args = () + self.default_integration_domain = [[-1, 1]] + self.transform_interval = True + self._cache = {} + + def integrate(self, fn, dim, N=8, integration_domain=None, backend=None): + """Integrates the passed function on the passed domain using Simpson's rule. + + Args: + fn (func): The function to integrate over. + dim (int): Dimensionality of the integration domain. + N (int, optional): Total number of sample points to use for the integration. Should be odd. Defaults to 3 points per dimension if None is given. + integration_domain (list or backend tensor, optional): Integration domain, e.g. [[-1,1],[0,1]]. Defaults to [-1,1]^dim. It also determines the numerical backend if possible. + backend (string, optional): Numerical backend. This argument is ignored if the backend can be inferred from integration_domain. Defaults to the backend from the latest call to set_up_backend or "torch" for backwards compatibility. + + Returns: + backend-specific number: Integral value + """ + return super().integrate(fn, dim, N, integration_domain, backend) + + def _weights(self, N, dim, backend, requires_grad=False): + """return the weights, broadcast across the dimensions, generated from the polynomial of choice + + Args: + N (int): number of nodes + dim (int): number of dimensions + backend (string): which backend array to return + + Returns: + backend tensor: the weights + """ + weights = anp.array(self._cached_points_and_weights(N)[1], like=backend) + if backend == "torch": + weights.requires_grad = requires_grad + return anp.prod( + anp.array( + anp.stack( + list(anp.meshgrid(*([weights] * dim))), like=backend, dim=0 + ) + ), + axis=0, + ).ravel() + else: + return anp.prod( + anp.meshgrid(*([weights] * dim), like=backend), axis=0 + ).ravel() + + def _roots(self, N, backend, requires_grad=False): + """return the roots generated from the polynomial of choice + + Args: + N (int): number of nodes + backend (string): which backend array to return + + Returns: + backend tensor: the roots + """ + roots = anp.array(self._cached_points_and_weights(N)[0], like=backend) + if requires_grad: + roots.requires_grad = True + return roots + + @property + def _grid_func(self): + """ + function for generating a grid to be integrated over i.e., the polynomial roots, resized to the domain. + """ + + def f(a, b, N, requires_grad, backend=None): + return self._resize_roots(a, b, self._roots(N, backend, requires_grad)) + + return f + + def _resize_roots(self, a, b, roots): # scale from [-1,1] to [a,b] + """resize the roots based on domain of [a,b] + + Args: + a (backend tensor): lower bound + b (backend tensor): upper bound + roots (backend tensor): polynomial nodes + + Returns: + backend tensor: rescaled roots + """ + return roots + + # credit for the idea https://github.com/scipy/scipy/blob/dde50595862a4f9cede24b5d1c86935c30f1f88a/scipy/integrate/_quadrature.py#L72 + def _cached_points_and_weights(self, N): + """wrap the calls to get weights/roots in a cache + + Args: + N (int): number of nodes to return + backend (string): which backend to use + + Returns: + tuple: nodes and weights + """ + root_args = (N, *self.root_args) + if not isinstance(N, int): + if hasattr(N, "item"): + root_args = (N.item(), *self.root_args) + else: + raise NotImplementedError( + f"N {N} is not an int and lacks an `item` method" + ) + if root_args in self._cache: + return self._cache[root_args] + self._cache[root_args] = self.root_fn(*root_args) + return self._cache[root_args] + + @staticmethod + def _apply_composite_rule(cur_dim_areas, dim, hs, domain): + """Apply "composite" rule for gaussian integrals + + cur_dim_areas will contain the areas per dimension + """ + # We collapse dimension by dimension + for cur_dim in range(dim): + cur_dim_areas = ( + 0.5 + * (domain[cur_dim][1] - domain[cur_dim][0]) + * anp.sum(cur_dim_areas, axis=len(cur_dim_areas.shape) - 1) + ) + return cur_dim_areas + + +class GaussLegendre(Gaussian): + """Gauss Legendre quadrature rule in torch. See https://en.wikipedia.org/wiki/Gaussian_quadrature#Gauss%E2%80%93Legendre_quadrature. + + Examples + -------- + >>> gl=torchquad.GaussLegendre() + >>> integral = gl.integrate(lambda x:np.sin(x), dim=1, N=101, integration_domain=[[0,5]]) #integral from 0 to 5 of np.sin(x) + |TQ-INFO| Computed integral was 0.7163378000259399 #analytic result = 1-np.cos(5)""" + + def __init__(self): + super().__init__() + + def _resize_roots(self, a, b, roots): # scale from [-1,1] to [a,b] + return ((b - a) / 2) * roots + ((a + b) / 2) diff --git a/torchquad/integration/grid_integrator.py b/torchquad/integration/grid_integrator.py new file mode 100644 index 00000000..ac0428bd --- /dev/null +++ b/torchquad/integration/grid_integrator.py @@ -0,0 +1,268 @@ +from loguru import logger +from autoray import numpy as anp, infer_backend + +from .base_integrator import BaseIntegrator +from .integration_grid import IntegrationGrid +from .utils import ( + _linspace_with_grads, + expand_func_values_and_squeeze_integral, + _setup_integration_domain, +) + + +class GridIntegrator(BaseIntegrator): + """The abstract integrator that grid-like integrators (Newton-Cotes and Gaussian) integrators inherit from""" + + def __init__(self): + super().__init__() + + @property + def _grid_func(self): + def f(a, b, N, requires_grad=False, backend=None): + return _linspace_with_grads(a, b, N, requires_grad=requires_grad) + + return f + + def _weights(self, N, dim, backend, requires_grad=False): + return None + + def integrate(self, fn, dim, N, integration_domain, backend): + """Integrate the passed function on the passed domain using a Composite Newton Cotes rule. + The argument meanings are explained in the sub-classes. + + Returns: + float: integral value + """ + # If N is None, use the minimal required number of points per dimension + if N is None: + N = self._get_minimal_N(dim) + + integration_domain = _setup_integration_domain(dim, integration_domain, backend) + backend = infer_backend(integration_domain) + self._check_inputs(dim=dim, N=N, integration_domain=integration_domain) + + grid_points, hs, n_per_dim = self.calculate_grid(N, integration_domain) + + logger.debug("Evaluating integrand on the grid.") + function_values, num_points = self.evaluate_integrand( + fn, grid_points, weights=self._weights(n_per_dim, dim, backend) + ) + self._nr_of_fevals = num_points + + return self.calculate_result( + function_values, dim, n_per_dim, hs, integration_domain + ) + + @expand_func_values_and_squeeze_integral + def calculate_result(self, function_values, dim, n_per_dim, hs, integration_domain): + """Apply the "composite rule" to calculate a result from the evaluated integrand. + + Args: + function_values (backend tensor): Output of the integrand + dim (int): Dimensionality + n_per_dim (int): Number of grid slices per dimension + hs (backend tensor): Distances between grid slices for each dimension + + Returns: + backend tensor: Quadrature result + """ + # Reshape the output to be [integrand_dim,N,N,...] points instead of [integrand_dim,dim*N] points + integrand_shape = function_values.shape[1:] + dim_shape = [n_per_dim] * dim + new_shape = [*integrand_shape, *dim_shape] + # We need to use einsum instead of just reshape here because reshape does not move the axis - it only reshapes. + # So the first line generates a character string for einsum, followed by moving the first dimension i.e `dim*N` + # to the end. Finally we reshape the resulting object so that instead of the last dimension being `dim*N`, it is + # `N,N,...` as desired. + einsum = "".join( + [chr(i + 65) for i in range(len(function_values.shape))] + ) # chr(i + 65) generates an alphabetical character + reshaped_function_values = anp.einsum( + f"{einsum}->{einsum[1:]}{einsum[0]}", function_values + ) + reshaped_function_values = reshaped_function_values.reshape(new_shape) + assert new_shape == list( + reshaped_function_values.shape + ), f"reshaping produced shape {reshaped_function_values.shape}, expected shape was {new_shape}" + logger.debug("Computing areas.") + + result = self._apply_composite_rule( + reshaped_function_values, dim, hs, integration_domain + ) + + logger.opt(lazy=True).info( + "Computed integral: {result}", result=lambda: str(result) + ) + return result + + def calculate_grid(self, N, integration_domain): + """Calculate grid points, widths and N per dim + + Args: + N (int): Number of points + integration_domain (backend tensor): Integration domain + + Returns: + backend tensor: Grid points + backend tensor: Grid widths + int: Number of grid slices per dimension + """ + N = self._adjust_N(dim=integration_domain.shape[0], N=N) + + # Log with lazy to avoid redundant synchronisations with certain + # backends + logger.opt(lazy=True).debug( + "Creating a grid for {name} to integrate a function with {N} points over {d}.", + name=lambda: type(self).__name__, + N=lambda: str(N), + d=lambda: str(integration_domain), + ) + + # Create grid and assemble evaluation points + grid = IntegrationGrid(N, integration_domain, self._grid_func) + + return grid.points, grid.h, grid._N + + @staticmethod + def _adjust_N(dim, N): + # Nothing to do by default + return N + + def get_jit_compiled_integrate( + self, dim, N=None, integration_domain=None, backend=None + ): + """Create an integrate function where the performance-relevant steps except the integrand evaluation are JIT compiled. + Use this method only if the integrand cannot be compiled. + The compilation happens when the function is executed the first time. + With PyTorch, return values of different integrands passed to the compiled function must all have the same format, e.g. precision. + + Args: + dim (int): Dimensionality of the integration domain. + N (int, optional): Total number of sample points to use for the integration. See the integrate method documentation for more details. + integration_domain (list or backend tensor, optional): Integration domain, e.g. [[-1,1],[0,1]]. Defaults to [-1,1]^dim. It can also determine the numerical backend. + backend (string, optional): Numerical backend. Defaults to integration_domain's backend if it is a tensor and otherwise to the backend from the latest call to set_up_backend or "torch" for backwards compatibility. + + Returns: + function(fn, integration_domain): JIT compiled integrate function where all parameters except the integrand and domain are fixed + """ + # If N is None, use the minimal required number of points per dimension + if N is None: + N = self._get_minimal_N(dim) + + integration_domain = _setup_integration_domain(dim, integration_domain, backend) + self._check_inputs(dim=dim, N=N, integration_domain=integration_domain) + backend = infer_backend(integration_domain) + if backend in ["tensorflow", "jax"]: + # Tensorflow and JAX automatically recompile functions if + # the parameters change + if backend == "tensorflow": + if not hasattr(self, "_tf_jit_calculate_grid"): + import tensorflow as tf + + self._tf_jit_calculate_grid = tf.function( + self.calculate_grid, jit_compile=True + ) + self._tf_jit_calculate_result = tf.function( + self.calculate_result, jit_compile=True + ) + jit_calculate_grid = self._tf_jit_calculate_grid + jit_calculate_result = self._tf_jit_calculate_result + elif backend == "jax": + if not hasattr(self, "_jax_jit_calculate_grid"): + import jax + + self._jax_jit_calculate_grid = jax.jit( + self.calculate_grid, static_argnames=["N"] + ) + self._jax_jit_calculate_result = jax.jit( + self.calculate_result, + static_argnums=( + 1, + 2, + ), # dim and n_per_dim + ) + jit_calculate_grid = self._jax_jit_calculate_grid + jit_calculate_result = self._jax_jit_calculate_result + + def compiled_integrate(fn, integration_domain): + grid_points, hs, n_per_dim = jit_calculate_grid(N, integration_domain) + function_values, _ = self.evaluate_integrand( + fn, grid_points, weights=self._weights(n_per_dim, dim, backend) + ) + return jit_calculate_result( + function_values, dim, int(n_per_dim), hs, integration_domain + ) + + return compiled_integrate + + elif backend == "torch": + # Torch requires explicit tracing with example inputs. + def do_compile(example_integrand): + import torch + + # Define traceable first and third steps + def step1(integration_domain): + grid_points, hs, n_per_dim = self.calculate_grid( + N, integration_domain + ) + return ( + grid_points, + hs, + torch.Tensor([n_per_dim]), + ) # n_per_dim is constant + + dim = int(integration_domain.shape[0]) + + def step3(function_values, hs, integration_domain): + return self.calculate_result( + function_values, dim, n_per_dim, hs, integration_domain + ) + + # Trace the first step + step1 = torch.jit.trace(step1, (integration_domain,)) + + # Get example input for the third step + grid_points, hs, n_per_dim = step1(integration_domain) + n_per_dim = int(n_per_dim) + function_values, _ = self.evaluate_integrand( + example_integrand, + grid_points, + weights=self._weights(n_per_dim, dim, backend), + ) + + # Trace the third step + # Avoid the warnings about a .grad attribute access of a + # non-leaf Tensor + if hs.requires_grad: + hs = hs.detach() + hs.requires_grad = True + if function_values.requires_grad: + function_values = function_values.detach() + function_values.requires_grad = True + step3 = torch.jit.trace( + step3, (function_values, hs, integration_domain) + ) + + # Define a compiled integrate function + def compiled_integrate(fn, integration_domain): + grid_points, hs, _ = step1(integration_domain) + function_values, _ = self.evaluate_integrand( + fn, grid_points, weights=self._weights(n_per_dim, dim, backend) + ) + result = step3(function_values, hs, integration_domain) + return result + + return compiled_integrate + + # Do the compilation when the returned function is executed the + # first time + compiled_func = [None] + + def lazy_compiled_integrate(fn, integration_domain): + if compiled_func[0] is None: + compiled_func[0] = do_compile(fn) + return compiled_func[0](fn, integration_domain) + + return lazy_compiled_integrate + + raise ValueError(f"Compilation not implemented for backend {backend}") diff --git a/torchquad/integration/integration_grid.py b/torchquad/integration/integration_grid.py index b2a25ec4..def7be3b 100644 --- a/torchquad/integration/integration_grid.py +++ b/torchquad/integration/integration_grid.py @@ -4,12 +4,16 @@ from loguru import logger from .utils import ( - _linspace_with_grads, _check_integration_domain, _setup_integration_domain, + _linspace_with_grads, ) +def grid_func(a, b, N, requires_grad=False, backend=None): + return _linspace_with_grads(a, b, N, requires_grad=requires_grad) + + class IntegrationGrid: """This class is used to store the integration grid for methods like Trapezoid or Simpsons, which require a grid.""" @@ -19,7 +23,7 @@ class IntegrationGrid: _dim = None # dimensionality of the grid _runtime = None # runtime for the creation of the integration grid - def __init__(self, N, integration_domain): + def __init__(self, N, integration_domain, grid_func=grid_func): """Creates an integration grid of N points in the passed domain. Dimension will be len(integration_domain) Args: @@ -28,9 +32,11 @@ def __init__(self, N, integration_domain): """ start = perf_counter() self._check_inputs(N, integration_domain) - if infer_backend(integration_domain) == "builtins": + backend = infer_backend(integration_domain) + if backend == "builtins": + backend = "torch" integration_domain = _setup_integration_domain( - len(integration_domain), integration_domain, backend="torch" + len(integration_domain), integration_domain, backend=backend ) self._dim = integration_domain.shape[0] @@ -57,11 +63,12 @@ def __init__(self, N, integration_domain): # Determine for each dimension grid points and mesh width for dim in range(self._dim): grid_1d.append( - _linspace_with_grads( + grid_func( integration_domain[dim][0], integration_domain[dim][1], self._N, requires_grad=requires_grad, + backend=backend, ) ) self.h = anp.stack( diff --git a/torchquad/integration/newton_cotes.py b/torchquad/integration/newton_cotes.py index 9fac80dd..f5f49978 100644 --- a/torchquad/integration/newton_cotes.py +++ b/torchquad/integration/newton_cotes.py @@ -1,229 +1,8 @@ -from loguru import logger -from autoray import infer_backend -from autoray import numpy as anp +from .grid_integrator import GridIntegrator -from .base_integrator import BaseIntegrator -from .integration_grid import IntegrationGrid -from .utils import _setup_integration_domain, expand_func_values_and_squeeze_integral - -class NewtonCotes(BaseIntegrator): +class NewtonCotes(GridIntegrator): """The abstract integrator that Composite Newton Cotes integrators inherit from""" def __init__(self): super().__init__() - - def integrate(self, fn, dim, N, integration_domain, backend): - """Integrate the passed function on the passed domain using a Composite Newton Cotes rule. - The argument meanings are explained in the sub-classes. - - Returns: - float: integral value - """ - # If N is None, use the minimal required number of points per dimension - if N is None: - N = self._get_minimal_N(dim) - - integration_domain = _setup_integration_domain(dim, integration_domain, backend) - self._check_inputs(dim=dim, N=N, integration_domain=integration_domain) - - grid_points, hs, n_per_dim = self.calculate_grid(N, integration_domain) - - logger.debug("Evaluating integrand on the grid.") - function_values, num_points = self.evaluate_integrand(fn, grid_points) - self._nr_of_fevals = num_points - - return self.calculate_result(function_values, dim, n_per_dim, hs) - - @expand_func_values_and_squeeze_integral - def calculate_result(self, function_values, dim, n_per_dim, hs): - """Apply the Composite Newton Cotes rule to calculate a result from the evaluated integrand. - - Args: - function_values (backend tensor): Output of the integrand - dim (int): Dimensionality - n_per_dim (int): Number of grid slices per dimension - hs (backend tensor): Distances between grid slices for each dimension - - Returns: - backend tensor: Quadrature result - """ - # Reshape the output to be [integrand_dim,N,N,...] points instead of [integrand_dim,dim*N] points - integrand_shape = function_values.shape[1:] - dim_shape = [n_per_dim] * dim - new_shape = [*integrand_shape, *dim_shape] - # We need to use einsum instead of just reshape here because reshape does not move the axis - it only reshapes. - # So the first line generates a character string for einsum, followed by moving the first dimension i.e `dim*N` - # to the end. Finally we reshape the resulting object so that instead of the last dimension being `dim*N`, it is - # `N,N,...` as desired. - einsum = "".join([chr(i + 65) for i in range(len(function_values.shape))]) - reshaped_function_values = anp.einsum( - f"{einsum}->{einsum[1:]}{einsum[0]}", function_values - ) - reshaped_function_values = reshaped_function_values.reshape(new_shape) - assert new_shape == list( - reshaped_function_values.shape - ), f"reshaping produced shape {reshaped_function_values.shape}, expected shape was {new_shape}" - logger.debug("Computing areas.") - - result = self._apply_composite_rule(reshaped_function_values, dim, hs) - - logger.opt(lazy=True).info( - "Computed integral: {result}", result=lambda: str(result) - ) - return result - - def calculate_grid(self, N, integration_domain): - """Calculate grid points, widths and N per dim - - Args: - N (int): Number of points - integration_domain (backend tensor): Integration domain - - Returns: - backend tensor: Grid points - backend tensor: Grid widths - int: Number of grid slices per dimension - """ - N = self._adjust_N(dim=integration_domain.shape[0], N=N) - - # Log with lazy to avoid redundant synchronisations with certain - # backends - logger.opt(lazy=True).debug( - "Creating a grid for {name} to integrate a function with {N} points over {d}.", - name=lambda: type(self).__name__, - N=lambda: str(N), - d=lambda: str(integration_domain), - ) - - # Create grid and assemble evaluation points - grid = IntegrationGrid(N, integration_domain) - - return grid.points, grid.h, grid._N - - def get_jit_compiled_integrate( - self, dim, N=None, integration_domain=None, backend=None - ): - """Create an integrate function where the performance-relevant steps except the integrand evaluation are JIT compiled. - Use this method only if the integrand cannot be compiled. - The compilation happens when the function is executed the first time. - With PyTorch, return values of different integrands passed to the compiled function must all have the same format, e.g. precision. - - Args: - dim (int): Dimensionality of the integration domain. - N (int, optional): Total number of sample points to use for the integration. See the integrate method documentation for more details. - integration_domain (list or backend tensor, optional): Integration domain, e.g. [[-1,1],[0,1]]. Defaults to [-1,1]^dim. It can also determine the numerical backend. - backend (string, optional): Numerical backend. Defaults to integration_domain's backend if it is a tensor and otherwise to the backend from the latest call to set_up_backend or "torch" for backwards compatibility. - - Returns: - function(fn, integration_domain): JIT compiled integrate function where all parameters except the integrand and domain are fixed - """ - # If N is None, use the minimal required number of points per dimension - if N is None: - N = self._get_minimal_N(dim) - - integration_domain = _setup_integration_domain(dim, integration_domain, backend) - self._check_inputs(dim=dim, N=N, integration_domain=integration_domain) - backend = infer_backend(integration_domain) - if backend in ["tensorflow", "jax"]: - # Tensorflow and JAX automatically recompile functions if - # the parameters change - if backend == "tensorflow": - if not hasattr(self, "_tf_jit_calculate_grid"): - import tensorflow as tf - - self._tf_jit_calculate_grid = tf.function( - self.calculate_grid, jit_compile=True - ) - self._tf_jit_calculate_result = tf.function( - self.calculate_result, jit_compile=True - ) - jit_calculate_grid = self._tf_jit_calculate_grid - jit_calculate_result = self._tf_jit_calculate_result - elif backend == "jax": - if not hasattr(self, "_jax_jit_calculate_grid"): - import jax - - self._jax_jit_calculate_grid = jax.jit( - self.calculate_grid, static_argnames=["N"] - ) - self._jax_jit_calculate_result = jax.jit( - self.calculate_result, - static_argnums=( - 1, - 2, - ), # dim and n_per_dim - ) - jit_calculate_grid = self._jax_jit_calculate_grid - jit_calculate_result = self._jax_jit_calculate_result - - def compiled_integrate(fn, integration_domain): - grid_points, hs, n_per_dim = jit_calculate_grid(N, integration_domain) - function_values, _ = self.evaluate_integrand(fn, grid_points) - return jit_calculate_result(function_values, dim, int(n_per_dim), hs) - - return compiled_integrate - - elif backend == "torch": - # Torch requires explicit tracing with example inputs. - def do_compile(example_integrand): - import torch - - # Define traceable first and third steps - def step1(integration_domain): - grid_points, hs, n_per_dim = self.calculate_grid( - N, integration_domain - ) - return ( - grid_points, - hs, - torch.Tensor([n_per_dim]), - ) # n_per_dim is constant - - dim = int(integration_domain.shape[0]) - - def step3(function_values, hs): - return self.calculate_result(function_values, dim, n_per_dim, hs) - - # Trace the first step - step1 = torch.jit.trace(step1, (integration_domain,)) - - # Get example input for the third step - grid_points, hs, n_per_dim = step1(integration_domain) - n_per_dim = int(n_per_dim) - function_values, _ = self.evaluate_integrand( - example_integrand, grid_points - ) - - # Trace the third step - # Avoid the warnings about a .grad attribute access of a - # non-leaf Tensor - if hs.requires_grad: - hs = hs.detach() - hs.requires_grad = True - if function_values.requires_grad: - function_values = function_values.detach() - function_values.requires_grad = True - step3 = torch.jit.trace(step3, (function_values, hs)) - - # Define a compiled integrate function - def compiled_integrate(fn, integration_domain): - grid_points, hs, _ = step1(integration_domain) - function_values, _ = self.evaluate_integrand(fn, grid_points) - result = step3(function_values, hs) - return result - - return compiled_integrate - - # Do the compilation when the returned function is executed the - # first time - compiled_func = [None] - - def lazy_compiled_integrate(fn, integration_domain): - if compiled_func[0] is None: - compiled_func[0] = do_compile(fn) - return compiled_func[0](fn, integration_domain) - - return lazy_compiled_integrate - - raise ValueError(f"Compilation not implemented for backend {backend}") diff --git a/torchquad/integration/simpson.py b/torchquad/integration/simpson.py index 02537644..68bea5c7 100644 --- a/torchquad/integration/simpson.py +++ b/torchquad/integration/simpson.py @@ -28,7 +28,7 @@ def integrate(self, fn, dim, N=None, integration_domain=None, backend=None): return super().integrate(fn, dim, N, integration_domain, backend) @staticmethod - def _apply_composite_rule(cur_dim_areas, dim, hs): + def _apply_composite_rule(cur_dim_areas, dim, hs, domain): """Apply composite Simpson quadrature. cur_dim_areas will contain the areas per dimension """ diff --git a/torchquad/integration/trapezoid.py b/torchquad/integration/trapezoid.py index f7005644..476743e2 100644 --- a/torchquad/integration/trapezoid.py +++ b/torchquad/integration/trapezoid.py @@ -25,7 +25,7 @@ def integrate(self, fn, dim, N=1000, integration_domain=None, backend=None): return super().integrate(fn, dim, N, integration_domain, backend) @staticmethod - def _apply_composite_rule(cur_dim_areas, dim, hs): + def _apply_composite_rule(cur_dim_areas, dim, hs, domain): """Apply composite Trapezoid quadrature. cur_dim_areas will contain the areas per dimension @@ -37,8 +37,3 @@ def _apply_composite_rule(cur_dim_areas, dim, hs): ) cur_dim_areas = anp.sum(cur_dim_areas, axis=len(cur_dim_areas.shape) - 1) return cur_dim_areas - - @staticmethod - def _adjust_N(dim, N): - # Nothing to do for Trapezoid - return N diff --git a/torchquad/tests/gauss_test.py b/torchquad/tests/gauss_test.py new file mode 100644 index 00000000..78ae7157 --- /dev/null +++ b/torchquad/tests/gauss_test.py @@ -0,0 +1,160 @@ +import sys + +sys.path.append("../") + +from integration.gaussian import GaussLegendre +from helper_functions import ( + compute_integration_test_errors, + setup_test_for_backend, +) + + +def _run_gaussian_tests(backend, _precision): + """Test the integrate function in integration.gaussian for the given backend.""" + + gauss = GaussLegendre() + + # 1D Tests + N = 60 + + errors, funcs = compute_integration_test_errors( + gauss.integrate, + {"N": N, "dim": 1}, + integration_dim=1, + use_complex=True, + backend=backend, + ) + print(f"1D {gauss} Test passed. N: {N}, backend: {backend}, Errors: {errors}") + # Polynomials up to degree 2N-1 can be integrated almost exactly with gaussian. + for err, test_function in zip(errors, funcs): + assert test_function.get_order() > (2 * N - 1) or err < 5.5e-11 + for error in errors: + assert error < 6.33e-11 + + N = 2 # integration points, here 2 for order check (2 points should lead to almost 0 err for low order polynomials) + + errors, funcs = compute_integration_test_errors( + gauss.integrate, + {"N": N, "dim": 1}, + integration_dim=1, + use_complex=True, + backend=backend, + ) + print(f"1D {gauss} Test passed. N: {N}, backend: {backend}, Errors: {errors}") + # All polynomials up to degree 3 = 2N-1 should be 0, others should be good as well. + # If this breaks check if test functions in helper_functions changed. + for err, test_function in zip(errors, funcs): + assert test_function.get_order() > 3 or err < 2e-16 + for error in errors[:2]: + assert error < 1e-15 + + # 3D Tests + N = 60**3 + + errors, funcs = compute_integration_test_errors( + gauss.integrate, + {"N": N, "dim": 3}, + integration_dim=3, + use_complex=True, + backend=backend, + ) + print(f"3D {gauss} Test passed. N: {N}, backend: {backend}, Errors: {errors}") + for err, test_function in zip(errors, funcs): + assert test_function.get_order() > 1 or ( + err < 1e-12 if test_function.is_integrand_1d else err < 1e-11 + ) + for error in errors: + assert error < 6e-3 + + # Tensorflow crashes with an Op:StridedSlice UnimplementedError with 10 + # dimensions + if backend == "tensorflow": + print("Skipping tensorflow 10D tests") + return + + # 10D Tests + N = (60**3) * 3 + + errors, funcs = compute_integration_test_errors( + gauss.integrate, + {"N": N, "dim": 10}, + integration_dim=10, + use_complex=True, + backend=backend, + ) + print(f"10D {gauss} Test passed. N: {N}, backend: {backend}, Errors: {errors}") + for err, test_function in zip(errors, funcs): + assert ( + test_function.get_order() > 60 or err < 4e-09 + ) # poly order should be relatively high + for error in errors: + assert error < 1e-5 + + # JIT Tests + if backend != "numpy": + N = 60 + jit_integrate = None + + def integrate(*args, **kwargs): + # this function initializes the jit_integrate variable with a jit'ed integrate function + # which is then re-used on all other integrations (as is the point of JIT). + nonlocal jit_integrate + if jit_integrate is None: + jit_integrate = gauss.get_jit_compiled_integrate( + dim=1, N=N, backend=backend + ) + return jit_integrate(*args, **kwargs) + + errors, funcs = compute_integration_test_errors( + integrate, + {}, + integration_dim=1, + use_complex=True, + backend=backend, + filter_test_functions=lambda x: x.is_integrand_1d, + ) + + print( + f"1D Gaussian JIT Test passed. N: {N}, backend: {backend}, Errors: {errors}" + ) + # Polynomials up to degree 2N-1 can be integrated almost exactly with gaussian. + for err, test_function in zip(errors, funcs): + assert test_function.get_order() > (2 * N - 1) or err < 2e-10 + for error in errors: + assert error < 6.33e-11 + + jit_integrate = ( + None # set to None again so can be re-used with new integrand shape + ) + + errors, funcs = compute_integration_test_errors( + integrate, + {}, + integration_dim=1, + use_complex=True, + backend=backend, + filter_test_functions=lambda x: x.integrand_dims == [2, 2, 2], + ) + print( + f"1D Gaussian JIT Test passed for [2, 2, 2] dimensional integrands. N: {N}, backend: {backend}, Errors: {errors}" + ) + for err, test_function in zip(errors, funcs): + assert test_function.get_order() > 1 or err < 2e-10 + for error in errors: + assert error < 1e-5 + + +test_integrate_numpy = setup_test_for_backend(_run_gaussian_tests, "numpy", "float64") +test_integrate_torch = setup_test_for_backend(_run_gaussian_tests, "torch", "float64") +test_integrate_jax = setup_test_for_backend(_run_gaussian_tests, "jax", "float64") +test_integrate_tensorflow = setup_test_for_backend( + _run_gaussian_tests, "tensorflow", "float64" +) + + +if __name__ == "__main__": + # used to run this test individually + test_integrate_numpy() + test_integrate_torch() + test_integrate_jax() + test_integrate_tensorflow() diff --git a/torchquad/tests/gradient_test.py b/torchquad/tests/gradient_test.py index eae094e6..5d957f51 100644 --- a/torchquad/tests/gradient_test.py +++ b/torchquad/tests/gradient_test.py @@ -11,6 +11,7 @@ from integration.trapezoid import Trapezoid from integration.simpson import Simpson from integration.boole import Boole +from integration.gaussian import GaussLegendre from helper_functions import setup_test_for_backend @@ -170,7 +171,14 @@ def _run_gradient_tests(backend, dtype_name): maintain gradients and if the gradients are consistent and correct """ # Define integrators and numbers of evaluation points - integrators = [Trapezoid(), Simpson(), Boole(), MonteCarlo(), VEGAS()] + integrators = [ + Trapezoid(), + Simpson(), + Boole(), + MonteCarlo(), + VEGAS(), + GaussLegendre(), + ] Ns_1d = [149, 149, 149, 99997, 99997] Ns_2d = [549, 121, 81, 99997, 99997] for integrator, N_1d, N_2d in zip(integrators, Ns_1d, Ns_2d): diff --git a/torchquad/tests/integration_test_functions.py b/torchquad/tests/integration_test_functions.py index 14696b7b..9e2c63e5 100644 --- a/torchquad/tests/integration_test_functions.py +++ b/torchquad/tests/integration_test_functions.py @@ -239,8 +239,7 @@ def __init__( """Creates an n-dimensional exponential test function. Args: - expected_result (backend tensor): Expected result. Required to compute errors. - integration_dim (int, optional): Input dimension. Defaults to 1. + expected_result (backend tensor): Expected result. Required to compute errors. integration_dim (int, optional): Input dimension. Defaults to 1. domain (list or backend tensor, optional): Integration domain passed to _setup_integration_domain. is_complex (Boolean): If the test function contains complex numbers. Defaults to False. backend (string, optional): Numerical backend passed to _setup_integration_domain. @@ -275,8 +274,7 @@ def __init__( """Creates an n-dimensional sinusoidal test function. Args: - expected_result (backend tensor): Expected result. Required to compute errors. - integration_dim (int, optional): Input dimension. Defaults to 1. + expected_result (backend tensor): Expected result. Required to compute errors. integration_dim (int, optional): Input dimension. Defaults to 1. domain (list or backend tensor, optional): Integration domain passed to _setup_integration_domain. is_complex (Boolean): If the test function contains complex numbers. Defaults to False. backend (string, optional): Numerical backend passed to _setup_integration_domain. diff --git a/torchquad/tests/monte_carlo_test.py b/torchquad/tests/monte_carlo_test.py index a8b48896..1f0b81d7 100644 --- a/torchquad/tests/monte_carlo_test.py +++ b/torchquad/tests/monte_carlo_test.py @@ -117,7 +117,7 @@ def integrate(*args, **kwargs): assert error < 1e-2 assert errors[3] < 0.5 - assert errors[4] < 32.0 + assert errors[4] < 43.0 for error in errors[6:10]: assert error < 2e-2