diff --git a/benchmarks/distribution.py b/benchmarks/distribution.py
index 131c8101df..8f91c621dc 100644
--- a/benchmarks/distribution.py
+++ b/benchmarks/distribution.py
@@ -50,9 +50,9 @@ def get_randvar(distribution_name):
     elif distribution_name == "symmatrixvar_normal":
         distribution = prob.Normal(mean=mean_2d_mat, cov=cov_2d_symkron)
     elif distribution_name == "operatorvar_normal":
-        distribution = prob.Normal(mean=mean_2d_linop)
+        distribution = prob.Normal(mean=mean_2d_linop, cov=cov_2d_symkron)

-    return prob.RandomVariable(distribution=distribution)
+    return distribution


 class Functions:
@@ -72,13 +72,13 @@ def time_distr_functions(self, dist, property):
         """Times evaluation of the pdf, logpdf, cdf and logcdf."""
         try:
             if property == "pdf":
-                self.randvar.distribution.pdf(x=self.eval_point)
+                self.randvar.pdf(x=self.eval_point)
             elif property == "logpdf":
-                self.randvar.distribution.pdf(x=self.eval_point)
+                self.randvar.logpdf(x=self.eval_point)
             elif property == "cdf":
-                self.randvar.distribution.pdf(x=self.quantile)
+                self.randvar.cdf(x=self.quantile)
             elif property == "logcdf":
-                self.randvar.distribution.pdf(x=self.quantile)
+                self.randvar.logcdf(x=self.quantile)
         except NotImplementedError:
             pass
diff --git a/docs/source/introduction/quickstart.ipynb b/docs/source/introduction/quickstart.ipynb
index ced98bdb1d..25b2a654f4 100644
--- a/docs/source/introduction/quickstart.ipynb
+++ b/docs/source/introduction/quickstart.ipynb
@@ -121,7 +121,7 @@
    ],
    "source": [
     "y = 2 * x + 1\n",
-    "print(f\"Mean: {y.mean()} and covariance: {y.cov()} of y.\")"
+    "print(f\"Mean: {y.mean} and covariance: {y.cov} of y.\")"
    ]
   },
   {
@@ -164,7 +164,7 @@
     "\n",
     "# Solve using ProbNum\n",
     "x_rv, _, _, info = probnum.linalg.problinsolve(A, b)\n",
-    "print(x_rv.mean())"
+    "print(x_rv.mean)"
    ]
   },
   {
@@ -212,7 +212,7 @@
    ],
    "source": [
     "# Covariance of solution representing uncertainty\n",
-    "print(f\"Covariance matrix: \\n{x_rv.cov().todense()}\")"
+    "print(f\"Covariance matrix: \\n{x_rv.cov.todense()}\")"
    ]
   },
   {
@@ -591,7 +591,7 @@
    ],
    "source": [
     "# Plot of true solution, mean and samples\n",
-    "rvdict = {\"$x_*$\" : x, \"$\\mathbb{E}(\\mathsf{x})$\" : x_rv.mean(), \n",
+    "rvdict = {\"$x_*$\" : x, \"$\\mathbb{E}(\\mathsf{x})$\" : x_rv.mean, \n",
     "          \"$\\mathsf{x}_1$\" : x_samples[0], \"$\\mathsf{x}_2$\" : x_samples[1], \"$\\mathsf{x}_3$\" : x_samples[2]}\n",
     "vmin = np.min([np.min(mat) for mat in list(rvdict.values())])\n",
     "vmax = np.max([np.max(mat) for mat in list(rvdict.values())])\n",
@@ -731,7 +731,7 @@
    ],
    "source": [
     "# Covariance of solution representing uncertainty\n",
-    "print(f\"Covariance matrix: \\n{x_rv.cov().todense()}\")"
+    "print(f\"Covariance matrix: \\n{x_rv.cov.todense()}\")"
    ]
   },
   {
@@ -1110,7 +1110,7 @@
    ],
    "source": [
     "# Plot of true solution, mean and samples\n",
-    "rvdict = {\"$x_*$\" : x, \"$\\mathbb{E}(\\mathsf{x})$\" : x_rv.mean(), \n",
+    "rvdict = {\"$x_*$\" : x, \"$\\mathbb{E}(\\mathsf{x})$\" : x_rv.mean, \n",
     "          \"$\\mathsf{x}_1$\" : x_samples[0], \"$\\mathsf{x}_2$\" : x_samples[1], \"$\\mathsf{x}_3$\" : x_samples[2]}\n",
     "\n",
     "fig, axes = plt.subplots(nrows=1, ncols=2 + n_samples, figsize=(8, 2.5), sharey=True)\n",
diff --git a/docs/source/tutorials/adaptive_steps_odefilter.ipynb b/docs/source/tutorials/adaptive_steps_odefilter.ipynb
index 5ebf1976c6..327bff5788 100644
--- a/docs/source/tutorials/adaptive_steps_odefilter.ipynb
+++ b/docs/source/tutorials/adaptive_steps_odefilter.ipynb
@@ -982,7 +982,7 @@
    "ivp = lotkavolterra([0.0, 20.0], initrv)\n",
    "sol = probsolve_ivp(ivp, tol=1e-1)\n",
    "\n",
-   "means = sol.y.mean()\n",
+   "means = sol.y.mean\n",
    "\n",
    "plt.plot(sol.t, means[:, 0], label=\"Prey\")\n",
    "plt.plot(sol.t, means[:, 1], label=\"Predators\")\n",
@@ -19320,7 +19320,7 @@
    "            which_prior=priors[idx],\n",
    "            method=filters[jdx],\n",
    "        )\n",
-   "        ts, means, stds = sol.t, sol.y.mean(), sol.y.std()\n",
+   "        ts, means, stds = sol.t, sol.y.mean, sol.y.std\n",
    "        ax[idx][jdx].plot(ts, means)\n",
    "        ax[idx][jdx].fill_between(\n",
    "            ts, means[:, 0] - 3 * stds[:, 0], means[:, 0] + 3 * stds[:, 0], alpha=0.25\n",
@@ -65242,7 +65242,7 @@
    "            which_prior=priors[idx],\n",
    "            method=filters[jdx],\n",
    "        )\n",
-   "        ts, means, stds = sol.t, sol.y.mean(), sol.y.std()\n",
+   "        ts, means, stds = sol.t, sol.y.mean, sol.y.std\n",
    "        ax[idx][jdx].plot(ts, means)\n",
    "        ax[idx][jdx].fill_between(\n",
    "            ts, means[:, 0] - 3 * stds[:, 0], means[:, 0] + 3 * stds[:, 0], alpha=0.25\n",
@@ -83739,7 +83739,7 @@
    "            which_prior=priors[idx],\n",
    "            method=filters[jdx],\n",
    "        )\n",
-   "        ts, means, stds = sol.t, sol.y.mean(), sol.y.std()\n",
+   "        ts, means, stds = sol.t, sol.y.mean, sol.y.std\n",
    "        ax[idx][jdx].plot(ts, means)\n",
    "        ax[idx][jdx].fill_between(\n",
    "            ts, means[:, 0] - 3 * stds[:, 0], means[:, 0] + 3 * stds[:, 0], alpha=0.25\n",
diff --git a/docs/source/tutorials/galerkin_method.ipynb b/docs/source/tutorials/galerkin_method.ipynb
index afdc749bc2..d22d9a4fac 100644
--- a/docs/source/tutorials/galerkin_method.ipynb
+++ b/docs/source/tutorials/galerkin_method.ipynb
@@ -690,7 +690,7 @@
    "print(info)\n",
    "\n",
    "# Save solution to file\n",
-   "np.save(data_path + \"solution_res{}.npy\".format(mesh_resolution_coarse), uhat.mean())"
+   "np.save(data_path + \"solution_res{}.npy\".format(mesh_resolution_coarse), uhat.mean)"
   ]
  },
  {
@@ -3350,8 +3350,8 @@
    "\n",
    "# Interpolation levels and color scaling\n",
    "interpol_levels = None\n",
-   "vmin_tricont = np.min(uhat.mean().ravel())\n",
-   "vmax_tricont = np.max(uhat.mean().ravel())\n",
+   "vmin_tricont = np.min(uhat.mean.ravel())\n",
+   "vmax_tricont = np.max(uhat.mean.ravel())\n",
    "\n",
    "# Sample from Posterior\n",
    "np.random.seed(42)\n",
@@ -3368,7 +3368,7 @@
    "\n",
    "# Estimated solution\n",
    "fig.add_subplot(gs00[0, 0])\n",
-   "plt.tricontourf(triang, uhat.mean().ravel(), levels=interpol_levels, vmin=vmin_tricont, vmax=vmax_tricont)\n",
+   "plt.tricontourf(triang, uhat.mean.ravel(), levels=interpol_levels, vmin=vmin_tricont, vmax=vmax_tricont)\n",
    "plt.triplot(triang, linewidth=2.5, color=\"white\", alpha=.25)\n",
    "plt.title(\"$\\mathbb{E}[\\\\bm{\\\\mathsf{u}}]$\", fontsize=24)\n",
    "plt.axis('scaled')\n",
@@ -3732,8 +3732,8 @@
   ],
   "source": [
    "# Plot linear operators\n",
-   "matdict = {\"$A$\" : A.todense(), \"$\\mathbb{E}(\\mathsf{A})$\" : Ahat.mean().todense(), \n",
-   "           \"$\\mathbb{E}(\\mathsf{H})$\" : Ainvhat.mean().todense()}\n",
+   "matdict = {\"$A$\" : A.todense(), \"$\\mathbb{E}(\\mathsf{A})$\" : Ahat.mean.todense(), \n",
+   "           \"$\\mathbb{E}(\\mathsf{H})$\" : Ainvhat.mean.todense()}\n",
    "\n",
    "# Set imshow limits uniform across matrices\n",
    "vmax = np.max([np.max(np.abs(mat)) for mat in list(matdict.values())])\n",
@@ -3792,7 +3792,7 @@
    "rel_error_u = error_u / np.linalg.norm(u)\n",
    "\n",
    "# Scaled error\n",
-   "Sigma = uhat.cov().todense()\n",
+   "Sigma = uhat.cov.todense()\n",
    "Sigma_pseudoinv = np.linalg.pinv(Sigma, hermitian=True)\n",
    "scaled_error_u = np.real_if_close(scipy.linalg.sqrtm(Sigma_pseudoinv) @ error_u, tol=10**8)"
   ]
diff --git a/docs/source/tutorials/linear_operators.ipynb b/docs/source/tutorials/linear_operators.ipynb
index 8ce971859f..452d2892a3 100644
--- a/docs/source/tutorials/linear_operators.ipynb
+++ b/docs/source/tutorials/linear_operators.ipynb
@@ -1045,7 +1045,7 @@
   ],
   "source": [
    "# Plot\n",
-   "rvdict = {\"$\\mathbb{E}(\\mathsf{Y})$\" : Y.mean().todense(), \n",
-   "          \"$\\mathsf{Y}_1$\" : Ysamples[2], \"$\\mathsf{Y}_2$\" : Ysamples[1], \"$\\mathsf{Y}_3$\" : Ysamples[2]}\n",
+   "rvdict = {\"$\\mathbb{E}(\\mathsf{Y})$\" : Y.mean.todense(), \n",
+   "          \"$\\mathsf{Y}_1$\" : Ysamples[0], \"$\\mathsf{Y}_2$\" : Ysamples[1], \"$\\mathsf{Y}_3$\" : Ysamples[2]}\n",
    "vmin = np.min([np.min(mat) for mat in list(rvdict.values())])\n",
    "vmax = np.max([np.max(mat) for mat in list(rvdict.values())])\n",
diff --git a/docs/source/tutorials/linear_systems.ipynb b/docs/source/tutorials/linear_systems.ipynb
index 6fcf568cae..24b47d2f80 100644
--- a/docs/source/tutorials/linear_systems.ipynb
+++ b/docs/source/tutorials/linear_systems.ipynb
@@ -758,7 +758,7 @@
    "Ainv_samples = Ainv.sample(3)\n",
    "\n",
    "# Plot\n",
-   "rvdict = {\"$A$\" : A, \"$\\mathbb{E}(\\mathsf{A})$\" : Ahat.mean().todense(),\n",
+   "rvdict = {\"$A$\" : A, \"$\\mathbb{E}(\\mathsf{A})$\" : Ahat.mean.todense(),\n",
    "          \"$\\mathsf{A}_1$\" : Ahat_samples[0], \"$\\mathsf{A}_2$\" : Ahat_samples[1]}\n",
    "\n",
    "fig, axes = plt.subplots(nrows=1, ncols=len(rvdict), figsize=(10, 3), sharey=True)\n",
@@ -1140,7 +1140,7 @@
   ],
   "source": [
    "# Plot\n",
-   "rvdict = {\"$A^{-1}\": np.linalg.inv(A), \"$\\mathbb{E}(\\mathsf{H})\" : Ainv.mean().todense(), \n",
-   "          \"$\\mathsf{H}_1\" : Ainv_samples[0], \"$\\mathsf{H}_2\" : Ainv_samples[1]}\n",
+   "rvdict = {\"$A^{-1}$\": np.linalg.inv(A), \"$\\mathbb{E}(\\mathsf{H})$\" : Ainv.mean.todense(), \n",
+   "          \"$\\mathsf{H}_1$\" : Ainv_samples[0], \"$\\mathsf{H}_2$\" : Ainv_samples[1]}\n",
    "\n",
    "fig, axes = plt.subplots(nrows=2, ncols=len(rvdict), figsize=(10, 6), sharey=True)\n",
diff --git a/docs/source/tutorials/uncertainties_odefilter.ipynb b/docs/source/tutorials/uncertainties_odefilter.ipynb
index 4b939ed95d..10d7cacf77 100644
--- a/docs/source/tutorials/uncertainties_odefilter.ipynb
+++ b/docs/source/tutorials/uncertainties_odefilter.ipynb
@@ -201417,7 +201417,7 @@
   }
  ],
  "source": [
-  "means, stds = sol.y.mean(), sol.y.std()\n",
+  "means, stds = sol.y.mean, sol.y.std\n",
   "\n",
   "fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, sharex=True)\n",
   "ax1.plot(sol.t, means[:, 0], label=\"x1\")\n",
@@ -402823,7 +402823,7 @@
   }
  ],
  "source": [
-  "means, stds = sol.y.mean(), sol.y.std()\n",
+  "means, stds = sol.y.mean, sol.y.std\n",
   "\n",
   "fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, sharex=True)\n",
   "ax1.plot(sol.t, means[:, 0], label=\"x1\")\n",
diff --git a/src/probnum/diffeq/ode/ivp.py b/src/probnum/diffeq/ode/ivp.py
index 756798e370..c6b1ab5119 100644
--- a/src/probnum/diffeq/ode/ivp.py
+++ b/src/probnum/diffeq/ode/ivp.py
@@ -61,7 +61,7 @@ def hess(t, y):
         return log_hess(t, y, params)

     def sol(t):
-        return log_sol(t, params, initrv.mean())
+        return log_sol(t, params, initrv.mean)

     return IVP(timespan, initrv, rhs, jac, hess, sol)

@@ -264,7 +264,7 @@ class IVP(ODE):
     >>> from probnum.diffeq import IVP
     >>> rhsfun = lambda t, y, **kwargs: 2.0*y
     >>> from probnum.prob import Dirac, Normal, RandomVariable
-    >>> initrv = RandomVariable(distribution=Dirac(0.1))
+    >>> initrv = Dirac(0.1)
     >>> timespan = (0, 10)
     >>> ivp = IVP(timespan, initrv, rhsfun)
     >>> print(ivp.rhs(0., 2.))
     4.0
     >>> print(ivp.t0)
     0

-    >>> initrv = RandomVariable(distribution=Normal(0.1, 1.0))
+    >>> initrv = Normal(0.1, 1.0)
     >>> ivp = IVP(timespan, initrv, rhsfun)
     >>> jac = lambda t, y, **kwargs: 2.0
     >>> ivp = IVP(timespan, initrv, rhs=rhsfun, jac=jac)
@@ -310,7 +310,7 @@ def ndim(self):

         Depends on the mean of the initial random variable.
         """
-        if np.isscalar(self.initrv.mean()):
+        if np.isscalar(self.initrv.mean):
             return 1
         else:
-            return len(self.initrv.mean())
+            return len(self.initrv.mean)
diff --git a/src/probnum/diffeq/odefiltsmooth/ivp2filter.py b/src/probnum/diffeq/odefiltsmooth/ivp2filter.py
index 309a9b4e0e..c94e5784c2 100644
--- a/src/probnum/diffeq/odefiltsmooth/ivp2filter.py
+++ b/src/probnum/diffeq/odefiltsmooth/ivp2filter.py
@@ -7,8 +7,7 @@
 from probnum.filtsmooth import ExtendedKalman, UnscentedKalman
 from probnum.filtsmooth.statespace.discrete import DiscreteGaussianModel
-from probnum.prob import RandomVariable
-from probnum.prob.distributions import Normal, Dirac
+from probnum.prob import Normal, Dirac


 def ivp2ekf0(ivp, prior, evlvar):
@@ -167,10 +166,10 @@ def _initialdistribution(ivp, prior):
     Note that the projection matrices :math:`H_0` and :math:`H_1`
     become :math:`H_0 P^{-1}` and :math:`H_1 P^{-1}`.
     """
-    if not issubclass(type(ivp.initrv.distribution), Normal):
-        if not issubclass(type(ivp.initrv.distribution), Dirac):
+    if not issubclass(type(ivp.initrv), Normal):
+        if not issubclass(type(ivp.initrv), Dirac):
             raise RuntimeError("Initial distribution not Normal nor Dirac")
-    x0 = ivp.initialdistribution.mean()
+    x0 = ivp.initialdistribution.mean
     dx0 = ivp.rhs(ivp.t0, x0)
     ddx0 = _ddx(ivp.t0, x0, ivp)
     dddx0 = _dddx(ivp.t0, x0, ivp)
@@ -194,17 +193,17 @@
     projmat = np.hstack((h0.T, h1.T, h2.T, h3.T)).T
     data = np.hstack((x0, dx0, ddx0, dddx0))
     _size = 4
-    largecov = np.kron(np.eye(_size), ivp.initialdistribution.cov())
+    largecov = np.kron(np.eye(_size), ivp.initialdistribution.cov)
     s = projmat @ initcov @ projmat.T + largecov
     crosscov = initcov @ projmat.T
     newmean = crosscov @ np.linalg.solve(s, data)
     newcov = initcov - (crosscov @ np.linalg.solve(s.T, crosscov.T)).T
-    return RandomVariable(distribution=Normal(newmean, newcov))
+    return Normal(newmean, newcov)


 def _initialdistribution_no_precond(ivp, prior):
-
-    x0 = ivp.initialdistribution.mean()
+    """Initial distribution without the preconditioner."""
+    x0 = ivp.initialdistribution.mean
     dx0 = ivp.rhs(ivp.t0, x0)
     ddx0 = _ddx(ivp.t0, x0, ivp)
     h0 = prior.proj2coord(coord=0)
@@ -221,7 +220,7 @@
     crosscov = initcov @ projmat.T  # @ np.linalg.inv(s)
     newmean = crosscov @ np.linalg.solve(s, data)
     newcov = initcov - (crosscov @ np.linalg.solve(s, crosscov)).T
-    return RandomVariable(distribution=Normal(newmean, newcov))
+    return Normal(newmean, newcov)


 def _ddx(t, x, ivp):
diff --git a/src/probnum/diffeq/odefiltsmooth/ivpfiltsmooth.py b/src/probnum/diffeq/odefiltsmooth/ivpfiltsmooth.py
index acde3c7541..6d26e5109d 100644
--- a/src/probnum/diffeq/odefiltsmooth/ivpfiltsmooth.py
+++ b/src/probnum/diffeq/odefiltsmooth/ivpfiltsmooth.py
@@ -1,8 +1,7 @@
 import warnings
 import numpy as np

-from probnum.prob import RandomVariable
-from probnum.prob.distributions import Normal
+from probnum.prob import Normal
 from probnum.diffeq import odesolver
 from probnum.diffeq.odefiltsmooth.prior import ODEPrior
 from probnum.diffeq.odesolution import ODESolution
@@ -67,7 +66,7 @@ def solve(self, firststep, **kwargs):
             )

             errorest, ssq = self._estimate_error(
-                filt_rv.mean(), crosscov, meas_cov, meas_mean
+                filt_rv.mean, crosscov, meas_cov, meas_mean
             )

             if self.steprule.is_accepted(step, errorest):
@@ -81,10 +80,7 @@
                 step = self._suggest_step(step, errorest)
                 step = min(step, self.ivp.tmax - t)

-        rvs = [
-            RandomVariable(distribution=Normal(rv.mean(), ssqest * rv.cov()))
-            for rv in rvs
-        ]
+        rvs = [Normal(rv.mean, ssqest * rv.cov) for rv in rvs]

         return ODESolution(times, rvs, self)

@@ -116,9 +112,9 @@ def odesmooth(self, filter_solution, **kwargs):

     def undo_preconditioning(self, rv):
         ipre = self.gfilt.dynamicmodel.invprecond
-        newmean = ipre @ rv.mean()
-        newcov = ipre @ rv.cov() @ ipre.T
-        newrv = RandomVariable(distribution=Normal(newmean, newcov))
+        newmean = ipre @ rv.mean
+        newcov = ipre @ rv.cov @ ipre.T
+        newrv = Normal(newmean, newcov)
         return newrv

     def _estimate_error(self, currmn, ccest, covest, mnest):
diff --git a/src/probnum/diffeq/odefiltsmooth/odefiltsmooth.py b/src/probnum/diffeq/odefiltsmooth/odefiltsmooth.py
index 3969ee54eb..dee13f96ae 100644
--- a/src/probnum/diffeq/odefiltsmooth/odefiltsmooth.py
+++ b/src/probnum/diffeq/odefiltsmooth/odefiltsmooth.py
@@ -153,8 +153,8 @@ def probsolve_ivp(
     y : :obj:`list` of :obj:`RandomVariable`, length=N
         Discrete-time solution at times :math:`t_1, ..., t_N`,
         as a list of random variables.
-        The means and covariances can be accessed with ``solution.y.mean()``
-        and ``solution.y.cov()``.
+        The means and covariances can be accessed with ``solution.y.mean``
+        and ``solution.y.cov``.

     See Also
     --------
@@ -182,8 +182,8 @@
     Examples
     --------
     >>> from probnum.diffeq import logistic, probsolve_ivp
-    >>> from probnum.prob import RandomVariable, Dirac, Normal
-    >>> initrv = RandomVariable(distribution=Dirac(0.15))
+    >>> from probnum.prob import Dirac, Normal
+    >>> initrv = Dirac(0.15)
     >>> ivp = logistic(timespan=[0., 1.5], initrv=initrv, params=(4, 1))
     >>> solution = probsolve_ivp(ivp, method="ekf0", step=0.1)
-    >>> print(solution.y.mean())
+    >>> print(solution.y.mean)
     [0.97577054]
     [0.9831919 ]]

@@ -204,7 +204,7 @@
-    >>> initrv = RandomVariable(distribution=Dirac(0.15))
+    >>> initrv = Dirac(0.15)
     >>> ivp = logistic(timespan=[0., 1.5], initrv=initrv, params=(4, 1))
     >>> solution = probsolve_ivp(ivp, method="eks1", which_prior="ioup3", step=0.1)
-    >>> print(solution.y.mean())
+    >>> print(solution.y.mean)
diff --git a/src/probnum/diffeq/odefiltsmooth/prior.py b/src/probnum/diffeq/odefiltsmooth/prior.py
index 8aaafd9b33..86a4f7cf4b 100644
--- a/src/probnum/diffeq/odefiltsmooth/prior.py
+++ b/src/probnum/diffeq/odefiltsmooth/prior.py
@@ -14,7 +14,7 @@
 from scipy.special import factorial  # vectorised factorial for IBM-Q(h)

 from probnum.filtsmooth.statespace.continuous import LTISDEModel
-from probnum.prob import RandomVariable, Normal
+from probnum.prob import Normal


 class ODEPrior(LTISDEModel):
@@ -267,13 +267,13 @@ def chapmankolmogorov(self, start, stop, step, randvar, *args, **kwargs):

-        "step" variable is obsolent here and is ignored.
+        The "step" variable is obsolete here and is ignored.
         """
-        mean, covar = randvar.mean(), randvar.cov()
+        mean, covar = randvar.mean, randvar.cov
         ah = self._trans_ibm(start, stop)
         qh = self._transdiff_ibm(start, stop)
         mpred = ah @ mean
         crosscov = covar @ ah.T
         cpred = ah @ crosscov + qh
-        return RandomVariable(distribution=Normal(mpred, cpred)), crosscov
+        return Normal(mpred, cpred), crosscov

     def _trans_ibm(self, start, stop):
         """
diff --git a/src/probnum/diffeq/odesolution.py b/src/probnum/diffeq/odesolution.py
index 3288b49d7a..90041e3108 100644
--- a/src/probnum/diffeq/odesolution.py
+++ b/src/probnum/diffeq/odesolution.py
@@ -5,7 +5,7 @@
 Provides dense output by being callable.
-Can function values can also be accessed by indexing.
+Function values can also be accessed by indexing.
""" -from probnum.prob import RandomVariable, Normal +from probnum.prob import Normal from probnum.prob._randomvariablelist import _RandomVariableList from probnum.filtsmooth.filtsmoothposterior import FiltSmoothPosterior from probnum.filtsmooth import KalmanPosterior @@ -35,8 +35,8 @@ class ODESolution(FiltSmoothPosterior): Examples -------- >>> from probnum.diffeq import logistic, probsolve_ivp - >>> from probnum.prob import RandomVariable, Dirac, Normal - >>> initrv = RandomVariable(distribution=Dirac(0.15)) + >>> from probnum.prob import Dirac, Normal + >>> initrv = Dirac(0.15) >>> ivp = logistic(timespan=[0., 1.5], initrv=initrv, params=(4, 1)) >>> solution = probsolve_ivp(ivp, method="ekf0", step=0.1) >>> # Mean of the discrete-time solution @@ -62,11 +62,11 @@ class ODESolution(FiltSmoothPosterior): [0. 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 1. 1.1 1.2 1.3 1.4 1.5] >>> # Individual entries of the discrete-time solution can be accessed with >>> print(solution[5]) - <(1,) RandomVariable with dtype=> - >>> print(solution[5].mean()) + <(1,) Normal with dtype=float64> + >>> print(solution[5].mean) [0.55945475] >>> # Evaluate the continuous-time solution at a new time point t=0.65 - >>> print(solution(0.65).mean()) + >>> print(solution(0.65).mean) [0.69702861] """ @@ -77,9 +77,9 @@ def __init__(self, times, rvs, solver): def _proj_normal_rv(self, rv, coord): """Projection of a normal RV, e.g. to map 'states' to 'function values'.""" q = self._solver.prior.ordint - new_mean = rv.mean()[coord :: (q + 1)] - new_cov = rv.cov()[coord :: (q + 1), coord :: (q + 1)] - return RandomVariable(distribution=Normal(new_mean, new_cov)) + new_mean = rv.mean[coord :: (q + 1)] + new_cov = rv.cov[coord :: (q + 1), coord :: (q + 1)] + return Normal(new_mean, new_cov) @property def t(self): @@ -93,7 +93,7 @@ def y(self): Probabilistic discrete-time solution at times :math:`t_1, ..., t_N`, as a list of random variables. - To return means and covariances use ``y.mean()`` and ``y.cov()``. + To return means and covariances use ``y.mean`` and ``y.cov``. """ function_rvs = [self._proj_normal_rv(rv, 0) for rv in self._state_rvs] return _RandomVariableList(function_rvs) diff --git a/src/probnum/filtsmooth/bayesfiltsmooth.py b/src/probnum/filtsmooth/bayesfiltsmooth.py index 61bb8f17f6..562a7ebdb1 100644 --- a/src/probnum/filtsmooth/bayesfiltsmooth.py +++ b/src/probnum/filtsmooth/bayesfiltsmooth.py @@ -85,4 +85,4 @@ def initialdistribution(self): """ Convenience function for accessing ``self.initdist``. 
""" - return self.initrv.distribution + return self.initrv diff --git a/src/probnum/filtsmooth/gaussfiltsmooth/extendedkalman.py b/src/probnum/filtsmooth/gaussfiltsmooth/extendedkalman.py index e552e8246b..c584025c6b 100644 --- a/src/probnum/filtsmooth/gaussfiltsmooth/extendedkalman.py +++ b/src/probnum/filtsmooth/gaussfiltsmooth/extendedkalman.py @@ -5,8 +5,7 @@ import numpy as np from probnum.filtsmooth.gaussfiltsmooth.gaussfiltsmooth import GaussFiltSmooth -from probnum.prob import RandomVariable -from probnum.prob.distributions import Normal +from probnum.prob import Normal from probnum.filtsmooth.statespace import ( ContinuousModel, DiscreteModel, @@ -98,7 +97,7 @@ def __init__(self, dynamod, measmod, initrv, **kwargs): super().__init__(dynamod, measmod, initrv) def predict(self, start, stop, randvar, **kwargs): - mean, covar = randvar.mean(), randvar.cov() + mean, covar = randvar.mean, randvar.cov if np.isscalar(mean) and np.isscalar(covar): mean, covar = mean * np.ones(1), covar * np.eye(1) diffmat = self.dynamod.diffusionmatrix(start, **kwargs) @@ -106,7 +105,7 @@ def predict(self, start, stop, randvar, **kwargs): mpred = self.dynamod.dynamics(start, mean, **kwargs) crosscov = covar @ jacob.T cpred = jacob @ crosscov + diffmat - return RandomVariable(distribution=Normal(mpred, cpred)), crosscov + return Normal(mpred, cpred), crosscov def update(self, time, randvar, data, **kwargs): return _discrete_extkalman_update( @@ -115,7 +114,7 @@ def update(self, time, randvar, data, **kwargs): def _discrete_extkalman_update(time, randvar, data, measmod, **kwargs): - mpred, cpred = randvar.mean(), randvar.cov() + mpred, cpred = randvar.mean, randvar.cov if np.isscalar(mpred) and np.isscalar(cpred): mpred, cpred = mpred * np.ones(1), cpred * np.eye(1) jacob = measmod.jacobian(time, mpred, **kwargs) @@ -125,5 +124,5 @@ def _discrete_extkalman_update(time, randvar, data, measmod, **kwargs): ccest = cpred @ jacob.T mean = mpred + ccest @ np.linalg.solve(covest, data - meanest) cov = cpred - ccest @ np.linalg.solve(covest.T, ccest.T) - updated_rv = RandomVariable(distribution=Normal(mean, cov)) + updated_rv = Normal(mean, cov) return updated_rv, covest, ccest, meanest diff --git a/src/probnum/filtsmooth/gaussfiltsmooth/gaussfiltsmooth.py b/src/probnum/filtsmooth/gaussfiltsmooth/gaussfiltsmooth.py index beeaea3e1f..bbd2f97ade 100644 --- a/src/probnum/filtsmooth/gaussfiltsmooth/gaussfiltsmooth.py +++ b/src/probnum/filtsmooth/gaussfiltsmooth/gaussfiltsmooth.py @@ -5,7 +5,7 @@ import numpy as np -from probnum.prob import RandomVariable, Normal +from probnum.prob import Normal from probnum.filtsmooth.bayesfiltsmooth import BayesFiltSmooth from probnum.filtsmooth.gaussfiltsmooth.kalmanposterior import KalmanPosterior @@ -17,7 +17,7 @@ class GaussFiltSmooth(BayesFiltSmooth, ABC): def __init__(self, dynamod, measmod, initrv): """Check that the initial distribution is Gaussian.""" - if not issubclass(type(initrv.distribution), Normal): + if not issubclass(type(initrv), Normal): raise ValueError( "Gaussian filters/smoothers need initial " "random variables with Normal distribution." @@ -157,9 +157,9 @@ def smooth_step(self, unsmoothed_rv, pred_rv, smoothed_rv, crosscov): returned by predict(). 
""" crosscov = np.asarray(crosscov) - initmean, initcov = unsmoothed_rv.mean(), unsmoothed_rv.cov() - predmean, predcov = pred_rv.mean(), pred_rv.cov() - currmean, currcov = smoothed_rv.mean(), smoothed_rv.cov() + initmean, initcov = unsmoothed_rv.mean, unsmoothed_rv.cov + predmean, predcov = pred_rv.mean, pred_rv.cov + currmean, currcov = smoothed_rv.mean, smoothed_rv.cov if np.isscalar(predmean) and np.isscalar(predcov): predmean = predmean * np.ones(1) predcov = predcov * np.eye(1) @@ -168,7 +168,7 @@ def smooth_step(self, unsmoothed_rv, pred_rv, smoothed_rv, crosscov): firstsolve = crosscov @ np.linalg.solve(predcov, currcov - predcov) secondsolve = crosscov @ np.linalg.solve(predcov, firstsolve.T) newcov = initcov + secondsolve.T - return RandomVariable(distribution=Normal(newmean, newcov)) + return Normal(newmean, newcov) @abstractmethod def predict(self, start, stop, randvar, **kwargs): diff --git a/src/probnum/filtsmooth/gaussfiltsmooth/kalman.py b/src/probnum/filtsmooth/gaussfiltsmooth/kalman.py index fc7eade197..c2b9e82d0f 100644 --- a/src/probnum/filtsmooth/gaussfiltsmooth/kalman.py +++ b/src/probnum/filtsmooth/gaussfiltsmooth/kalman.py @@ -5,7 +5,7 @@ import numpy as np from probnum.filtsmooth.gaussfiltsmooth.gaussfiltsmooth import GaussFiltSmooth -from probnum.prob import RandomVariable, Normal +from probnum.prob import Normal from probnum.filtsmooth.statespace import ( ContinuousModel, DiscreteModel, @@ -111,7 +111,7 @@ def __init__(self, dynamod, measmod, initrv): def predict(self, start, stop, randvar, **kwargs): """Prediction step for discrete-discrete Kalman filtering.""" - mean, covar = randvar.mean(), randvar.cov() + mean, covar = randvar.mean, randvar.cov if np.isscalar(mean) and np.isscalar(covar): mean, covar = mean * np.ones(1), covar * np.eye(1) dynamat = self.dynamicmodel.dynamicsmatrix(start, **kwargs) @@ -120,7 +120,7 @@ def predict(self, start, stop, randvar, **kwargs): mpred = dynamat @ mean + forcevec ccpred = covar @ dynamat.T cpred = dynamat @ ccpred + diffmat - return RandomVariable(distribution=Normal(mpred, cpred)), ccpred + return Normal(mpred, cpred), ccpred def update(self, time, randvar, data, **kwargs): """Update step of discrete Kalman filtering""" @@ -131,7 +131,7 @@ def update(self, time, randvar, data, **kwargs): def _discrete_kalman_update(time, randvar, data, measurementmodel, **kwargs): """Discrete Kalman update.""" - mpred, cpred = randvar.mean(), randvar.cov() + mpred, cpred = randvar.mean, randvar.cov if np.isscalar(mpred) and np.isscalar(cpred): mpred, cpred = mpred * np.ones(1), cpred * np.eye(1) measmat = measurementmodel.dynamicsmatrix(time, **kwargs) @@ -141,4 +141,4 @@ def _discrete_kalman_update(time, randvar, data, measurementmodel, **kwargs): ccest = cpred @ measmat.T mean = mpred + ccest @ np.linalg.solve(covest, data - meanest) cov = cpred - ccest @ np.linalg.solve(covest.T, ccest.T) - return (RandomVariable(distribution=Normal(mean, cov)), covest, ccest, meanest) + return (Normal(mean, cov), covest, ccest, meanest) diff --git a/src/probnum/filtsmooth/gaussfiltsmooth/unscentedkalman.py b/src/probnum/filtsmooth/gaussfiltsmooth/unscentedkalman.py index 13574f003f..e0ddcc9287 100644 --- a/src/probnum/filtsmooth/gaussfiltsmooth/unscentedkalman.py +++ b/src/probnum/filtsmooth/gaussfiltsmooth/unscentedkalman.py @@ -8,7 +8,7 @@ import numpy as np from probnum.filtsmooth.gaussfiltsmooth.gaussfiltsmooth import GaussFiltSmooth -from probnum.prob import RandomVariable, Normal +from probnum.prob import Normal from 
probnum.filtsmooth.gaussfiltsmooth.unscentedtransform import UnscentedTransform from probnum.filtsmooth.statespace import ( ContinuousModel, @@ -112,7 +112,7 @@ def _predict_linear(self, start, randvar, **kwargs): """ Basic Kalman update because model is linear. """ - mean, covar = randvar.mean(), randvar.cov() + mean, covar = randvar.mean, randvar.cov if np.isscalar(mean) and np.isscalar(covar): mean, covar = mean * np.ones(1), covar * np.eye(1) dynamat = self.dynamod.dynamicsmatrix(start, **kwargs) @@ -121,13 +121,13 @@ def _predict_linear(self, start, randvar, **kwargs): mpred = dynamat @ mean + forcevec crosscov = covar @ dynamat.T cpred = dynamat @ crosscov + diffmat - return RandomVariable(distribution=Normal(mpred, cpred)), crosscov + return Normal(mpred, cpred), crosscov def _predict_nonlinear(self, start, randvar, **kwargs): """ Executes unscented transform! """ - mean, covar = randvar.mean(), randvar.cov() + mean, covar = randvar.mean, randvar.cov if np.isscalar(mean) and np.isscalar(covar): mean, covar = mean * np.ones(1), covar * np.eye(1) sigmapts = self.ut.sigma_points(mean, covar) @@ -136,7 +136,7 @@ def _predict_nonlinear(self, start, randvar, **kwargs): mpred, cpred, crosscov = self.ut.estimate_statistics( proppts, sigmapts, diffmat, mean ) - return RandomVariable(distribution=Normal(mpred, cpred)), crosscov + return Normal(mpred, cpred), crosscov def update(self, time, randvar, data, **kwargs): return _discrete_unskalman_update( @@ -152,7 +152,7 @@ def _discrete_unskalman_update(time, randvar, data, measmod, ut, **kwargs): def _update_discrete_linear(time, randvar, data, measmod, **kwargs): - mpred, cpred = randvar.mean(), randvar.cov() + mpred, cpred = randvar.mean, randvar.cov if np.isscalar(mpred) and np.isscalar(cpred): mpred, cpred = mpred * np.ones(1), cpred * np.eye(1) measmat = measmod.dynamicsmatrix(time, **kwargs) @@ -162,11 +162,11 @@ def _update_discrete_linear(time, randvar, data, measmod, **kwargs): ccest = cpred @ measmat.T mean = mpred + ccest @ np.linalg.solve(covest, data - meanest) cov = cpred - ccest @ np.linalg.solve(covest.T, ccest.T) - return RandomVariable(distribution=Normal(mean, cov)), covest, ccest, meanest + return Normal(mean, cov), covest, ccest, meanest def _update_discrete_nonlinear(time, randvar, data, measmod, ut, **kwargs): - mpred, cpred = randvar.mean(), randvar.cov() + mpred, cpred = randvar.mean, randvar.cov if np.isscalar(mpred) and np.isscalar(cpred): mpred, cpred = mpred * np.ones(1), cpred * np.eye(1) sigmapts = ut.sigma_points(mpred, cpred) @@ -175,4 +175,4 @@ def _update_discrete_nonlinear(time, randvar, data, measmod, ut, **kwargs): meanest, covest, ccest = ut.estimate_statistics(proppts, sigmapts, meascov, mpred) mean = mpred + ccest @ np.linalg.solve(covest, data - meanest) cov = cpred - ccest @ np.linalg.solve(covest.T, ccest.T) - return RandomVariable(distribution=Normal(mean, cov)), covest, ccest, meanest + return Normal(mean, cov), covest, ccest, meanest diff --git a/src/probnum/filtsmooth/statespace/continuous/linearsdemodel.py b/src/probnum/filtsmooth/statespace/continuous/linearsdemodel.py index 71c89016db..ab83d8e9a0 100644 --- a/src/probnum/filtsmooth/statespace/continuous/linearsdemodel.py +++ b/src/probnum/filtsmooth/statespace/continuous/linearsdemodel.py @@ -10,8 +10,7 @@ import numpy as np import scipy.linalg from probnum.filtsmooth.statespace.continuous import continuousmodel -from probnum.prob import RandomVariable -from probnum.prob.distributions import Normal +from probnum.prob import Normal __all__ = 
["LinearSDEModel", "LTISDEModel"] @@ -96,20 +95,20 @@ def chapmankolmogorov(self, start, stop, step, randvar, **kwargs): By default, we assume that ``randvar`` is Gaussian. """ - if not issubclass(type(randvar.distribution), Normal): + if not issubclass(type(randvar), Normal): errormsg = ( "Closed form solution for Chapman-Kolmogorov " "equations in linear SDE models is only " "available for Gaussian initial conditions." ) raise ValueError(errormsg) - mean, covar = randvar.mean(), randvar.cov() + mean, covar = randvar.mean, randvar.cov time = start while time < stop: meanincr, covarincr = self._increment(time, mean, covar, **kwargs) mean, covar = mean + step * meanincr, covar + step * covarincr time = time + step - return RandomVariable(distribution=Normal(mean, covar)), None + return Normal(mean, covar), None def _increment(self, time, mean, covar, **kwargs): """ @@ -205,13 +204,13 @@ def chapmankolmogorov(self, start, stop, step, randvar, **kwargs): and Eq. 6.41 and Eq. 6.42 in Applied SDEs. """ - mean, cov = randvar.mean(), randvar.cov() + mean, cov = randvar.mean, randvar.cov if np.isscalar(mean) and np.isscalar(cov): mean, cov = mean * np.ones(1), cov * np.eye(1) increment = stop - start newmean = self._predict_mean(increment, mean, **kwargs) newcov, crosscov = self._predict_covar(increment, cov, **kwargs) - return RandomVariable(distribution=Normal(newmean, newcov)), crosscov + return Normal(newmean, newcov), crosscov def _predict_mean(self, h, mean, **kwargs): """ diff --git a/src/probnum/filtsmooth/statespace/discrete/discretegaussianmodel.py b/src/probnum/filtsmooth/statespace/discrete/discretegaussianmodel.py index 9f3e139ed2..0d907c4587 100644 --- a/src/probnum/filtsmooth/statespace/discrete/discretegaussianmodel.py +++ b/src/probnum/filtsmooth/statespace/discrete/discretegaussianmodel.py @@ -3,8 +3,7 @@ x_{i+1} = N(g(i, x_i), S(i)) """ -from probnum.prob import RandomVariable -from probnum.prob.distributions import Normal +from probnum.prob import Normal from probnum.filtsmooth.statespace.discrete import discretemodel @@ -76,7 +75,7 @@ def sample(self, time, state, **kwargs): """ dynavl = self.dynamics(time, state, **kwargs) diffvl = self.diffusionmatrix(time, **kwargs) - rv = RandomVariable(distribution=Normal(dynavl, diffvl)) + rv = Normal(dynavl, diffvl) return rv.sample() def pdf(self, loc, time, state, **kwargs): diff --git a/src/probnum/linalg/linearsolvers/linearsolvers.py b/src/probnum/linalg/linearsolvers/linearsolvers.py index 217256be88..38520f8846 100644 --- a/src/probnum/linalg/linearsolvers/linearsolvers.py +++ b/src/probnum/linalg/linearsolvers/linearsolvers.py @@ -453,15 +453,15 @@ def _init_solver(A, b, A0, Ainv0, x0, assume_A): "Cannot use prior uncertainty on both the matrix (inverse) and the " "solution. The latter will be ignored." 
) - x0 = x0.mean() + x0 = x0.mean # Extract information from priors # System matrix is symmetric if isinstance(A0, prob.RandomVariable): - if isinstance(A0.cov(), linops.SymmetricKronecker) and "sym" not in assume_A: + if isinstance(A0.cov, linops.SymmetricKronecker) and "sym" not in assume_A: assume_A += "sym" if isinstance(Ainv0, prob.RandomVariable): - if isinstance(Ainv0.cov(), linops.SymmetricKronecker) and "sym" not in assume_A: + if isinstance(Ainv0.cov, linops.SymmetricKronecker) and "sym" not in assume_A: assume_A += "sym" # System matrix is NOT stochastic if ( diff --git a/src/probnum/linalg/linearsolvers/matrixbased.py b/src/probnum/linalg/linearsolvers/matrixbased.py index 9fe3225f4d..5abbd955c5 100644 --- a/src/probnum/linalg/linearsolvers/matrixbased.py +++ b/src/probnum/linalg/linearsolvers/matrixbased.py @@ -384,8 +384,8 @@ def _get_prior_params(self, A0, Ainv0, x0, b): # Prior on Ainv specified if not isinstance(A0, prob.RandomVariable) and Ainv0 is not None: if isinstance(Ainv0, prob.RandomVariable): - Ainv0_mean = Ainv0.mean() - Ainv0_covfactor = Ainv0.cov().A + Ainv0_mean = Ainv0.mean + Ainv0_covfactor = Ainv0.cov.A else: self.is_calib_covclass = True Ainv0_mean = Ainv0 @@ -394,7 +394,7 @@ def _get_prior_params(self, A0, Ainv0, x0, b): if A0 is not None: A0_mean = A0 elif isinstance(Ainv0, prob.RandomVariable): - A0_mean = Ainv0.mean().inv() + A0_mean = Ainv0.mean.inv() else: A0_mean = Ainv0.inv() except AttributeError: @@ -403,7 +403,7 @@ def _get_prior_params(self, A0, Ainv0, x0, b): "This operation is computationally costly! Specify an inverse " "prior (mean) instead." ) - A0_mean = np.linalg.inv(Ainv0.mean()) + A0_mean = np.linalg.inv(Ainv0.mean) except NotImplementedError: A0_mean = linops.Identity(self.n) warnings.warn( @@ -417,8 +417,8 @@ def _get_prior_params(self, A0, Ainv0, x0, b): # Prior on A specified elif A0 is not None and not isinstance(Ainv0, prob.RandomVariable): if isinstance(A0, prob.RandomVariable): - A0_mean = A0.mean() - A0_covfactor = A0.cov().A + A0_mean = A0.mean + A0_covfactor = A0.cov.A else: self.is_calib_covclass = True A0_mean = A0 @@ -427,7 +427,7 @@ def _get_prior_params(self, A0, Ainv0, x0, b): if Ainv0 is not None: Ainv0_mean = Ainv0 elif isinstance(A0, prob.RandomVariable): - Ainv0_mean = A0.mean().inv() + Ainv0_mean = A0.mean.inv() else: Ainv0_mean = A0.inv() except AttributeError: @@ -436,7 +436,7 @@ def _get_prior_params(self, A0, Ainv0, x0, b): "This operation is computationally costly! " "Specify an inverse prior (mean)." 
) - Ainv0_mean = np.linalg.inv(A0.mean()) + Ainv0_mean = np.linalg.inv(A0.mean) except NotImplementedError: Ainv0_mean = linops.Identity(self.n) warnings.warn( @@ -450,10 +450,10 @@ def _get_prior_params(self, A0, Ainv0, x0, b): elif isinstance(A0, prob.RandomVariable) and isinstance( Ainv0, prob.RandomVariable ): - A0_mean = A0.mean() - A0_covfactor = A0.cov().A - Ainv0_mean = Ainv0.mean() - Ainv0_covfactor = Ainv0.cov().A + A0_mean = A0.mean + A0_covfactor = A0.cov.A + Ainv0_mean = Ainv0.mean + Ainv0_covfactor = Ainv0.cov.A return A0_mean, A0_covfactor, Ainv0_mean, Ainv0_covfactor else: raise NotImplementedError @@ -756,19 +756,12 @@ def _matvec(x): _Ainv_covfactor = self.Ainv_covfactor0 # Create output random variables - A = prob.RandomVariable( - shape=(self.n, self.n), - dtype=float, - distribution=prob.Normal( - mean=self.A_mean, cov=linops.SymmetricKronecker(A=_A_covfactor) - ), + A = prob.random_variable.Normal( + mean=self.A_mean, cov=linops.SymmetricKronecker(A=_A_covfactor) ) - Ainv = prob.RandomVariable( - shape=(self.n, self.n), - dtype=float, - distribution=prob.Normal( - mean=self.Ainv_mean, cov=linops.SymmetricKronecker(A=_Ainv_covfactor) - ), + + Ainv = prob.random_variable.Normal( + mean=self.Ainv_mean, cov=linops.SymmetricKronecker(A=_Ainv_covfactor) ) # Induced distribution on x via Ainv # Exp(x) = Ainv b, Cov(x) = 1/2 (W b'Wb + Wbb'W) @@ -782,11 +775,7 @@ def _mv(x): shape=(self.n, self.n), dtype=float, matvec=_mv, matmat=_mv ) - x = prob.RandomVariable( - shape=(self.n,), - dtype=float, - distribution=prob.Normal(mean=self.x_mean.ravel(), cov=cov_op), - ) + x = prob.random_variable.Normal(mean=self.x_mean.ravel(), cov=cov_op) # Compute trace of solution covariance: tr(Cov(x)) self.trace_sol_cov = np.real_if_close( @@ -1153,7 +1142,7 @@ def _get_prior_params(self, A0, Ainv0, x0, b): """ # Right hand side mean - b_mean = b.sample(1) # TODO: build prior model for rhs and change to b.mean() + b_mean = b.sample(1) # TODO: build prior model for rhs and change to b.mean # No matrix priors specified if A0 is None and Ainv0 is None: @@ -1189,8 +1178,8 @@ def _get_prior_params(self, A0, Ainv0, x0, b): # Prior on Ainv specified if not isinstance(A0, prob.RandomVariable) and Ainv0 is not None: if isinstance(Ainv0, prob.RandomVariable): - Ainv0_mean = Ainv0.mean() - Ainv0_covfactor = Ainv0.cov().A + Ainv0_mean = Ainv0.mean + Ainv0_covfactor = Ainv0.cov.A else: Ainv0_mean = Ainv0 Ainv0_covfactor = Ainv0 # Symmetric posterior correspondence @@ -1198,7 +1187,7 @@ def _get_prior_params(self, A0, Ainv0, x0, b): if A0 is not None: A0_mean = A0 elif isinstance(Ainv0, prob.RandomVariable): - A0_mean = Ainv0.mean().inv() + A0_mean = Ainv0.mean.inv() else: A0_mean = Ainv0.inv() except AttributeError: @@ -1207,7 +1196,7 @@ def _get_prior_params(self, A0, Ainv0, x0, b): "This operation is computationally costly! Specify an inverse " "prior (mean) instead." 
) - A0_mean = np.linalg.inv(Ainv0.mean()) + A0_mean = np.linalg.inv(Ainv0.mean) except NotImplementedError: A0_mean = linops.Identity(self.n) warnings.warn( @@ -1223,8 +1212,8 @@ def _get_prior_params(self, A0, Ainv0, x0, b): # Prior on A specified elif A0 is not None and not isinstance(Ainv0, prob.RandomVariable): if isinstance(A0, prob.RandomVariable): - A0_mean = A0.mean() - A0_covfactor = A0.cov().A + A0_mean = A0.mean + A0_covfactor = A0.cov.A else: A0_mean = A0 A0_covfactor = A0 # Symmetric posterior correspondence @@ -1232,7 +1221,7 @@ def _get_prior_params(self, A0, Ainv0, x0, b): if Ainv0 is not None: Ainv0_mean = Ainv0 elif isinstance(A0, prob.RandomVariable): - Ainv0_mean = A0.mean().inv() + Ainv0_mean = A0.mean.inv() else: Ainv0_mean = A0.inv() except AttributeError: @@ -1241,7 +1230,7 @@ def _get_prior_params(self, A0, Ainv0, x0, b): "This operation is computationally costly! Specify an inverse " "prior (mean) instead." ) - Ainv0_mean = np.linalg.inv(A0.mean()) + Ainv0_mean = np.linalg.inv(A0.mean) except NotImplementedError: Ainv0_mean = linops.Identity(self.n) warnings.warn( @@ -1255,10 +1244,10 @@ def _get_prior_params(self, A0, Ainv0, x0, b): elif isinstance(A0, prob.RandomVariable) and isinstance( Ainv0, prob.RandomVariable ): - A0_mean = A0.mean() - A0_covfactor = A0.cov().A - Ainv0_mean = Ainv0.mean() - Ainv0_covfactor = Ainv0.cov().A + A0_mean = A0.mean + A0_covfactor = A0.cov.A + Ainv0_mean = Ainv0.mean + Ainv0_covfactor = Ainv0.cov.A return A0_mean, A0_covfactor, Ainv0_mean, Ainv0_covfactor, b_mean else: raise NotImplementedError diff --git a/src/probnum/prob/__init__.py b/src/probnum/prob/__init__.py index 314152b6f9..d350bc7baf 100644 --- a/src/probnum/prob/__init__.py +++ b/src/probnum/prob/__init__.py @@ -12,12 +12,12 @@ """ -from .randomvariable import * -from .distributions import * +from ._random_variable import RandomVariable, asrandvar + +from .random_variable import * # Public classes and functions. Order is reflected in documentation. -__all__ = ["RandomVariable", "Distribution", "Dirac", "Normal", "asrandvar"] +__all__ = ["RandomVariable", "Dirac", "Normal", "asrandvar"] # Set correct module paths. Corrects links and module paths in documentation. RandomVariable.__module__ = "probnum.prob" -Distribution.__module__ = "probnum.prob" diff --git a/src/probnum/prob/_random_variable.py b/src/probnum/prob/_random_variable.py new file mode 100644 index 0000000000..0504cb520a --- /dev/null +++ b/src/probnum/prob/_random_variable.py @@ -0,0 +1,904 @@ +""" +Random Variables. + +This module implements random variables. Random variables are the main in- and outputs +of probabilistic numerical methods. +""" + +from typing import Any, Callable, Dict, Generic, Optional, Tuple, TypeVar, Union + +try: + # functools.cached_property is only available in Python >=3.8 + from functools import cached_property +except ImportError: + from cached_property import cached_property + +import numpy as np +import scipy.stats +import scipy.sparse +import scipy._lib._util + +from probnum import utils as _utils + + +ValueType = TypeVar("ValueType") +RandomStateType = Union[ # see scipy._lib._util.check_random_state + None, int, np.random.RandomState, np.random.Generator +] + + +# pylint: disable=too-many-instance-attributes,too-many-public-methods +class RandomVariable(Generic[ValueType]): + """ + Random variables are the main objects used by probabilistic numerical methods. 
+
+    Every probabilistic numerical method takes a random variable encoding the prior
+    distribution as input and outputs a random variable whose distribution encodes the
+    uncertainty arising from finite computation. The generic signature of a
+    probabilistic numerical method is:
+
+    ``output_rv = probnum_method(input_rv, method_params)``
+
+    In practice, most random variables used by methods in ProbNum have Dirac or
+    Gaussian measure.
+
+    Instances of :class:`RandomVariable` can be added, multiplied, etc. with arrays
+    and linear operators. This may change the distribution of the result, and not
+    all previously available methods are necessarily retained.
+
+    Parameters
+    ----------
+    shape : int or tuple of int, optional
+        Shape of realizations of this random variable.
+    dtype : numpy.dtype or object, optional
+        Data type of realizations of this random variable. If of type ``object``,
+        it will be converted to ``numpy.dtype``.
+    random_state : None, int, numpy.random.RandomState or numpy.random.Generator, optional
+        Random state used when sampling from this random variable.
+    parameters : dict, optional
+        Parameters of the associated probability distribution.
+    sample, in_support, pdf, logpdf, cdf, logcdf, quantile : callable, optional
+        Implementations of the corresponding methods of this random variable.
+    mode, median, mean, cov, var, std, entropy : callable, optional
+        Callables returning the corresponding properties of this random variable.
+    properties : dict, optional
+        Dictionary of fixed property values; mutually exclusive with the
+        corresponding callable arguments.
+
+    See Also
+    --------
+    asrandvar : Transform into a :class:`RandomVariable`.
+    """
+
+    # pylint: disable=too-many-arguments,too-many-locals
+    def __init__(
+        self,
+        shape: Optional[Union[int, Tuple[int, ...]]] = None,
+        dtype: Optional[np.dtype] = None,
+        random_state: Optional[RandomStateType] = None,
+        parameters: Optional[Dict[str, Any]] = None,
+        sample: Optional[Callable[[int], ValueType]] = None,
+        in_support: Optional[Callable[[ValueType], bool]] = None,
+        pdf: Optional[Callable[[ValueType], float]] = None,
+        logpdf: Optional[Callable[[ValueType], float]] = None,
+        cdf: Optional[Callable[[ValueType], float]] = None,
+        logcdf: Optional[Callable[[ValueType], float]] = None,
+        quantile: Optional[Callable[[float], ValueType]] = None,
+        mode: Optional[Callable[[], ValueType]] = None,
+        median: Optional[Callable[[], ValueType]] = None,
+        mean: Optional[Callable[[], ValueType]] = None,
+        cov: Optional[Callable[[], ValueType]] = None,
+        var: Optional[Callable[[], ValueType]] = None,
+        std: Optional[Callable[[], ValueType]] = None,
+        entropy: Optional[Callable[[], float]] = None,
+        properties: Optional[Dict[str, Any]] = None,
+    ):
+        """Create a new random variable."""
+        self._shape = RandomVariable._check_shape(shape)
+        self._dtype = dtype
+
+        self._random_state = scipy._lib._util.check_random_state(random_state)
+
+        # Probability distribution of the random variable
+        self._parameters = parameters.copy() if parameters is not None else {}
+
+        self.__sample = sample
+
+        self.__in_support = in_support
+        self.__pdf = pdf
+        self.__logpdf = logpdf
+        self.__cdf = cdf
+        self.__logcdf = logcdf
+        self.__quantile = quantile
+
+        # Properties of the random variable
+        if properties is None:
+            properties = {}
+
+        self.__mode = RandomVariable._get_property_fn("mode", mode, properties)
+        self.__median = RandomVariable._get_property_fn("median", median, properties)
+        self.__mean = RandomVariable._get_property_fn("mean", mean, properties)
+        self.__cov = RandomVariable._get_property_fn("cov", cov, properties)
+        self.__var = RandomVariable._get_property_fn("var", var, properties)
+        self.__std = RandomVariable._get_property_fn("std", std, properties)
+        self.__entropy = RandomVariable._get_property_fn("entropy", entropy, properties)
+
+        # Further processing
+        if self._shape is None or self._dtype is None:
+            inferred_shape, inferred_dtype = self._infer_shape_and_dtype()
+
+            if self._shape is None:
+                self._shape = inferred_shape
+
+            if self._dtype is None:
+                self._dtype =
inferred_dtype + + @staticmethod + def _check_shape( + shape: Optional[Union[int, Tuple[int, ...]]] + ) -> Optional[Tuple[int, ...]]: + if shape is None: + return None + elif isinstance(shape, tuple) and all( + isinstance(entry, int) for entry in shape + ): + return shape + elif isinstance(shape, int): + return (shape,) + else: + raise TypeError("The given shape is not an int or a tuple of ints.") + + @staticmethod + def _get_property_fn( + name: str, + argument: Optional[Union[ValueType, np.floating]], + properties_dict: Optional[Dict[str, Union[ValueType, np.floating]]], + ) -> Callable[[], Union[ValueType, np.floating]]: + if name in properties_dict and properties_dict[name] is not None: + if argument is not None: + raise ValueError( + f"The property '{name}' was specified both directly in a " + f"constructor argument and via the 'properties' dictionary." + ) + + return lambda: properties_dict[name] + + return argument + + def _infer_shape_and_dtype(self): + # Infer shape and dtype based on a sample + try: + sample = self.sample() + + if np.isscalar(sample): + shape = () + else: + shape = sample.shape + + return shape, sample.dtype + except NotImplementedError: + pass + + # Infer shape and dtype based on mean + try: + if np.isscalar(self.mean): + shape = () + else: + shape = self.mean.shape + + return shape, self.mean.dtype + except NotImplementedError: + pass + + raise NotImplementedError( + "The shape of the random variable could not be inferred automatically" + ) + + def __repr__(self): + if self.dtype is None: + dt = "unspecified dtype" + else: + dt = "dtype=" + str(self.dtype) + return "<%s %s with %s>" % (str(self.shape), self.__class__.__name__, dt) + + @property + def shape(self) -> Tuple[int, ...]: + """Shape of realizations of the random variable.""" + return self._shape + + @cached_property + def ndim(self) -> int: + return len(self._shape) + + @cached_property + def size(self) -> int: + return int(np.prod(self._shape)) + + @property + def dtype(self) -> np.dtype: + """Data type of (elements of) a realization of this random variable.""" + return self._dtype + + @property + def random_state(self) -> Union[np.random.RandomState, np.random.Generator]: + """Random state of the random variable. + + This attribute defines the RandomState object to use for drawing + realizations from this random variable. + If None (or np.random), the global np.random state is used. + If integer, it is used to seed the local :class:`~numpy.random.RandomState` + instance. + """ + return self._random_state + + @random_state.setter + def random_state(self, seed: RandomStateType): + """ Get or set the RandomState object of the underlying distribution. + + This can be either None or an existing RandomState object. + If None (or np.random), use the RandomState singleton used by np.random. + If already a RandomState instance, use it. + If an int, use a new RandomState instance seeded with seed. + """ + self._random_state = scipy._lib._util.check_random_state(seed) + + @property + def parameters(self): + """ + Parameters of the probability distribution. + + The parameters of the distribution such as mean, variance, et cetera stored in a + ``dict``. + """ + return self._parameters.copy() + + @staticmethod + def _check_property_value( + name: str, + value: Any, + shape: Optional[Tuple[int, ...]] = None, + dtype: Optional[np.dtype] = None, + ): + if shape is not None: + if value.shape != shape: + raise ValueError( + f"The {name} of the random variable does not have the correct " + f"shape. 
Expected {shape} but got {value.shape}." + ) + + if dtype is not None: + if not np.issubdtype(value.dtype, dtype): + raise ValueError( + f"The {name} of the random variable does not have the correct " + f"dtype. Expected {dtype.name} but got {value.dtype.name}." + ) + + @cached_property + def mode(self): + """ + Mode of the random variable. + + Returns + ------- + mode : float + The mode of the random variable. + """ + if self.__mode is None: + raise NotImplementedError + + mode = self.__mode() + + RandomVariable._check_property_value( + "mode", mode, shape=self._shape, dtype=self._dtype, + ) + + return mode + + @cached_property + def median(self): + """ + Median of the random variable. + + Returns + ------- + median : float + The median of the distribution. + """ + + if self._shape != (): + raise NotImplementedError( + "The median is only defined for scalar random variables." + ) + + if self.__median is None: + try: + median = self.quantile(0.5) + except NotImplementedError: + raise NotImplementedError + else: + median = self.__median() + + RandomVariable._check_property_value( + "median", median, shape=(), dtype=self._dtype, + ) + + return median + + @cached_property + def mean(self): + """ + Mean :math:`\\mathbb{E}(X)` of the distribution. + + Returns + ------- + mean : array-like + The mean of the distribution. + """ + if self.__mean is None: + raise NotImplementedError + + mean = self.__mean() + + RandomVariable._check_property_value( + "mean", mean, shape=self._shape, dtype=self._dtype, + ) + + return mean + + @cached_property + def cov(self): + """ + Covariance :math:`\\operatorname{Cov}(X) = \\mathbb{E}((X-\\mathbb{E}(X))(X-\\mathbb{E}(X))^\\top)` + of the random variable. + + Returns + ------- + cov : array-like + The kernels of the random variable. + """ # pylint: disable=line-too-long + if self.__cov is None: + raise NotImplementedError + + cov = self.__cov() + + RandomVariable._check_property_value( + "covariance", + cov, + shape=(self.size, self.size) if self.ndim > 0 else (), + dtype=self._dtype, + ) + + return cov + + @cached_property + def var(self): + """ + Variance :math:`\\operatorname{Var}(X) = \\mathbb{E}((X-\\mathbb{E}(X))^2)` of + the distribution. + + Returns + ------- + var : array-like + The variance of the distribution. + """ + if self.__var is None: + try: + var = np.diag(self.cov).reshape(self._shape).copy() + except NotImplementedError: + raise NotImplementedError + else: + var = self.__var() + + RandomVariable._check_property_value( + "variance", var, shape=self._shape, dtype=self._dtype, + ) + + return var + + @cached_property + def std(self): + """ + Standard deviation of the distribution. + + Returns + ------- + std : array-like + The standard deviation of the distribution. + """ + if self.__std is None: + try: + std = np.sqrt(self.var) + except NotImplementedError: + raise NotImplementedError + else: + std = self.__std() + + RandomVariable._check_property_value( + "standard deviation", std, shape=self._shape, dtype=self._dtype, + ) + + return std + + @cached_property + def entropy(self): + if self.__entropy is None: + raise NotImplementedError + + entropy = self.__entropy() + + RandomVariable._check_property_value( + "entropy", entropy, shape=(), dtype=np.floating, + ) + + return entropy + + def in_support(self, x: ValueType) -> bool: + if self.__in_support is None: + raise NotImplementedError + + return self.__in_support(x) + + def sample(self, size=()): + """ + Draw realizations from a random variable. 
+ + Parameters + ---------- + size : tuple + Size of the drawn sample of realizations. + + Returns + ------- + sample : array-like + Sample of realizations with the given ``size`` and the inherent ``shape``. + """ + if self.__sample is None: + raise NotImplementedError("No sampling method provided.") + + return self.__sample(size=size) + + def pdf(self, x): + """ + Probability density or mass function. + + Parameters + ---------- + x : array-like + Evaluation points of the probability density / mass function. + + Returns + ------- + p : array-like + Value of the probability density / mass function at the given points. + + """ + if self.__pdf is not None: + return self.__pdf(x) + if self.__logpdf is not None: + return np.exp(self.__logpdf(x)) + raise NotImplementedError( + "The function 'pdf' is not implemented for object of class {}".format( + type(self).__name__ + ) + ) + + def logpdf(self, x): + """ + Natural logarithm of the probability density function. + + Parameters + ---------- + x : array-like + Evaluation points of the log-probability density/mass function. + + Returns + ------- + logp : array-like + Value of the log-probability density / mass function at the given points. + """ + if self.__logpdf is not None: + return self.__logpdf(x) + elif self.__pdf is not None: + return np.log(self.__pdf(x)) + else: + raise NotImplementedError( + f"The function 'logpdf' is not implemented for object of class " + f"{type(self).__name__}." + ) + + def cdf(self, x): + """ + Cumulative distribution function. + + Parameters + ---------- + x : array-like + Evaluation points of the cumulative distribution function. + + Returns + ------- + q : array-like + Value of the cumulative density function at the given points. + """ + if self.__cdf is not None: + return self.__cdf(x) + elif self.__logcdf is not None: + return np.exp(self.__logcdf(x)) + else: + raise NotImplementedError( + "The function 'cdf' is not implemented for object of class {}".format( + type(self).__name__ + ) + ) + + def logcdf(self, x): + """ + Log-cumulative distribution function. + + Parameters + ---------- + x : array-like + Evaluation points of the cumulative distribution function. + + Returns + ------- + q : array-like + Value of the log-cumulative density function at the given points. + """ + if self.__logcdf is not None: + return self.__logcdf(x) + elif self.__cdf is not None: + return np.log(self.__cdf(x)) + else: + raise NotImplementedError( + f"The function 'logcdf' is not implemented for object of class " + f"{type(self).__name__}." + ) + + def quantile(self, p: Union[float, np.floating]) -> ValueType: + if self.__quantile is None: + raise NotImplementedError + + return self.__quantile(p) + + def reshape(self, newshape): + """ + Give a new shape to a random variable. + + Parameters + ---------- + newshape : int or tuple of ints + New shape for the random variable. It must be compatible with the original + shape. + + Returns + ------- + reshaped_rv : ``self`` with the new dimensions of ``shape``. + """ + raise NotImplementedError( + f"Reshaping not implemented for random variables of type: " + f"{self.__class__.__name__}." + ) + + def transpose(self, *axes): + """ + Transpose the random variable. + + Parameters + ---------- + axes : None, tuple of ints, or n ints + See documentation of numpy.ndarray.transpose. + + Returns + ------- + transposed_rv : The transposed random variable. + """ + raise NotImplementedError( + f"Transposition not implemented for random variables of type: " + f"{self.__class__.__name__}." 
+ ) + + T = property(transpose) + + # Unary arithmetic operations + + def __neg__(self) -> "RandomVariable": + return NotImplemented + + def __pos__(self) -> "RandomVariable": + return NotImplemented + + def __abs__(self) -> "RandomVariable": + return RandomVariable( + shape=self.shape, + dtype=self.dtype, + random_state=_utils.derive_random_seed(self.random_state), + sample=lambda size: abs(self.sample(size=size)), + ) + + # Binary arithmetic operations + + __array_ufunc__ = None + """ + This prevents numpy from calling elementwise arithmetic + operations allowing expressions like: y = np.array([1, 1]) + RV + to call the arithmetic operations defined by RandomVariable + instead of elementwise. Thus no array of RandomVariables but a + RandomVariable with the correct shape is returned. + """ + + def __add__(self, other) -> "RandomVariable": + if not isinstance(other, RandomVariable): + return self + asrandvar(other) + + # pylint: disable=import-outside-toplevel + from .random_variable._arithmetic import add + + return add(self, other) + + def __radd__(self, other) -> "RandomVariable": + if not isinstance(other, RandomVariable): + return asrandvar(other) + self + + return NotImplemented + + def __sub__(self, other) -> "RandomVariable": + if not isinstance(other, RandomVariable): + return self - asrandvar(other) + + # pylint: disable=import-outside-toplevel + from .random_variable._arithmetic import sub + + return sub(self, other) + + def __rsub__(self, other) -> "RandomVariable": + if not isinstance(other, RandomVariable): + return asrandvar(other) - self + + return NotImplemented + + def __mul__(self, other) -> "RandomVariable": + if not isinstance(other, RandomVariable): + return self * asrandvar(other) + + # pylint: disable=import-outside-toplevel + from .random_variable._arithmetic import mul + + return mul(self, other) + + def __rmul__(self, other) -> "RandomVariable": + if not isinstance(other, RandomVariable): + return asrandvar(other) * self + + return NotImplemented + + def __matmul__(self, other) -> "RandomVariable": + if not isinstance(other, RandomVariable): + return self @ asrandvar(other) + + # pylint: disable=import-outside-toplevel + from .random_variable._arithmetic import matmul + + return matmul(self, other) + + def __rmatmul__(self, other) -> "RandomVariable": + if not isinstance(other, RandomVariable): + return asrandvar(other) @ self + + return NotImplemented + + def __truediv__(self, other) -> "RandomVariable": + if not isinstance(other, RandomVariable): + return self / asrandvar(other) + + # pylint: disable=import-outside-toplevel + from .random_variable._arithmetic import truediv + + return truediv(self, other) + + def __rtruediv__(self, other) -> "RandomVariable": + if not isinstance(other, RandomVariable): + return asrandvar(other) / self + + return NotImplemented + + def __floordiv__(self, other) -> "RandomVariable": + if not isinstance(other, RandomVariable): + return self // asrandvar(other) + + # pylint: disable=import-outside-toplevel + from .random_variable._arithmetic import floordiv + + return floordiv(self, other) + + def __rfloordiv__(self, other) -> "RandomVariable": + if not isinstance(other, RandomVariable): + return asrandvar(other) // self + + return NotImplemented + + def __mod__(self, other) -> "RandomVariable": + if not isinstance(other, RandomVariable): + return self % asrandvar(other) + + # pylint: disable=import-outside-toplevel + from .random_variable._arithmetic import mod + + return mod(self, other) + + def __rmod__(self, other) -> 
"RandomVariable": + if not isinstance(other, RandomVariable): + return asrandvar(other) % self + + return NotImplemented + + def __divmod__(self, other) -> "RandomVariable": + if not isinstance(other, RandomVariable): + return divmod(self, asrandvar(other)) + + # pylint: disable=import-outside-toplevel + from .random_variable._arithmetic import divmod_ + + return divmod_(self, other) + + def __rdivmod__(self, other) -> "RandomVariable": + if not isinstance(other, RandomVariable): + return divmod(asrandvar(other), self) + + return NotImplemented + + def __pow__(self, other) -> "RandomVariable": + if not isinstance(other, RandomVariable): + return self ** asrandvar(other) + + # pylint: disable=import-outside-toplevel + from .random_variable._arithmetic import pow_ + + return pow_(self, other) + + def __rpow__(self, other) -> "RandomVariable": + if not isinstance(other, RandomVariable): + return asrandvar(other) ** self + + return NotImplemented + + +def asrandvar(obj) -> RandomVariable: + """ + Return ``obj`` as a :class:`RandomVariable`. + + Converts scalars, (sparse) arrays or distribution classes to a + :class:`RandomVariable`. + + Parameters + ---------- + obj : object + Argument to be represented as a :class:`RandomVariable`. + + Returns + ------- + rv : RandomVariable + The object `obj` as a :class:`RandomVariable`. + + See Also + -------- + RandomVariable : Class representing random variables. + + Examples + -------- + >>> from scipy.stats import bernoulli + >>> from probnum.prob import asrandvar + >>> bern = bernoulli(p=0.5) + >>> bern.random_state = 42 # Seed for reproducibility + >>> b = asrandvar(bern) + >>> b.sample(size=5) + array([1, 1, 1, 0, 0]) + """ + + # pylint: disable=import-outside-toplevel + from probnum.prob.random_variable import Dirac + + # RandomVariable + if isinstance(obj, RandomVariable): + return obj + # Scalar + elif np.isscalar(obj): + return Dirac(support=obj) + # Numpy array, sparse array or Linear Operator + elif isinstance( + obj, (np.ndarray, scipy.sparse.spmatrix, scipy.sparse.linalg.LinearOperator) + ): + return Dirac(support=obj) + # Scipy random variable + elif isinstance( + obj, + ( + scipy.stats._distn_infrastructure.rv_frozen, + scipy.stats._multivariate.multi_rv_frozen, + ), + ): + return _scipystats_to_rv(scipyrv=obj) + else: + raise ValueError( + f"Argument of type {type(obj)} cannot be converted to a random variable." + ) + + +def _scipystats_to_rv( + scipyrv: Union[ + scipy.stats._distn_infrastructure.rv_frozen, + scipy.stats._multivariate.multi_rv_frozen, + ] +): + """ + Transform SciPy distributions to Probnum :class:`RandomVariable`s. + + Parameters + ---------- + scipyrv : + SciPy distribution. + + Returns + ------- + dist : RandomVariable + ProbNum random variable. 
+ + """ + + # pylint: disable=import-outside-toplevel + from probnum.prob.random_variable import Normal + + # Univariate distributions (implemented in this package) + if isinstance(scipyrv, scipy.stats._distn_infrastructure.rv_frozen): + # Normal distribution + if scipyrv.dist.name == "norm": + return Normal( + mean=scipyrv.mean(), + cov=scipyrv.var(), + random_state=scipyrv.random_state, + ) + # Multivariate distributions (implemented in this package) + elif isinstance(scipyrv, scipy.stats._multivariate.multi_rv_frozen): + # Multivariate normal + if scipyrv.__class__.__name__ == "multivariate_normal_frozen": + return Normal( + mean=scipyrv.mean, cov=scipyrv.cov, random_state=scipyrv.random_state, + ) + # Generic distributions + if ( + hasattr(scipyrv, "dist") and isinstance(scipyrv.dist, scipy.stats.rv_discrete) + ) or hasattr(scipyrv, "pmf"): + pdf = getattr(scipyrv, "pmf", None) + logpdf = getattr(scipyrv, "logpmf", None) + else: + pdf = getattr(scipyrv, "pdf", None) + logpdf = getattr(scipyrv, "logpdf", None) + + def _wrap_np_scalar(fn): + if fn is None: + return None + + def _wrapper(*args, **kwargs): + res = fn(*args, **kwargs) + + if np.isscalar(res): + return _utils.as_numpy_scalar(res) + + return res + + return _wrapper + + return RandomVariable( + shape=None, # will be inferred automatically + dtype=None, # will be inferred automatically + random_state=getattr(scipyrv, "random_state", None), + sample=_wrap_np_scalar(getattr(scipyrv, "rvs", None)), + in_support=None, # TODO for univariate + pdf=_wrap_np_scalar(pdf), + logpdf=_wrap_np_scalar(logpdf), + cdf=_wrap_np_scalar(getattr(scipyrv, "cdf", None)), + logcdf=_wrap_np_scalar(getattr(scipyrv, "logcdf", None)), + quantile=_wrap_np_scalar(getattr(scipyrv, "ppf", None)), + mode=None, # not offered by scipy.stats + median=_wrap_np_scalar(getattr(scipyrv, "median", None)), + mean=_wrap_np_scalar(getattr(scipyrv, "mean", None)), + cov=_wrap_np_scalar(getattr(scipyrv, "cov", None)), + var=_wrap_np_scalar(getattr(scipyrv, "var", None)), + std=_wrap_np_scalar(getattr(scipyrv, "std", None)), + entropy=_wrap_np_scalar(getattr(scipyrv, "entropy", None)), + ) diff --git a/src/probnum/prob/_randomvariablelist.py b/src/probnum/prob/_randomvariablelist.py index feed935a1a..4a53059714 100644 --- a/src/probnum/prob/_randomvariablelist.py +++ b/src/probnum/prob/_randomvariablelist.py @@ -16,16 +16,16 @@ def __init__(self, rv_list): super().__init__(rv_list) def mean(self): - return np.stack([rv.mean() for rv in self]) + return np.stack([rv.mean for rv in self]) def cov(self): - return np.stack([rv.cov() for rv in self]) + return np.stack([rv.cov for rv in self]) def var(self): - return np.stack([rv.distribution.var() for rv in self]) + return np.stack([rv.var() for rv in self]) def std(self): - return np.stack([rv.distribution.std() for rv in self]) + return np.stack([rv.std() for rv in self]) def __getitem__(self, idx): """Make sure to wrap the result into a _RandomVariableList if necessary""" diff --git a/src/probnum/prob/distributions/__init__.py b/src/probnum/prob/distributions/__init__.py deleted file mode 100644 index 84561e90a5..0000000000 --- a/src/probnum/prob/distributions/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .normal import * -from .dirac import * -from .distribution import * diff --git a/src/probnum/prob/distributions/dirac.py b/src/probnum/prob/distributions/dirac.py deleted file mode 100644 index 030e9e27f4..0000000000 --- a/src/probnum/prob/distributions/dirac.py +++ /dev/null @@ -1,275 +0,0 @@ -""" -Dirac delta 
distribution. -""" -import operator - -import numpy as np - -from probnum.prob.distributions.distribution import Distribution - - -class Dirac(Distribution): - """ - The Dirac delta distribution. - - This distribution models a point mass and can be useful to represent - numbers as random variables with Dirac measure. It has the useful - property that arithmetic operations between a :class:`Dirac` random - variable and an arbitrary :class:`RandomVariable` acts in the same - way as the arithmetic operation with a constant. - - Note, that a Dirac measure does not admit a probability density - function but can be viewed as a distribution (generalized function). - - Parameters - ---------- - support : scalar or array-like or LinearOperator - The support of the dirac delta function. - - See Also - -------- - Distribution : Class representing general probability distribution. - - Examples - -------- - >>> from probnum.prob import RandomVariable, Dirac - >>> dist1 = Dirac(support=0.) - >>> dist2 = Dirac(support=1.) - >>> rv = RandomVariable(distribution=dist1 + dist2) - >>> rv.sample(size=5) - array([1., 1., 1., 1., 1.]) - """ - - def __init__(self, support, random_state=None): - if np.isscalar(support): - _dtype = np.dtype(type(support)) - else: - _dtype = support.dtype - super().__init__( - parameters={"support": support}, dtype=_dtype, random_state=random_state - ) - - def cdf(self, x): - if np.any(x < self.parameters["support"]): - return 0.0 - else: - return 1.0 - - def median(self): - return self.parameters["support"] - - def mode(self): - return self.parameters["support"] - - def mean(self): - return self.parameters["support"] - - def var(self): - return 0.0 - - def cov(self): - if np.isscalar(self.parameters["support"]): - return self.var() - else: - return np.zeros( - (len(self.parameters["support"]), len(self.parameters["support"])) - ) - - def sample(self, size=(), seed=None): - ndims = len(self.shape) - if size == 1 or size == (): # pylint: disable=consider-using-in - return self.parameters["support"] - elif isinstance(size, int) and ndims == 0: - return np.tile(A=self.parameters["support"], reps=size) - elif isinstance(size, int): - return np.tile( - A=self.parameters["support"], reps=[size, *np.repeat(1, ndims)] - ) - else: - return np.tile( - A=self.parameters["support"], reps=tuple([*size, *np.repeat(1, ndims)]) - ) - - def __getitem__(self, key): - """ - Marginalization for multivariate Dirac distributions, expressed by means of - (advanced) indexing, masking and slicing. - - This method - supports all modes of array indexing presented in - - https://numpy.org/doc/1.19/reference/arrays.indexing.html. - - Parameters - ---------- - key : int or slice or ndarray or tuple of None, int, slice, or ndarray - Indices, slice objects and/or boolean masks specifying which entries to keep - while marginalizing over all other entries. 
- """ - return Dirac( - support=self.parameters["support"][key], random_state=self.random_state, - ) - - def reshape(self, newshape): - try: - # Reshape support - return Dirac( - support=self.parameters["support"].reshape(newshape), - random_state=self.random_state, - ) - except ValueError: - raise ValueError( - "Cannot reshape this Dirac distribution to the given shape: {}".format( - str(newshape) - ) - ) - - def _reshape_inplace(self, newshape): - self.parameters["support"].shape = newshape - - def transpose(self, **axes): - return Dirac( - support=self.parameters["support"].transpose(**axes), - random_state=self.random_state, - ) - - # Binary arithmetic operations - def __add__(self, other): - if isinstance(other, Dirac): - support_ = self.parameters["support"] + other.parameters["support"] - return Dirac(support=support_, random_state=self.random_state) - else: - return other.__add__(other=self) - - def __sub__(self, other): - if isinstance(other, Dirac): - support_ = self.parameters["support"] - other.parameters["support"] - return Dirac(support=support_, random_state=self.random_state) - else: - return other.__rsub__(other=self) - - def __mul__(self, other): - if isinstance(other, Dirac): - support_ = self.parameters["support"] * other.parameters["support"] - return Dirac(support=support_, random_state=self.random_state) - else: - return other.__mul__(other=self) - - def __matmul__(self, other): - if isinstance(other, Dirac): - support_ = self.parameters["support"] @ other.parameters["support"] - return Dirac(support=support_, random_state=self.random_state) - else: - return other.__rmatmul__(other=self) - - def __truediv__(self, other): - if isinstance(other, Dirac): - support_ = operator.truediv( - self.parameters["support"], other.parameters["support"] - ) - return Dirac(support=support_, random_state=self.random_state) - else: - return other.__rtruediv__(other=self) - - def __pow__(self, power, modulo=None): - if isinstance(power, Dirac): - support_ = pow( - self.parameters["support"], power.parameters["support"], modulo - ) - return Dirac(support=support_, random_state=self.random_state) - else: - return power.__rpow__(power=self, modulo=modulo) - - # Binary arithmetic operations with reflected (swapped) operands - - def __radd__(self, other): - return other.__add__(other=self) - - def __rsub__(self, other): - return other.__sub__(other=self) - - def __rmul__(self, other): - return other.__mul__(other=self) - - def __rmatmul__(self, other): - return other.__matmul__(other=self) - - def __rtruediv__(self, other): - return other.__truediv__(other=self) - - def __rpow__(self, power, modulo=None): - return power.__pow__(power=self) - - # Augmented arithmetic assignments (+=, -=, *=, ...) 
- # attempting to do the operation in place - - def __iadd__(self, other): - if isinstance(other, Dirac): - support_ = self.parameters["support"] + other.parameters["support"] - self.parameters["support"] = support_ - return self - else: - return NotImplemented - - def __isub__(self, other): - if isinstance(other, Dirac): - support_ = self.parameters["support"] - other.parameters["support"] - self.parameters["support"] = support_ - return self - else: - return NotImplemented - - def __imul__(self, other): - if isinstance(other, Dirac): - support_ = self.parameters["support"] * other.parameters["support"] - self.parameters["support"] = support_ - return self - else: - return NotImplemented - - def __imatmul__(self, other): - if isinstance(other, Dirac): - support_ = self.parameters["support"] @ other.parameters["support"] - self.parameters["support"] = support_ - return self - else: - return NotImplemented - - def __itruediv__(self, other): - if isinstance(other, Dirac): - support_ = operator.truediv( - self.parameters["support"], other.parameters["support"] - ) - self.parameters["support"] = support_ - return self - else: - return NotImplemented - - def __ipow__(self, power, modulo=None): - if isinstance(power, Dirac): - support_ = pow( - self.parameters["support"], power.parameters["support"], modulo - ) - self.parameters["support"] = support_ - return self - else: - return NotImplemented - - # Unary arithmetic operations - - def __neg__(self): - self.parameters["support"] = operator.neg(self.parameters["support"]) - return self - - def __pos__(self): - self.parameters["support"] = operator.pos(self.parameters["support"]) - return self - - def __abs__(self): - self.parameters["support"] = operator.abs(self.parameters["support"]) - return self - - def __invert__(self): - support_ = self.parameters["support"] - self.parameters["support"] = operator.invert(support_) - return self diff --git a/src/probnum/prob/distributions/distribution.py b/src/probnum/prob/distributions/distribution.py deleted file mode 100644 index 464fa2ffbc..0000000000 --- a/src/probnum/prob/distributions/distribution.py +++ /dev/null @@ -1,456 +0,0 @@ -""" -Probability Distribution. - -This module provides a class implementing a probability distribution along with its -properties. -""" - -import numpy as np -import scipy.stats -import scipy.sparse -import scipy._lib._util - - -class Distribution: - """ - A class representing probability distributions. - - This class is primarily intended to be subclassed to provide - distribution-specific implementations of the various methods - (``logpdf``, ``logcdf``, ``sample``, ``mean``, ``var``, ...). When - creating a subclass implementing a certain distribution for which - operations like addition or multiplication are available, please - consider implementing them, as done for instance in :class:`Normal`. - The overriden methods should also update the ``parameters`` of the - distribution. This allows arithmetic operations on instances of - :class:`Distribution` and :class:`RandomVariable`. - - Parameters - ---------- - parameters : dict - Dictionary of distribution parameters such as mean, variance, shape, etc. - pdf : callable - Probability density or mass function. - logpdf : callable - Log-probability density or mass function. - cdf : callable - Cumulative distribution function. - logcdf : callable - Log-cumulative distribution function. - sample : callable - Function implementing sampling. Must have signature ``sample(size=())``. 
- mean : callable - Function returning the mean of the distribution. - cov : callable - Function returning the covariance of the distribution. - shape : tuple - Shape of samples from this distribution. - dtype : numpy.dtype or object - Data type of realizations of a random variable with this distribution. If - ``object`` will be converted to ``numpy.dtype``. - random_state : None or int or :class:`~numpy.random.RandomState` instance, optional - This parameter defines the RandomState object to use for drawing realizations - from this distribution. If None (or np.random), the global np.random state is - used. If integer, it is used to seed the local - :class:`~numpy.random.RandomState` instance. Default is None. - - See Also - -------- - RandomVariable : - Random variables are the main objects used by probabilistic numerical methods. - - Examples - -------- - - """ - - def __init__( - self, - parameters=None, - pdf=None, - logpdf=None, - cdf=None, - logcdf=None, - sample=None, - mean=None, - cov=None, - shape=None, - dtype=None, - random_state=None, - ): - if parameters is None: - parameters = {} # sentinel value to avoid anti-pattern - self._parameters = parameters - self._pdf = pdf - self._logpdf = logpdf - self._cdf = cdf - self._logcdf = logcdf - self._sample = sample - self._mean = mean - self._cov = cov - self._set_shape(shape) - self._dtype = dtype - self._random_state = scipy._lib._util.check_random_state(random_state) - - def _check_distparams(self): - pass - # TODO: type checking of self._parameters; check method signatures. - - def _set_shape(self, shape): - """ - Sets shape in accordance with distribution mean. - """ - self._shape = shape - try: - # Set shape based on mean - if np.isscalar(self.mean()): - shape_mean = () - else: - shape_mean = self.mean().shape - if shape is None or shape_mean == shape: - self._shape = shape_mean - else: - raise ValueError( - "Shape of distribution mean and given shape do not match." - ) - except NotImplementedError: - # Set shape based on a sample - if np.isscalar(self.sample(size=1)): - shape_sample = () - else: - shape_sample = self.sample(size=1).shape - if shape is None or shape_sample == shape: - self._shape = shape_sample - else: - raise ValueError( - "Shape of distribution sample and given shape do not match." - ) - - @property - def shape(self): - """Shape of samples from this distribution.""" - return self._shape - - @shape.setter - def shape(self, newshape): - self._reshape_inplace(newshape) - - self._set_shape(newshape) - - def _reshape_inplace(self, newshape): - raise NotImplementedError - - @property - def dtype(self): - """``Dtype`` of elements of samples from this distribution.""" - return self._dtype - - @dtype.setter - def dtype(self, newtype): - """Set the ``dtype`` of the distribution.""" - self._dtype = newtype - - @property - def random_state(self): - """Random state of the distribution. - - This attribute defines the RandomState object to use for drawing realizations - from this distribution. If None (or np.random), the global np.random state is - used. If integer, it is used to seed the local - :class:`~numpy.random.RandomState` instance. - """ - return self._random_state - - @random_state.setter - def random_state(self, seed): - """ Get or set the RandomState object of the underlying distribution. - - This can be either None or an existing RandomState object. If None (or - np.random), use the RandomState singleton used by np.random. If already a - RandomState instance, use it. 
If an int, use a new RandomState instance seeded - with seed. - """ - self._random_state = scipy._lib._util.check_random_state(seed) - - @property - def parameters(self): - """ - Parameters of the probability distribution. - - The parameters of the distribution such as mean, variance, et cetera stored in a - ``dict``. - """ - if self._parameters is not None: - return self._parameters - else: - raise AttributeError( - "No parameters of {} are available.".format(type(self).__name__) - ) - - def pdf(self, x): - """ - Probability density or mass function. - - Parameters - ---------- - x : array-like - Evaluation points of the probability density / mass function. - - Returns - ------- - p : array-like - Value of the probability density / mass function at the given points. - - """ - if self._pdf is not None: - return self._pdf(x) - if self._logpdf is not None: - return np.exp(self._logpdf(x)) - raise NotImplementedError( - "The function 'pdf' is not implemented for object of class {}".format( - type(self).__name__ - ) - ) - - def logpdf(self, x): - """ - Natural logarithm of the probability density function. - - Parameters - ---------- - x : array-like - Evaluation points of the log-probability density/mass function. - - Returns - ------- - logp : array-like - Value of the log-probability density / mass function at the given points. - """ - if self._logpdf is not None: - return self._logpdf(x) - elif self._pdf is not None: - return np.log(self._pdf(x)) - else: - raise NotImplementedError( - "The function 'logpdf' is not implemented for object of class " - "{}".format(type(self).__name__) - ) - - def cdf(self, x): - """ - Cumulative distribution function. - - Parameters - ---------- - x : array-like - Evaluation points of the cumulative distribution function. - - Returns - ------- - q : array-like - Value of the cumulative density function at the given points. - """ - if self._cdf is not None: - return self._cdf(x) - elif self._logcdf is not None: - return np.exp(self._logcdf(x)) - else: - raise NotImplementedError( - "The function 'cdf' is not implemented for object of class " - "{}".format(type(self).__name__) - ) - - def logcdf(self, x): - """ - Log-cumulative distribution function. - - Parameters - ---------- - x : array-like - Evaluation points of the cumulative distribution function. - - Returns - ------- - q : array-like - Value of the log-cumulative density function at the given points. - """ - if self._logcdf is not None: - return self._logcdf(x) - elif self._cdf is not None: - return np.log(self._cdf(x)) - else: - raise NotImplementedError( - "The function 'logcdf' is not implemented for object of class " - "{}".format(type(self).__name__) - ) - - def sample(self, size=()): - """ - Returns realizations from the associated random variable. - - Parameters - ---------- - size : tuple, default=() - Size of the realizations. - - Returns - ------- - realizations : array-like or LinearOperator - Realizations of the underlying random variable. - """ - if self._sample is not None: - return self._sample(size=size) - else: - raise NotImplementedError( - "The function 'sample' is not implemented for object of class " - "{}.".format(type(self).__name__) - ) - - def median(self): - """ - Median of the distribution. - - Returns - ------- - median : float - The median of the distribution. - """ - return self.cdf(x=0.5) - - def mode(self): - """ - Mode of the distribution. - - Returns - ------- - mode : float - The mode of the distribution. 
- """ - if "mode" in self._parameters: - return self._parameters["mode"] - else: - raise NotImplementedError( - "The function 'mode' is not implemented for object of class " - "{}.".format(type(self).__name__) - ) - - def mean(self): - """ - Mean :math:`\\mathbb{E}(X)` of the distribution. - - Returns - ------- - mean : array-like - The mean of the distribution. - """ - if self._mean is not None: - return self._mean() - elif "mean" in self._parameters: - return self._parameters["mean"] - else: - raise NotImplementedError( - "The function 'mean' is not implemented for object of class {}.".format( - type(self).__name__ - ) - ) - - def cov(self): - """ - Covariance - :math:`\\operatorname{Cov}(X) = \\mathbb{E}((X-\\mathbb{E}(X))(X-\\mathbb{E}(X))^\\top)` - of the distribution. - - Returns - ------- - cov : array-like - The kernels of the distribution. - """ # pylint: disable=line-too-long - if self._cov is not None: - return self._cov() - elif "cov" in self._parameters: - return self._parameters["cov"] - else: - raise NotImplementedError( - "The function 'cov' is not implemented for object of class " - "{}".format(type(self).__name__) - ) - - def var(self): - """ - Variance :math:`\\operatorname{Var}(X) = \\mathbb{E}((X-\\mathbb{E}(X))^2)` - of the distribution. - - Returns - ------- - var : array-like - The variance of the distribution. - """ - if "var" in self._parameters: - return self._parameters["var"] - else: - raise NotImplementedError( - "The function 'var' is not implemented for object of class {}".format( - type(self).__name__ - ) - ) - - def std(self): - """ - Standard deviation of the distribution. - - Returns - ------- - std : array-like - The standard deviation of the distribution. - """ - return np.sqrt(self.var()) - - def reshape(self, newshape): - """ - Give a new shape to (realizations of) this distribution. - - Parameters - ---------- - newshape : int or tuple of ints - New shape for the realizations and parameters of this distribution. It must - be compatible with the original shape. - - Returns - ------- - reshaped_rv : ``self`` with the new dimensions of ``shape``. - """ - raise NotImplementedError( - "Reshaping not implemented for distribution of type: {}.".format( - self.__class__.__name__ - ) - ) - - def transpose(self, *axes): - raise NotImplementedError( - "Transposition not implemented for distribution of type: {}.".format( - self.__class__.__name__ - ) - ) - - def __getitem__(self, key): - """ - (Advanced) indexing, masking and slicing into (realizations of) this - distribution. - - This is essentially marginalization for multivariate distributions. This method - supports all modes of array indexing presented in - - https://numpy.org/doc/1.19/reference/arrays.indexing.html. - - However, the available modes of indexing vary with the concrete distribution. - - Parameters - ---------- - key : int or slice or ndarray or tuple of None, int, slice, or ndarray - Indices, slice objects and/or boolean masks specifying which entries to keep - while marinalizing over all other entries. - """ - raise NotImplementedError( - "(Advanced) indexing and slicing is not implemented for distribution of " - "type: {}.".format(self.__class__.__name__) - ) diff --git a/src/probnum/prob/distributions/normal.py b/src/probnum/prob/distributions/normal.py deleted file mode 100644 index 21b1b370bd..0000000000 --- a/src/probnum/prob/distributions/normal.py +++ /dev/null @@ -1,765 +0,0 @@ -""" -Normal distribution. 
- -This module implements the Gaussian distribution in its univariate, -multi-variate, matrix-variate and operator-variate forms. -""" -import operator -import warnings - -import numpy as np -import scipy.stats -import scipy.sparse -import scipy._lib._util - -from probnum.linalg import linops -from probnum.prob.distributions.distribution import Distribution -from probnum.prob.distributions.dirac import Dirac -from probnum import utils as _utils - - -class Normal(Distribution): - """ - The normal distribution. - - The Gaussian distribution is ubiquitous in probability theory, since - it is the final and stable or equilibrium distribution to which - other distributions gravitate under a wide variety of smooth - operations, e.g., convolutions and stochastic transformations. - One example of this is the central limit theorem. The Gaussian - distribution is also attractive from a numerical point of view as it - is maintained through many transformations (e.g. it is stable). - - Parameters - ---------- - mean : float or array-like or LinearOperator - Mean of the normal distribution. - - cov : float or array-like or LinearOperator - (Co-)variance of the normal distribution. - - random_state : None or int or :class:`~numpy.random.RandomState` instance, optional - This parameter defines the RandomState object to - use for drawing realizations from this - distribution. Think of it like a random seed. - If None (or np.random), the global - np.random state is used. If integer, it is used to - seed the local - :class:`~numpy.random.RandomState` instance. - Default is None. - - See Also - -------- - Distribution : Class representing general probability distributions. - - Examples - -------- - >>> from probnum.prob import Normal - >>> N = Normal(mean=0.5, cov=1.0) - >>> N.parameters - {'mean': 0.5, 'cov': 1.0} - """ - - def __new__(cls, mean=0.0, cov=1.0, random_state=None): - """ - Factory method for normal subclasses. - - Checks shape/type of mean and cov and returns the corresponding - type of Normal distribution: - * _UnivariateNormal - * _MultivariateNormal - * _MatrixvariateNormal - * _SymmetricKroneckerIdenticalFactorsNormal - * _OperatorvariateNormal - If neither applies, a ValueError is raised. - """ - if cls is Normal: - if _both_are_univariate(mean, cov): - return _UnivariateNormal(mean, cov, random_state) - elif _both_are_multivariate(mean, cov): - return _MultivariateNormal(mean, cov, random_state) - elif _both_are_matrixvariate(mean, cov): - return _MatrixvariateNormal(mean, cov, random_state) - elif _both_are_symmkronidfactors(mean, cov): - return _SymmetricKroneckerIdenticalFactorsNormal( - mean, cov, random_state - ) - elif _both_are_operatorvariate(mean, cov): - return _OperatorvariateNormal(mean, cov, random_state) - else: - errmsg = ( - "Cannot instantiate normal distribution with mean of " - + "type {} and ".format(mean.__class__.__name__) - + "kernels of " - + "type {}.".format(cov.__class__.__name__) - ) - raise ValueError(errmsg) - else: - return super().__new__(cls) - - def __init__(self, mean=0.0, cov=1.0, random_state=None): - super().__init__( - parameters={"mean": mean, "cov": cov}, - dtype=float, - random_state=random_state, - ) - - def mean(self): - return self.parameters["mean"] - - def cov(self): - return self.parameters["cov"] - - def var(self): - raise NotImplementedError - - def __getitem__(self, key): - """ - Marginalization in multi- and matrixvariate normal distributions, expressed by - means of (advanced) indexing, masking and slicing. 
- - We support all modes of array indexing presented in - - https://numpy.org/doc/1.19/reference/arrays.indexing.html. - - Note that, currently, this method does not work for normal distributions other - than the multi- and matrixvariate versions. - - Parameters - ---------- - key : int or slice or ndarray or tuple of None, int, slice, or ndarray - Indices, slice objects and/or boolean masks specifying which entries to keep - while marginalizing over all other entries. - """ - if not isinstance(key, tuple): - key = (key,) - - # Select entries from mean - mean = self.mean()[key] - - # Select submatrix from covariance matrix - cov = self.cov().reshape(self.mean().shape + self.mean().shape) - cov = cov[key][tuple([slice(None)] * mean.ndim) + key] - - if cov.ndim > 2: - cov = cov.reshape(mean.size, mean.size) - - return Normal(mean=mean, cov=cov, random_state=self.random_state) - - def reshape(self, newshape): - try: - reshaped_mean = self.mean().reshape(newshape) - except ValueError: - raise ValueError( - f"Cannot reshape this normal distribution to the given shape: " - f"{newshape}" - ) - - reshaped_cov = self.cov() - - if reshaped_mean.ndim > 0 and reshaped_cov.ndim == 0: - reshaped_cov = reshaped_cov.reshape(1, 1) - - return Normal( - mean=reshaped_mean, cov=reshaped_cov, random_state=self.random_state, - ) - - def _reshape_inplace(self, newshape): - self.mean.shape = newshape - - if self.mean().ndim > 0 and self.cov().ndim == 0: - self.cov().shape = (1, 1) - - def transpose(self, *axes): - if len(axes) == 1 and isinstance(axes[0], tuple): - axes = axes[0] - elif (len(axes) == 1 and axes[0] is None) or len(axes) == 0: - axes = tuple(reversed(range(self.mean().ndim))) - - mean_t = self.mean().transpose(*axes).copy() - - # Transpose covariance - cov_axes = axes + tuple(mean_t.ndim + axis for axis in axes) - cov_t = self.cov().reshape(self.mean().shape + self.mean().shape) - cov_t = cov_t.transpose(*cov_axes).copy() - cov_t = cov_t.reshape(mean_t.size, mean_t.size) - - return Normal(mean=mean_t, cov=cov_t, random_state=self.random_state) - - # Binary arithmetic - - def __add__(self, other): - """ - Addition of Gaussian random variables. - """ - if isinstance(other, Dirac): - return Normal( - mean=self.mean() + other.mean(), - cov=self.cov(), - random_state=self.random_state, - ) - elif isinstance(other, type(self)): - if self.random_state is not None and other.random_state is not None: - warnings.warn( - "When adding random variables with set random states " - "only the first is preserved." - ) - try: - return Normal( - mean=self.mean() + other.mean(), - cov=self.cov() + other.cov(), - random_state=self.random_state, - ) - except ValueError: - return NotImplemented - else: - return NotImplemented - - def __sub__(self, other): - """ - Subtraction of Gaussian random variables. - """ - if isinstance(other, Dirac): - return self + (-other) - elif isinstance(other, type(self)): - if self.random_state is not None and other.random_state is not None: - warnings.warn( - "When adding random variables with set random states " - "only the first is preserved." - ) - try: - return Normal( - mean=self.mean() - other.mean(), - cov=self.cov() + other.cov(), - random_state=self.random_state, - ) - except ValueError: - return NotImplemented - else: - return NotImplemented - - def __mul__(self, other): - """ - Only works if the other dist. is a Dirac. 
- """ - if isinstance(other, Dirac): - delta = other.mean() - if delta == 0: - return Dirac(support=0 * self.mean(), random_state=self.random_state) - else: - return Normal( - mean=self.mean() * delta, - cov=self.cov() * delta ** 2, - random_state=self.random_state, - ) - else: - return NotImplemented - - def __truediv__(self, other): - """ - Only works if the other dist. is a Dirac. - """ - if other == 0: - raise ZeroDivisionError("Division by zero not supported.") - else: - if isinstance(other, Dirac): - return self * operator.inv(other) - else: - return NotImplemented - - def __pow__(self, power, modulo=None): - return NotImplemented - - # Binary arithmetic with reflected operands - - def __radd__(self, other): - """ - Only works if the other dist. is a Dirac. - """ - if isinstance(other, Dirac): - return self + other - else: - return NotImplemented - - def __rsub__(self, other): - """ - Only works if the other dist. is a Dirac. - """ - if isinstance(other, Dirac): - return operator.neg(self) + other - else: - return NotImplemented - - def __rmul__(self, other): - """ - Only works if the other dist. is a Dirac. - """ - if isinstance(other, Dirac): - return self * other - else: - return NotImplemented - - def __rmatmul__(self, other): - """ - Only works if the other dist. is a Dirac. - """ - if isinstance(other, Dirac): - delta = other.mean() - newmean = delta @ self.mean() - newcov = delta @ (self.cov() @ delta.transpose()) - return Normal(mean=newmean, cov=newcov, random_state=self.random_state) - return NotImplemented - - def __rtruediv__(self, other): - """ - Only works if the other dist. is a Dirac. - """ - if isinstance(other, Dirac): - return operator.inv(self) * other - else: - return NotImplemented - - def __rpow__(self, power, modulo=None): - return NotImplemented - - # Unary arithmetic - - def __neg__(self): - """ - Negation of r.v. - """ - try: - return Normal( - mean=-self.mean(), cov=self.cov(), random_state=self.random_state - ) - except Exception: - return NotImplemented - - def __pos__(self): - try: - return Normal( - mean=operator.pos(self.mean()), - cov=self.cov(), - random_state=self.random_state, - ) - except Exception: - return NotImplemented - - def __abs__(self): - try: - # todo: add absolute moments of normal - # (see: https://arxiv.org/pdf/1209.4340.pdf) - return Distribution( - parameters={}, - sample=lambda size: operator.abs(self.sample(size=size)), - random_state=self.random_state, - ) - except Exception: - return NotImplemented - - def __invert__(self): - try: - return Distribution( - parameters={}, - sample=lambda size: operator.abs(self.sample(size=size)), - random_state=self.random_state, - ) - except Exception: - return NotImplemented - - -def _both_are_univariate(mean, cov): - """ - Checks whether mean and kernels correspond to the - UNIVARIATE normal distribution. - """ - return np.isscalar(mean) and np.isscalar(cov) - - -def _both_are_multivariate(mean, cov): - """ - Checks whether mean and kernels correspond to the - MULTI- or MATRIXVARIATE normal distribution. - """ - mean_is_multivar = isinstance(mean, (np.ndarray, scipy.sparse.spmatrix,)) - cov_is_multivar = isinstance(cov, (np.ndarray, scipy.sparse.spmatrix,)) - return mean_is_multivar and cov_is_multivar and len(mean.shape) == 1 - - -def _both_are_matrixvariate(mean, cov): - """ - Checks whether mean and kernels correspond to the - MULTI- or MATRIXVARIATE normal distribution. 
- """ - mean_is_multivar = isinstance(mean, (np.ndarray, scipy.sparse.spmatrix,)) - cov_is_multivar = isinstance(cov, (np.ndarray, scipy.sparse.spmatrix,)) - return mean_is_multivar and cov_is_multivar and len(mean.shape) > 1 - - -def _both_are_symmkronidfactors(mean, cov): - """ - Checks whether mean OR (!) kernels correspond to the - OPERATORVARIATE normal distribution. - """ - mean_is_opvariate = isinstance(mean, scipy.sparse.linalg.LinearOperator) - cov_is_opvariate = isinstance(cov, scipy.sparse.linalg.LinearOperator) - if mean_is_opvariate or cov_is_opvariate: - return isinstance(cov, linops.SymmetricKronecker) and cov._ABequal - else: - return False - - -def _both_are_operatorvariate(mean, cov): - """ - Checks whether mean OR (!) kernels correspond to the - OPERATORVARIATE normal distribution. - """ - mean_is_opvariate = isinstance(mean, scipy.sparse.linalg.LinearOperator) - cov_is_opvariate = isinstance(cov, scipy.sparse.linalg.LinearOperator) - return mean_is_opvariate or cov_is_opvariate - - -class _UnivariateNormal(Normal): - """ - The univariate normal distribution. - """ - - def __init__(self, mean=0.0, cov=1.0, random_state=None): - super().__init__( - mean=_utils.as_numpy_scalar(mean), - cov=_utils.as_numpy_scalar(cov), - random_state=random_state, - ) - - def var(self): - return self.cov() - - def pdf(self, x): - return scipy.stats.norm.pdf(x, loc=self.mean(), scale=self.std()) - - def logpdf(self, x): - return scipy.stats.norm.logpdf(x, loc=self.mean(), scale=self.std()) - - def cdf(self, x): - return scipy.stats.norm.cdf(x, loc=self.mean(), scale=self.std()) - - def logcdf(self, x): - return scipy.stats.norm.logcdf(x, loc=self.mean(), scale=self.std()) - - def sample(self, size=()): - return scipy.stats.norm.rvs( - loc=self.mean(), scale=self.std(), size=size, random_state=self.random_state - ) - - def transpose(self, *axes): - return Normal( - mean=self.mean().copy(), - cov=self.cov().copy(), - random_state=self.random_state, - ) - - def _reshape_inplace(self, newshape): - raise NotImplementedError - - -class _MultivariateNormal(Normal): - """ - The multivariate normal distribution. - """ - - def __init__(self, mean, cov, random_state=None): - """ - Checks if mean and kernels have matching shapes before - initialising. - """ - meandim = np.prod(mean.shape) - if len(cov.shape) != 2: - raise ValueError("Covariance must be a 2D matrix " "or linear operator.") - if meandim != cov.shape[0] or meandim != cov.shape[1]: - raise ValueError( - "Shape mismatch of mean and kernels. Total " - "number of elements of the mean must match the " - "first and second dimension of the kernels." - ) - super().__init__(mean=mean, cov=cov, random_state=random_state) - - def var(self): - return np.diag(self.cov()) - - def pdf(self, x): - return scipy.stats.multivariate_normal.pdf(x, mean=self.mean(), cov=self.cov()) - - def logpdf(self, x): - return scipy.stats.multivariate_normal.logpdf( - x, mean=self.mean(), cov=self.cov() - ) - - def cdf(self, x): - return scipy.stats.multivariate_normal.cdf(x, mean=self.mean(), cov=self.cov()) - - def logcdf(self, x): - return scipy.stats.multivariate_normal.logcdf( - x, mean=self.mean(), cov=self.cov() - ) - - def sample(self, size=()): - return scipy.stats.multivariate_normal.rvs( - mean=self.mean(), cov=self.cov(), size=size, random_state=self.random_state - ) - - # Arithmetic Operations - - def __matmul__(self, other): - """ - Only works if other is a Dirac, which implies - matrix-multiplication with some constant. 
- """ - if isinstance(other, Dirac): - delta = other.mean() - newmean = self.mean() @ delta - newcov = delta.T @ (self.cov() @ delta) - if np.isscalar(newmean) and np.isscalar(newcov): - return _UnivariateNormal( - mean=newmean, cov=newcov, random_state=self.random_state - ) - else: - return _MultivariateNormal( - mean=newmean, cov=newcov, random_state=self.random_state - ) - else: - return NotImplemented - - def __rmatmul__(self, other): - """ - Only works if other is a Dirac, which implies - matrix-multiplication with some constant. - """ - if isinstance(other, Dirac): - delta = other.mean() - newmean = delta @ self.mean() - newcov = delta @ (self.cov() @ delta.T) - if np.isscalar(newmean) and np.isscalar(newcov): - return _UnivariateNormal( - mean=newmean, cov=newcov, random_state=self.random_state - ) - else: - return _MultivariateNormal( - mean=newmean, cov=newcov, random_state=self.random_state - ) - else: - return NotImplemented - - -class _MatrixvariateNormal(Normal): - """ - The matrix-variate normal distribution. - """ - - def __init__(self, mean, cov, random_state=None): - """ - Checks if mean and kernels have matching shapes before - initialising. - """ - _mean_dim = np.prod(mean.shape) - if len(cov.shape) != 2: - raise ValueError("Covariance must be a 2D matrix.") - if _mean_dim != cov.shape[0] or _mean_dim != cov.shape[1]: - raise ValueError( - "Shape mismatch of mean and kernels. Total " - "number of elements of the mean must match the " - "first and second dimension of the kernels." - ) - super().__init__(mean=mean, cov=cov, random_state=random_state) - - def var(self): - return np.diag(self.cov()) - - def pdf(self, x): - pdf_ravelled = scipy.stats.multivariate_normal.pdf( - x.ravel(), mean=self.mean().ravel(), cov=self.cov() - ) - return pdf_ravelled.reshape(newshape=self.shape) - - def logpdf(self, x): - raise NotImplementedError - - def cdf(self, x): - raise NotImplementedError - - def logcdf(self, x): - raise NotImplementedError - - def sample(self, size=()): - ravelled = scipy.stats.multivariate_normal.rvs( - mean=self.mean().ravel(), - cov=self.cov(), - size=size, - random_state=self.random_state, - ) - return ravelled.reshape(ravelled.shape[:-1] + self.shape) - - # Arithmetic Operations - # TODO: implement special rules for matrix-variate RVs and Kronecker - # structured covariances (see e.g. p.64 Thm. 2.3.10 of Gupta: - # Matrix-variate Distributions) - - def __matmul__(self, other): - if isinstance(other, Dirac): - # delta = other.mean() - raise NotImplementedError - return NotImplemented - - -class _OperatorvariateNormal(Normal): - """ - A normal distribution over finite dimensional linear operators. - """ - - def __init__(self, mean, cov, random_state=None): - """ - Checks shapes of mean and cov depending on the type - of operator that cov is before initialising. 
- """ - self._mean_dim = np.prod(mean.shape) - if isinstance(cov, linops.Kronecker): - _check_shapes_if_kronecker(mean, cov) - elif isinstance(cov, linops.SymmetricKronecker): - _check_shapes_if_symmetric_kronecker(mean, cov) - elif self._mean_dim != cov.shape[0] or self._mean_dim != cov.shape[1]: - raise ValueError("Shape mismatch of mean and kernels.") - super().__init__(mean=mean, cov=cov, random_state=random_state) - - def var(self): - return linops.Diagonal(Op=self.cov()) - - def _params_todense(self): - """Returns the mean and kernels of a distribution as dense matrices.""" - if isinstance(self.mean(), linops.LinearOperator): - mean = self.mean().todense() - else: - mean = self.mean() - if isinstance(self.cov(), linops.LinearOperator): - cov = self.cov().todense() - else: - cov = self.cov() - return mean, cov - - def pdf(self, x): - raise NotImplementedError - - def logpdf(self, x): - raise NotImplementedError - - def cdf(self, x): - raise NotImplementedError - - def logcdf(self, x): - raise NotImplementedError - - def sample(self, size=()): - mean, cov = self._params_todense() - samples_ravelled = scipy.stats.multivariate_normal.rvs( - mean=mean.ravel(), cov=cov, size=size, random_state=self.random_state - ) - return samples_ravelled.reshape(samples_ravelled.shape[:-1] + self.mean().shape) - - def reshape(self, newshape): - raise NotImplementedError - - def _reshape_inplace(self, newshape): - raise NotImplementedError - - def transpose(self, *axes): - raise NotImplementedError - - # Arithmetic Operations - - # TODO: implement special rules for matrix-variate RVs and - # Kronecker structured covariances - # (see e.g. p.64 Thm. 2.3.10 of Gupta: Matrix-variate Distributions) - def __matmul__(self, other): - if isinstance(other, Dirac): - othermean = other.mean() - delta = linops.Kronecker(linops.Identity(othermean.shape[0]), othermean) - return Normal( - mean=self.mean() @ othermean, - cov=delta.T @ (self.cov() @ delta), - random_state=self.random_state, - ) - return NotImplemented - - -def _check_shapes_if_kronecker(mean, cov): - """ - If mean has dimension (m x n) then kernels factors must be - (m x m) and (n x n) - """ - m, n = mean.shape - if ( - m != cov.A.shape[0] - or m != cov.A.shape[1] - or n != cov.B.shape[0] - or n != cov.B.shape[1] - ): - raise ValueError( - "Kronecker structured kernels must have " - "factors with the same shape as the mean." - ) - - -class _SymmetricKroneckerIdenticalFactorsNormal(_OperatorvariateNormal): - """ - Normal distribution with symmetric Kronecker structured - kernels with identical factors V (x)_s V. - """ - - def __init__(self, mean, cov, random_state=None): - _check_shapes_if_symmetric_kronecker(mean, cov) - self._n = mean.shape[1] - super().__init__(mean=mean, cov=cov, random_state=random_state) - - def sample(self, size=()): - - # Draw standard normal samples - if np.isscalar(size): - size = [size] - size_sample = [self._n * self._n] + list(size) - stdnormal_samples = scipy.stats.norm.rvs( - size=size_sample, random_state=self.random_state - ) - - # Cholesky decomposition - eps = 10 ** -12 # damping needed to avoid negative definite covariances - cholA = scipy.linalg.cholesky( - self.cov().A.todense() + eps * np.eye(self._n), lower=True - ) - - # Scale and shift - # TODO: can we avoid todense here and just return operator samples? 
- if isinstance(self.mean(), scipy.sparse.linalg.LinearOperator): - mean = self.mean().todense() - else: - mean = self.mean() - - # Appendix E: Bartels, S., Probabilistic Linear Algebra, PhD Thesis 2019 - samples_scaled = linops.Symmetrize(dim=self._n) @ ( - linops.Kronecker(A=cholA, B=cholA) @ stdnormal_samples - ) - - return mean[None, :, :] + samples_scaled.T.reshape(-1, self._n, self._n) - - -def _check_shapes_if_symmetric_kronecker(mean, cov): - """ - Mean has to be square. If mean has dimension (n x n) then kernels - factors must be (n x n). - """ - m, n = mean.shape - if m != n or n != cov.A.shape[0] or n != cov.B.shape[1]: - raise ValueError( - "Normal distributions with symmetric " - "Kronecker structured kernels must " - "have square mean and square " - "kernels factors with matching " - "dimensions." - ) diff --git a/src/probnum/prob/random_variable/__init__.py b/src/probnum/prob/random_variable/__init__.py new file mode 100644 index 0000000000..e60ecdcc37 --- /dev/null +++ b/src/probnum/prob/random_variable/__init__.py @@ -0,0 +1,2 @@ +from ._dirac import Dirac +from ._normal import Normal diff --git a/src/probnum/prob/random_variable/_arithmetic.py b/src/probnum/prob/random_variable/_arithmetic.py new file mode 100644 index 0000000000..51255eae46 --- /dev/null +++ b/src/probnum/prob/random_variable/_arithmetic.py @@ -0,0 +1,96 @@ +import operator + +from probnum.prob._random_variable import RandomVariable +from probnum.prob.random_variable import Dirac, Normal + + +def add(rv1: RandomVariable, rv2: RandomVariable) -> RandomVariable: + return _apply(_add_fns, rv1, rv2) + + +def sub(rv1: RandomVariable, rv2: RandomVariable) -> RandomVariable: + return _apply(_sub_fns, rv1, rv2) + + +def mul(rv1: RandomVariable, rv2: RandomVariable) -> RandomVariable: + return _apply(_mul_fns, rv1, rv2) + + +def matmul(rv1: RandomVariable, rv2: RandomVariable) -> RandomVariable: + return _apply(_matmul_fns, rv1, rv2) + + +def truediv(rv1: RandomVariable, rv2: RandomVariable) -> RandomVariable: + return _apply(_truediv_fns, rv1, rv2) + + +def floordiv(rv1: RandomVariable, rv2: RandomVariable) -> RandomVariable: + return _apply(_floordiv_fns, rv1, rv2) + + +def mod(rv1: RandomVariable, rv2: RandomVariable) -> RandomVariable: + return _apply(_mod_fns, rv1, rv2) + + +def divmod_(rv1: RandomVariable, rv2: RandomVariable) -> RandomVariable: + return _apply(_divmod_fns, rv1, rv2) + + +def pow_(rv1: RandomVariable, rv2: RandomVariable) -> RandomVariable: + return _apply(_pow_fns, rv1, rv2) + + +# Operator registry +def _apply(op_registry, rv1: RandomVariable, rv2: RandomVariable) -> RandomVariable: + key = (type(rv1), type(rv2)) + + if key not in op_registry: + return NotImplemented + + res = op_registry[key](rv1, rv2) + + return res + + +_add_fns = {} +_sub_fns = {} +_mul_fns = {} +_matmul_fns = {} +_truediv_fns = {} +_floordiv_fns = {} +_mod_fns = {} +_divmod_fns = {} +_pow_fns = {} + + +def _swap_operands(fn): + return lambda op1, op2: fn(op2, op1) + + +# Dirac +_add_fns[(Dirac, Dirac)] = Dirac._binary_operator_factory(operator.add) +_sub_fns[(Dirac, Dirac)] = Dirac._binary_operator_factory(operator.sub) +_mul_fns[(Dirac, Dirac)] = Dirac._binary_operator_factory(operator.mul) +_matmul_fns[(Dirac, Dirac)] = Dirac._binary_operator_factory(operator.matmul) +_truediv_fns[(Dirac, Dirac)] = Dirac._binary_operator_factory(operator.truediv) +_floordiv_fns[(Dirac, Dirac)] = Dirac._binary_operator_factory(operator.floordiv) +_mod_fns[(Dirac, Dirac)] = Dirac._binary_operator_factory(operator.mod) 
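
The tables populated here and just below implement pairwise double dispatch: the arithmetic dunder methods on RandomVariable look up the pair of concrete operand types and fall back to Python's reflected operators when no entry exists. A usage sketch against the API introduced in this patch, exercising the Normal/Dirac entries registered below (this assumes, as elsewhere in the diff, that mean and cov are exposed as properties):

from probnum.prob.random_variable import Dirac, Normal

n = Normal(mean=0.0, cov=1.0)
d = Dirac(support=2.0)

# Routed through _add_fns[(Normal, Dirac)], i.e. Normal._add_dirac:
shifted = n + d
assert shifted.mean == 2.0 and shifted.cov == 1.0

# Routed through _add_fns[(Dirac, Normal)] via _swap_operands:
assert (d + n).mean == 2.0
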
+_divmod_fns[(Dirac, Dirac)] = Dirac._binary_operator_factory(divmod)
+_pow_fns[(Dirac, Dirac)] = Dirac._binary_operator_factory(operator.pow)
+
+# Normal
+_add_fns[(Normal, Normal)] = Normal._add_normal
+_add_fns[(Normal, Dirac)] = Normal._add_dirac
+_add_fns[(Dirac, Normal)] = _swap_operands(Normal._add_dirac)
+
+_sub_fns[(Normal, Normal)] = Normal._sub_normal
+_sub_fns[(Normal, Dirac)] = Normal._sub_dirac
+_sub_fns[(Dirac, Normal)] = _swap_operands(Normal._rsub_dirac)
+
+_mul_fns[(Normal, Dirac)] = Normal._mul_dirac
+_mul_fns[(Dirac, Normal)] = _swap_operands(Normal._mul_dirac)
+
+_matmul_fns[(Normal, Dirac)] = Normal._matmul_dirac
+_matmul_fns[(Dirac, Normal)] = _swap_operands(Normal._rmatmul_dirac)
+
+_truediv_fns[(Normal, Dirac)] = Normal._truediv_dirac
diff --git a/src/probnum/prob/random_variable/_dirac.py b/src/probnum/prob/random_variable/_dirac.py
new file mode 100644
index 0000000000..9b6f38d2fa
--- /dev/null
+++ b/src/probnum/prob/random_variable/_dirac.py
@@ -0,0 +1,154 @@
+from typing import Callable, Optional, TypeVar
+
+import numpy as np
+
+from probnum import utils as _utils
+from probnum.prob import _random_variable
+
+
+_ValueType = TypeVar("ValueType")
+
+
+class Dirac(_random_variable.RandomVariable[_ValueType]):
+    """
+    The Dirac delta distribution.
+
+    This distribution models a point mass and can be useful to represent
+    numbers as random variables with Dirac measure. It has the useful
+    property that arithmetic operations between a :class:`Dirac` random
+    variable and an arbitrary :class:`RandomVariable` act in the same
+    way as the arithmetic operation with a constant.
+
+    Note that a Dirac measure does not admit a probability density
+    function but can be viewed as a distribution (generalized function).
+
+    Parameters
+    ----------
+    support : scalar or array-like or LinearOperator
+        The support of the Dirac delta function.
+
+    See Also
+    --------
+    RandomVariable : Class representing general random variables.
+
+    Examples
+    --------
+    >>> from probnum.prob import random_variable as rv
+    >>> rv1 = rv.Dirac(support=0.)
+    >>> rv2 = rv.Dirac(support=1.)
+    >>> rv = rv1 + rv2
+    >>> rv.sample(size=5)
+    array([1., 1., 1., 1., 1.])
+    """
+
+    def __init__(
+        self,
+        support: _ValueType,
+        random_state: Optional[_random_variable.RandomStateType] = None,
+    ):
+        if np.isscalar(support):
+            support = _utils.as_numpy_scalar(support)
+
+        self._support = support
+
+        super().__init__(
+            shape=support.shape,
+            dtype=support.dtype,
+            random_state=random_state,
+            parameters={"support": support},
+            sample=self._sample,
+            in_support=lambda x: np.all(x == support),
+            cdf=lambda x: 0.0 if np.any(x < self._support) else 1.0,
+            cov=lambda: np.zeros_like(  # pylint: disable=unexpected-keyword-arg
+                self._support,
+                shape=(
+                    (self._support.size, self._support.size)
+                    if self._support.ndim > 0
+                    else ()
+                ),
+            ),
+            var=lambda: np.zeros_like(self._support),
+            properties={
+                "mode": self._support,  # TODO: Is this correct? Diracs don't have a pdf
+                "median": self._support,
+                "mean": self._support,
+            },
+        )
+
+    @property
+    def support(self):
+        return self._support
+
+    def __getitem__(self, key):
+        """
+        Marginalization for multivariate Dirac distributions, expressed by means of
+        (advanced) indexing, masking and slicing.
+
+        This method supports all modes of array indexing presented in
+
+        https://numpy.org/doc/1.19/reference/arrays.indexing.html.
+
+        Parameters
+        ----------
+        key : int or slice or ndarray or tuple of None, int, slice, or ndarray
+            Indices, slice objects and/or boolean masks specifying which entries to keep
+            while marginalizing over all other entries.
+        """
+        return Dirac(support=self._support[key], random_state=self.random_state)
+
+    def reshape(self, newshape):
+        return Dirac(
+            support=self._support.reshape(newshape),
+            random_state=_utils.derive_random_seed(self.random_state),
+        )
+
+    def transpose(self, *axes):
+        return Dirac(
+            support=self._support.transpose(*axes),
+            random_state=_utils.derive_random_seed(self.random_state),
+        )
+
+    def _sample(self, size=()):
+        if isinstance(size, int):
+            size = (size,)
+
+        if size == ():
+            return self._support.copy()
+        else:
+            return np.tile(self._support, reps=size + (1,) * self.ndim)
+
+    # Unary arithmetic operations
+
+    def __neg__(self) -> "Dirac":
+        return Dirac(
+            support=-self.support,
+            random_state=_utils.derive_random_seed(self.random_state),
+        )
+
+    def __pos__(self) -> "Dirac":
+        return Dirac(
+            support=+self.support,
+            random_state=_utils.derive_random_seed(self.random_state),
+        )
+
+    def __abs__(self) -> "Dirac":
+        return Dirac(
+            support=abs(self.support),
+            random_state=_utils.derive_random_seed(self.random_state),
+        )
+
+    # Binary arithmetic operations
+
+    @staticmethod
+    def _binary_operator_factory(
+        operator: Callable[[_ValueType, _ValueType], _ValueType]
+    ) -> Callable[["Dirac", "Dirac"], "Dirac"]:
+        def _dirac_operator(dirac_rv1: Dirac, dirac_rv2: Dirac) -> Dirac:
+            return Dirac(
+                support=operator(dirac_rv1.support, dirac_rv2.support),
+                random_state=_utils.derive_random_seed(
+                    dirac_rv1.random_state, dirac_rv2.random_state,
+                ),
+            )
+
+        return _dirac_operator
diff --git a/src/probnum/prob/random_variable/_normal.py b/src/probnum/prob/random_variable/_normal.py
new file mode 100644
index 0000000000..7273b37cd4
--- /dev/null
+++ b/src/probnum/prob/random_variable/_normal.py
@@ -0,0 +1,587 @@
+from typing import Optional, Union
+
+import numpy as np
+import scipy.stats
+
+from probnum import prob, utils as _utils
+from probnum.linalg import linops
+from probnum.prob import _random_variable
+
+
+_ValueType = Union[np.floating, np.ndarray, linops.LinearOperator]
+
+
+class Normal(_random_variable.RandomVariable[np.ndarray]):
+    """
+    The normal distribution.
+
+    The Gaussian distribution is ubiquitous in probability theory, since
+    it is the final and stable or equilibrium distribution to which
+    other distributions gravitate under a wide variety of smooth
+    operations, e.g., convolutions and stochastic transformations.
+    One example of this is the central limit theorem. The Gaussian
+    distribution is also attractive from a numerical point of view as it
+    is maintained through many transformations (e.g. it is stable).
+
+    Parameters
+    ----------
+    mean : float or array-like or LinearOperator
+        Mean of the normal distribution.
+
+    cov : float or array-like or LinearOperator
+        (Co-)variance of the normal distribution.
+
+    random_state : None or int or :class:`~numpy.random.RandomState` instance, optional
+        This parameter defines the RandomState object to
+        use for drawing realizations from this
+        distribution. Think of it like a random seed.
+        If None (or np.random), the global
+        np.random state is used. If integer, it is used to
+        seed the local
+        :class:`~numpy.random.RandomState` instance.
+        Default is None.
+
+    See Also
+    --------
+    RandomVariable : Class representing general random variables.
+
+    Examples
+    --------
+    >>> from probnum.prob import Normal
+    >>> N = Normal(mean=0.5, cov=1.0)
+    >>> N.parameters
+    {'mean': 0.5, 'cov': 1.0}
+    """
+
+    # pylint: disable=too-many-locals,too-many-branches,too-many-statements
+    def __init__(
+        self,
+        mean: Union[float, np.floating, np.ndarray, linops.LinearOperator],
+        cov: Union[float, np.floating, np.ndarray, linops.LinearOperator],
+        random_state: Optional[_random_variable.RandomStateType] = None,
+    ):
+        # Type normalization
+        if np.isscalar(mean):
+            mean = _utils.as_numpy_scalar(mean)
+
+        if np.isscalar(cov):
+            cov = _utils.as_numpy_scalar(cov)
+
+        # Shape checking
+        if len(mean.shape) not in [0, 1, 2]:
+            raise ValueError(
+                f"Gaussian random variables must either be scalars, vectors, or "
+                f"matrices (or linear operators), but the given mean is a {mean.ndim}-"
+                f"dimensional tensor."
+            )
+
+        expected_cov_shape = (np.prod(mean.shape),) * 2 if len(mean.shape) > 0 else ()
+
+        if len(cov.shape) != len(expected_cov_shape) or cov.shape != expected_cov_shape:
+            raise ValueError(
+                f"The covariance matrix must be of shape {expected_cov_shape}, but "
+                f"shape {cov.shape} was given."
+            )
+
+        self._mean = mean
+        self._cov = cov
+
+        self.__getitem = None
+        self.__reshape = None
+        self.__transpose = None
+
+        # Method selection
+        properties = {}
+
+        univariate = len(mean.shape) == 0
+        dense = isinstance(mean, np.ndarray) and isinstance(cov, np.ndarray)
+        operator = isinstance(mean, linops.LinearOperator) or isinstance(
+            cov, linops.LinearOperator
+        )
+
+        if univariate:
+            # Univariate Gaussian
+            sample = self._univariate_sample
+            in_support = Normal._univariate_in_support
+            pdf = self._univariate_pdf
+            logpdf = self._univariate_logpdf
+            cdf = self._univariate_cdf
+            logcdf = self._univariate_logcdf
+            quantile = None  # TODO
+
+            properties["median"] = self._mean
+            properties["var"] = self._cov
+            entropy = self._univariate_entropy
+
+            self.__getitem = self._numpy_getitem
+            self.__reshape = self._numpy_reshape
+            self.__transpose = self._numpy_transpose
+        elif dense:
+            # Multi- and matrixvariate Gaussians with dense mean and covariance
+            sample = self._dense_sample
+            in_support = Normal._dense_in_support
+            pdf = self._dense_pdf
+            logpdf = self._dense_logpdf
+            cdf = self._dense_cdf
+            logcdf = self._dense_logcdf
+            quantile = None
+            entropy = self._dense_entropy
+
+            self.__getitem = self._numpy_getitem
+            self.__reshape = self._numpy_reshape
+            self.__transpose = self._numpy_transpose
+        elif operator:
+            # Operatorvariate Gaussians
+            if isinstance(cov, linops.Kronecker):
+                m, n = mean.shape
+
+                if (
+                    m != cov.A.shape[0]
+                    or m != cov.A.shape[1]
+                    or n != cov.B.shape[0]
+                    or n != cov.B.shape[1]
+                ):
+                    raise ValueError(
+                        "Kronecker structured covariances must have factors with the "
+                        "same shape as the mean."
+                    )
+            elif isinstance(cov, linops.SymmetricKronecker):
+                m, n = mean.shape
+
+                if m != n or n != cov.A.shape[0] or n != cov.B.shape[1]:
+                    raise ValueError(
+                        "Normal distributions with symmetric Kronecker structured "
+                        "covariances must have square mean and square covariance "
+                        "factors with matching dimensions."
+                    )
+
+            in_support = None
+            sample = self._operatorvariate_sample
+            pdf = None
+            logpdf = None
+            cdf = None
+            logcdf = None
+            quantile = None
+            entropy = None
+
+            if isinstance(cov, linops.SymmetricKronecker) and cov._ABequal:
+                sample = self._symmetric_kronecker_identical_factors_sample
+        else:
+            raise ValueError(
+                f"Cannot instantiate normal distribution with mean of type "
+                f"{mean.__class__.__name__} and covariance of type "
+                f"{cov.__class__.__name__}."
+            )
+
+        properties["mode"] = self._mean
+        properties["mean"] = self._mean
+        properties["cov"] = self._cov
+
+        super().__init__(
+            shape=mean.shape,
+            dtype=mean.dtype,
+            random_state=random_state,
+            parameters={"mean": self._mean, "cov": self._cov},
+            sample=sample,
+            in_support=in_support,
+            pdf=pdf,
+            logpdf=logpdf,
+            cdf=cdf,
+            logcdf=logcdf,
+            quantile=quantile,
+            entropy=entropy,
+            properties=properties,
+        )
+
+    def __getitem__(self, key):
+        """
+        Marginalization in multi- and matrixvariate normal distributions, expressed by
+        means of (advanced) indexing, masking and slicing.
+
+        We support all modes of array indexing presented in
+
+        https://numpy.org/doc/1.19/reference/arrays.indexing.html.
+
+        Note that, currently, this method is not implemented for operatorvariate
+        normal distributions.
+
+        Parameters
+        ----------
+        key : int or slice or ndarray or tuple of None, int, slice, or ndarray
+            Indices, slice objects and/or boolean masks specifying which entries to keep
+            while marginalizing over all other entries.
+        """
+
+        if self.__getitem is None:
+            raise NotImplementedError
+
+        return self.__getitem(key)
+
+    def _numpy_getitem(self, key):
+        if not isinstance(key, tuple):
+            key = (key,)
+
+        # Select entries from mean
+        mean = self._mean[key]
+
+        # Select submatrix from covariance matrix
+        cov = self._cov.reshape(self.shape + self.shape)
+        cov = cov[key][tuple([slice(None)] * mean.ndim) + key]
+
+        if mean.ndim > 0:
+            cov = cov.reshape(mean.size, mean.size)
+
+        return Normal(
+            mean=mean,
+            cov=cov,
+            random_state=_utils.derive_random_seed(self.random_state),
+        )
+
+    def reshape(self, newshape):
+        if self.__reshape is None:
+            raise NotImplementedError
+
+        return self.__reshape(newshape)
+
+    def _numpy_reshape(self, newshape):
+        try:
+            reshaped_mean = self._mean.reshape(newshape)
+        except ValueError as exc:
+            raise ValueError(
+                f"Cannot reshape this normal random variable to the given shape: "
+                f"{newshape}"
+            ) from exc
+
+        reshaped_cov = self._cov
+
+        if reshaped_mean.ndim > 0 and reshaped_cov.ndim == 0:
+            reshaped_cov = reshaped_cov.reshape(1, 1)
+
+        return Normal(
+            mean=reshaped_mean,
+            cov=reshaped_cov,
+            random_state=_utils.derive_random_seed(self.random_state),
+        )
+
+    def transpose(self, *axes):
+        if self.__transpose is None:
+            raise NotImplementedError
+
+        return self.__transpose(*axes)
+
+    def _numpy_transpose(self, *axes):
+        if len(axes) == 1 and isinstance(axes[0], tuple):
+            axes = axes[0]
+        elif (len(axes) == 1 and axes[0] is None) or len(axes) == 0:
+            axes = tuple(reversed(range(self.ndim)))
+
+        mean_t = self._mean.transpose(*axes).copy()
+
+        # Transpose covariance
+        cov_axes = axes + tuple(mean_t.ndim + axis for axis in axes)
+        cov_t = self._cov.reshape(self.shape + self.shape)
+        cov_t = cov_t.transpose(*cov_axes).copy()
+
+        if mean_t.ndim > 0:
+            cov_t = cov_t.reshape(mean_t.size, mean_t.size)
+
+        return Normal(
+            mean=mean_t,
+            cov=cov_t,
+            random_state=_utils.derive_random_seed(self.random_state),
+        )
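+
+    # Arithmetic sketch (an editorial illustration, not a doctest): constant
+    # operands are promoted to Dirac random variables, so, assuming a
+    # vector-variate rv = Normal(mean=np.zeros(2), cov=np.eye(2)),
+    #
+    #   (rv + np.ones(2)).mean  # shifts the mean; the covariance is unchanged
+    #   (2.0 * rv).cov          # scales the covariance by 2.0 ** 2
+    #
+    # and matrix multiplication by a constant B transforms the covariance to
+    # B.T @ cov @ B in the vector case (see _matmul_dirac below).
+
+    # Unary arithmetic operations
+
+    def __neg__(self) -> "Normal":
+        return Normal(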
+            mean=-self._mean,
+            cov=self._cov,
+            random_state=_utils.derive_random_seed(self.random_state),
+        )
+
+    def __pos__(self) -> "Normal":
+        return Normal(
+            mean=+self._mean,
+            cov=self._cov,
+            random_state=_utils.derive_random_seed(self.random_state),
+        )
+
+    def __abs__(self):  # pylint: disable=useless-super-delegation
+        # TODO: Add absolute moments of normal (https://arxiv.org/pdf/1209.4340.pdf)
+        return super().__abs__()
+
+    # Binary arithmetic operations
+
+    def _add_normal(self, other: "Normal") -> "Normal":
+        if other.shape != self.shape:
+            raise ValueError(
+                "Addition of two normally distributed random variables is only "
+                "possible if both operands have the same shape."
+            )
+
+        return Normal(
+            mean=self._mean + other._mean,
+            cov=self._cov + other._cov,
+            random_state=_utils.derive_random_seed(
+                self.random_state, other.random_state
+            ),
+        )
+
+    def _add_dirac(self, dirac_rv: "probnum.prob.random_variable.Dirac") -> "Normal":
+        return Normal(
+            mean=self._mean + dirac_rv.support,
+            cov=self._cov,
+            random_state=_utils.derive_random_seed(
+                self.random_state, dirac_rv.random_state
+            ),
+        )
+
+    def _sub_normal(self, other: "Normal") -> "Normal":
+        if other.shape != self.shape:
+            raise ValueError(
+                "Subtraction of two normally distributed random variables is only "
+                "possible if both operands have the same shape."
+            )
+
+        return Normal(
+            mean=self._mean - other._mean,
+            cov=self._cov + other._cov,
+            random_state=_utils.derive_random_seed(
+                self.random_state, other.random_state
+            ),
+        )
+
+    def _sub_dirac(self, dirac_rv: "probnum.prob.random_variable.Dirac") -> "Normal":
+        return Normal(
+            mean=self._mean - dirac_rv.support,
+            cov=self._cov,
+            random_state=_utils.derive_random_seed(
+                self.random_state, dirac_rv.random_state
+            ),
+        )
+
+    def _rsub_dirac(self, dirac_rv: "probnum.prob.random_variable.Dirac") -> "Normal":
+        return Normal(
+            mean=dirac_rv.support - self._mean,
+            cov=self._cov,
+            random_state=_utils.derive_random_seed(
+                dirac_rv.random_state, self.random_state
+            ),
+        )
+
+    def _matmul_dirac(self, dirac_rv: "probnum.prob.random_variable.Dirac") -> "Normal":
+        if self.ndim == 1 or (self.ndim == 2 and self.shape[0] == 1):
+            return Normal(
+                mean=self._mean @ dirac_rv.support,
+                cov=dirac_rv.support.T @ (self._cov @ dirac_rv.support),
+                random_state=_utils.derive_random_seed(
+                    self.random_state, dirac_rv.random_state
+                ),
+            )
+        elif self.ndim == 2 and self.shape[0] > 1:
+            cov_update = linops.Kronecker(
+                linops.Identity(dirac_rv.shape[0]), dirac_rv.support
+            )
+
+            return Normal(
+                mean=self._mean @ dirac_rv.support,
+                cov=cov_update.T @ (self._cov @ cov_update),
+                random_state=_utils.derive_random_seed(
+                    self.random_state, dirac_rv.random_state
+                ),
+            )
+        else:
+            raise TypeError(
+                "Currently, matrix multiplication is only supported for vector- and "
+                "matrix-variate Gaussians."
+            )
+
+    def _rmatmul_dirac(
+        self, dirac_rv: "probnum.prob.random_variable.Dirac"
+    ) -> "Normal":
+        if self.ndim != 1:
+            raise TypeError(
+                "Currently, matrix multiplication is only supported for vector-variate "
+                "Gaussians."
+ ) + + return Normal( + mean=dirac_rv.support @ self._mean, + cov=dirac_rv.support @ (self._cov @ dirac_rv.support.T), + random_state=_utils.derive_random_seed( + dirac_rv.random_state, self.random_state + ), + ) + + def _mul_dirac( + self, dirac_rv: "probnum.prob.random_variable.Dirac" + ) -> Union["Normal", "probnum.prob.random_variable.Dirac"]: + if dirac_rv.size == 1: + return self._scale(dirac_rv.support, dirac_rv.random_state) + + return NotImplemented + + def _truediv_dirac( + self, dirac_rv: "probnum.prob.random_variable.Dirac" + ) -> Union["Normal", "probnum.prob.random_variable.Dirac"]: + if dirac_rv.size == 1: + if dirac_rv.support == 0: + raise ZeroDivisionError + + return self._scale(1.0 / dirac_rv.support, dirac_rv.random_state) + + return NotImplemented + + def _scale(self, scalar, other_random_state=None): + assert scalar.size == 1 + + if other_random_state is None: + derived_random_seed = _utils.derive_random_seed(self.random_state) + else: + derived_random_seed = _utils.derive_random_seed( + self.random_state, other_random_state + ) + + if scalar == 0: + return prob.random_variable.Dirac( + support=np.zeros_like(self._mean), random_state=derived_random_seed, + ) + else: + return Normal( + mean=scalar * self._mean, + cov=(scalar ** 2) * self._cov, + random_state=derived_random_seed, + ) + + # Univariate Gaussians + def _univariate_sample(self, size=()) -> np.generic: + sample = scipy.stats.norm.rvs( + loc=self.mean, scale=self.std, size=size, random_state=self.random_state + ) + + if np.isscalar(sample): + sample = _utils.as_numpy_scalar(sample, dtype=self.dtype) + else: + sample = sample.astype(self.dtype) + + return sample + + @staticmethod + def _univariate_in_support(x) -> bool: + return np.isfinite(x) + + def _univariate_pdf(self, x) -> float: + return scipy.stats.norm.pdf(x, loc=self.mean, scale=self.std) + + def _univariate_logpdf(self, x) -> float: + return scipy.stats.norm.logpdf(x, loc=self.mean, scale=self.std) + + def _univariate_cdf(self, x) -> float: + return scipy.stats.norm.cdf(x, loc=self.mean, scale=self.std) + + def _univariate_logcdf(self, x) -> float: + return scipy.stats.norm.logcdf(x, loc=self.mean, scale=self.std) + + def _univariate_entropy(self) -> float: + return scipy.stats.norm.entropy(loc=self.mean, scale=self.std) + + # Multi- and matrixvariate Gaussians with dense covariance + def _dense_sample(self, size=()) -> np.ndarray: + sample = scipy.stats.multivariate_normal.rvs( + mean=self._mean.ravel(), + cov=self._cov, + size=size, + random_state=self.random_state, + ) + + return sample.reshape(sample.shape[:-1] + self.shape) + + @staticmethod + def _dense_in_support(x) -> bool: + return np.all(np.isfinite(x)) + + def _dense_pdf(self, x) -> float: + return scipy.stats.multivariate_normal.pdf( + x.ravel(), mean=self._mean.ravel(), cov=self._cov + ) + + def _dense_logpdf(self, x) -> float: + return scipy.stats.multivariate_normal.logpdf( + x.ravel(), mean=self._mean.ravel(), cov=self._cov + ) + + def _dense_cdf(self, x) -> float: + return scipy.stats.multivariate_normal.cdf( + x.ravel(), mean=self._mean.ravel(), cov=self._cov + ) + + def _dense_logcdf(self, x) -> float: + return scipy.stats.multivariate_normal.logcdf( + x.ravel(), mean=self._mean.ravel(), cov=self._cov + ) + + def _dense_entropy(self) -> float: + return scipy.stats.multivariate_normal.entropy( + mean=self._mean.ravel(), cov=self._cov, + ) + + # Operatorvariate Gaussians + def _operatorvariate_params_todense(self): + if isinstance(self._mean, linops.LinearOperator): + mean = 
self._mean.todense() + else: + mean = self._mean + + if isinstance(self._cov, linops.LinearOperator): + cov = self._cov.todense() + else: + cov = self._cov + + return mean, cov + + def _operatorvariate_sample(self, size=()) -> np.ndarray: + mean, cov = self._operatorvariate_params_todense() + + sample = scipy.stats.multivariate_normal.rvs( + mean=mean.ravel(), cov=cov, size=size, random_state=self.random_state, + ) + + return sample.reshape(sample.shape[:-1] + self.shape) + + # Operatorvariate Gaussian with symmetric Kronecker covariance from identical + # factors + def _symmetric_kronecker_identical_factors_sample(self, size=()): + assert isinstance(self._cov, linops.SymmetricKronecker) and self._cov._ABequal + + n = self._mean.shape[1] + + # Draw standard normal samples + if np.isscalar(size): + size = (size,) + + size_sample = (n * n,) + size + + stdnormal_samples = scipy.stats.norm.rvs( + size=size_sample, random_state=self.random_state + ) + + # Cholesky decomposition + eps = 10 ** -12 # damping needed to avoid negative definite covariances + cholA = scipy.linalg.cholesky( + self._cov.A.todense() + eps * np.eye(n), lower=True + ) + + # Scale and shift + # TODO: can we avoid todense here and just return operator samples? + if isinstance(self._mean, scipy.sparse.linalg.LinearOperator): + mean = self._mean.todense() + else: + mean = self._mean + + # Appendix E: Bartels, S., Probabilistic Linear Algebra, PhD Thesis 2019 + samples_scaled = linops.Symmetrize(dim=n) @ ( + linops.Kronecker(A=cholA, B=cholA) @ stdnormal_samples + ) + + return mean[None, :, :] + samples_scaled.T.reshape(-1, n, n) diff --git a/src/probnum/prob/randomvariable.py b/src/probnum/prob/randomvariable.py deleted file mode 100644 index 15155fb426..0000000000 --- a/src/probnum/prob/randomvariable.py +++ /dev/null @@ -1,490 +0,0 @@ -""" -Random Variables. - -This module implements random variables. Random variables are the main in- and outputs -of probabilistic numerical methods. -""" - -import operator - -import numpy as np -import scipy.stats -import scipy.sparse -import scipy._lib._util - -from probnum.prob.distributions.distribution import Distribution -from probnum.prob.distributions.dirac import Dirac -from probnum.prob.distributions.normal import Normal - - -class RandomVariable: - """ - Random variables are the main objects used by probabilistic numerical methods. - - Every probabilistic numerical method takes a random variable encoding the prior - distribution as input and outputs a random variable whose distribution encodes the - uncertainty arising from finite computation. The generic signature of a - probabilistic numerical method is: - - ``output_rv = probnum_method(input_rv, method_params)`` - - In practice, most random variables used by methods in ProbNum have Dirac or Gaussian - measure. - - Instances of :class:`RandomVariable` can be added, multiplied, etc. with arrays and - linear operators. This may change their ``distribution`` and not necessarily all - previously available methods are retained. - - Parameters - ---------- - shape : tuple - Shape of realizations of this random variable. - dtype : numpy.dtype or object - Data type of realizations of this random variable. If ``object`` will be - converted to ``numpy.dtype``. - distribution : Distribution - Probability distribution of the random variable. - - See Also - -------- - asrandvar : Transform into a :class:`RandomVariable`. - Distribution : A class representing probability distributions. 
- - Examples - -------- - """ - - def __init__(self, distribution=None, shape=None, dtype=None): - """Create a new random variable.""" - self._set_distribution(distribution) - self._set_dtype(distribution, dtype) - self._set_shape(distribution, shape) - - def _set_distribution(self, distribution): - """ - Set distribution of random variable. - """ - if isinstance(distribution, Distribution): - self._distribution = distribution - elif distribution is None: - self._distribution = Distribution() - else: - raise ValueError( - "The distribution parameter must be an " "instance of `Distribution`." - ) - - # TODO: add some type checking (e.g. for shape as a tuple of ints) and extract as - # function - def _set_shape(self, distribution, shape): - """ - Sets shape in accordance with distribution. - """ - self._shape = shape - if distribution is not None: - if distribution.shape is not None: - if shape is None or distribution.shape == shape: - self._shape = distribution.shape - else: - raise ValueError( - "Shape of distribution and given shape do not match." - ) - else: - self.distribution.reshape(newshape=shape) - - def _set_dtype(self, distribution, dtype): - """ - Sets dtype in accordance with distribution. - """ - self._dtype = dtype - if dtype is not None: - if isinstance(dtype, np.dtype): - self._dtype = dtype - else: - self._dtype = np.dtype(dtype) - if distribution is not None: - if self.dtype is None: - self._dtype = distribution.dtype - elif self.dtype != distribution.dtype: - # Change distribution dtype if random variable type is different - distribution.dtype = dtype - if self._dtype is None: - raise ValueError("No 'dtype' set.") - - def __repr__(self): - if self.dtype is None: - dt = "unspecified dtype" - else: - dt = "dtype=" + str(self.dtype) - return "<%s %s with %s>" % (str(self.shape), self.__class__.__name__, dt) - - @property - def distribution(self): - """Distribution of random variable.""" - return self._distribution - - def mean(self): - """Expected value of the random variable.""" - if self._distribution is not None: - try: - return self._distribution.mean() - except KeyError: - raise NotImplementedError( - "Underlying {} has no mean.".format( - type(self._distribution).__name__ - ) - ) - else: - raise NotImplementedError("No underlying distribution specified.") - - def cov(self): - """Covariance operator of the random variable""" - if self._distribution is not None: - try: - return self._distribution.cov() - except KeyError: - raise NotImplementedError( - "Underlying {} has no kernels.".format( - type(self._distribution).__name__ - ) - ) - else: - raise NotImplementedError("No underlying distribution specified.") - - @property - def shape(self): - """Shape of realizations of the random variable.""" - return self._shape - - @shape.setter - def shape(self, newshape): - self._distribution.shape = newshape - - self._set_shape(self._distribution, newshape) - - @property - def dtype(self): - """Data type of (elements of) a realization of this random variable.""" - return self._dtype - - @property - def random_state(self): - """Random state of the random variable induced by its ``distribution``.""" - return self._distribution.random_state - - @random_state.setter - def random_state(self, seed): - """ Get or set the RandomState object of the underlying distribution. - - This can be either None or an existing :class:`~numpy.random.RandomState` - object. If None (or np.random), use the :class:`~numpy.random.RandomState` - singleton used by np.random. 
If already a :class:`~numpy.random.RandomState` - instance, use it. If an int, use a new :class:`~numpy.random.RandomState` - instance seeded with seed. - """ - self.distribution._random_state = scipy._lib._util.check_random_state(seed) - - def sample(self, size=()): - """ - Draw realizations from a random variable. - - Parameters - ---------- - size : tuple - Size of the drawn sample of realizations. - - Returns - ------- - sample : array-like - Sample of realizations with the given ``size`` and the inherent ``shape``. - """ - if self._distribution is not None: - return self._distribution.sample(size=size) - else: - raise NotImplementedError("No sampling method provided.") - - def reshape(self, newshape): - """ - Give a new shape to a random variable. - - Parameters - ---------- - newshape : int or tuple of ints - New shape for the random variable. It must be compatible with the original - shape. - - Returns - ------- - reshaped_rv : ``self`` with the new dimensions of ``shape``. - """ - return RandomVariable(distribution=self._distribution.reshape(newshape)) - - def transpose(self, *axes): - """ - Transpose the random variable. - - Parameters - ---------- - axes : None, tuple of ints, or n ints - See documentation of numpy.ndarray.transpose. - - Returns - ------- - transposed_rv : The transposed random variable. - """ - return RandomVariable(distribution=self._distribution.transpose(*axes)) - - T = property(transpose) - - # Binary arithmetic operations - - # The below prevents numpy from calling elementwise arithmetic - # operations allowing expressions like: y = np.array([1, 1]) + RV - # to call the arithmetic operations defined by RandomVariable - # instead of elementwise. Thus no array of RandomVariables but a - # RandomVariable with the correct shape is returned. - __array_ufunc__ = None - - def _rv_from_binary_operation(self, other, op): - """ - Create a new random variable by applying a binary operation. - - Parameters - ---------- - other : object - op : callable - Binary operation. - - Returns - ------- - combined_rv : RandomVariable - Random variable resulting from ``op``. 
- - """ - other_rv = asrandvar(other) - combined_rv = RandomVariable( - distribution=op(self.distribution, other_rv.distribution) - ) - return combined_rv - - def __getitem__(self, key): - return RandomVariable(distribution=self.distribution[key]) - - def __add__(self, other): - return self._rv_from_binary_operation(other=other, op=operator.add) - - def __sub__(self, other): - return self._rv_from_binary_operation(other=other, op=operator.sub) - - def __mul__(self, other): - return self._rv_from_binary_operation(other=other, op=operator.mul) - - def __matmul__(self, other): - return self._rv_from_binary_operation(other=other, op=operator.matmul) - - def __truediv__(self, other): - return self._rv_from_binary_operation(other=other, op=operator.truediv) - - def __pow__(self, power, modulo=None): - return self._rv_from_binary_operation(other=power, op=operator.pow) - - # Binary arithmetic operations with reflected (swapped) operands - def __radd__(self, other): - other_rv = asrandvar(other) - return other_rv._rv_from_binary_operation(other=self, op=operator.add) - - def __rsub__(self, other): - other_rv = asrandvar(other) - return other_rv._rv_from_binary_operation(other=self, op=operator.sub) - - def __rmul__(self, other): - other_rv = asrandvar(other) - return other_rv._rv_from_binary_operation(other=self, op=operator.mul) - - def __rmatmul__(self, other): - other_rv = asrandvar(other) - return other_rv._rv_from_binary_operation(other=self, op=operator.matmul) - - def __rtruediv__(self, other): - other_rv = asrandvar(other) - return other_rv._rv_from_binary_operation(other=self, op=operator.truediv) - - def __rpow__(self, power, modulo=None): - other_rv = asrandvar(power) - return other_rv._rv_from_binary_operation(other=self, op=operator.pow) - - # Augmented arithmetic assignments (+=, -=, *=, ...) attempting to do the operation - # in place - # TODO: needs setter functions for properties `shape` and `dtype` to do in place - def __iadd__(self, other): - return NotImplemented - - def __isub__(self, other): - return NotImplemented - - def __imul__(self, other): - return NotImplemented - - def __imatmul__(self, other): - return NotImplemented - - def __itruediv__(self, other): - return NotImplemented - - def __ipow__(self, power, modulo=None): - return NotImplemented - - # Unary arithmetic operations - def __neg__(self): - return RandomVariable( - shape=self.shape, - dtype=self.dtype, - distribution=operator.neg(self.distribution), - ) - - def __pos__(self): - return RandomVariable( - shape=self.shape, - dtype=self.dtype, - distribution=operator.pos(self.distribution), - ) - - def __abs__(self): - return RandomVariable( - shape=self.shape, - dtype=self.dtype, - distribution=operator.abs(self.distribution), - ) - - -def asrandvar(obj): - """ - Return ``obj`` as a :class:`RandomVariable`. - - Converts scalars, (sparse) arrays or distribution classes to a - :class:`RandomVariable`. - - Parameters - ---------- - obj : object - Argument to be represented as a :class:`RandomVariable`. - - Returns - ------- - rv : RandomVariable - The object `obj` as a :class:`RandomVariable`. - - See Also - -------- - RandomVariable : Class representing random variables. 
- - Examples - -------- - >>> from scipy.stats import bernoulli - >>> from probnum.prob import asrandvar - >>> bern = bernoulli(p=0.5) - >>> bern.random_state = 42 # Seed for reproducibility - >>> b = asrandvar(bern) - >>> b.sample(size=5) - array([1, 1, 1, 0, 0]) - """ - # RandomVariable - if isinstance(obj, RandomVariable): - return obj - # Scalar - elif np.isscalar(obj): - return RandomVariable( - shape=(), dtype=type(obj), distribution=Dirac(support=obj) - ) - # Numpy array, sparse array or Linear Operator - elif isinstance( - obj, (np.ndarray, scipy.sparse.spmatrix, scipy.sparse.linalg.LinearOperator) - ): - return RandomVariable( - shape=obj.shape, dtype=obj.dtype, distribution=Dirac(support=obj) - ) - # Scipy random variable - elif isinstance( - obj, - ( - scipy.stats._distn_infrastructure.rv_frozen, - scipy.stats._multivariate.multi_rv_frozen, - ), - ): - return _scipystats_to_rv(scipydist=obj) - else: - raise ValueError( - "Argument of type {} cannot be converted to a random variable.".format( - type(obj) - ) - ) - - -def _scipystats_to_rv(scipydist): - """ - Transform SciPy distributions to Probnum :class:`RandomVariable`s. - - Parameters - ---------- - scipydist : scipy.stats._distn_infrastructure.rv_frozen or scipy.stats._multivariate.multi_rv_frozen - SciPy distribution. - - Returns - ------- - dist : RandomVariable - ProbNum random variable. - - """ # pylint: disable=line-too-long - # Univariate distributions (implemented in this package) - if isinstance(scipydist, scipy.stats._distn_infrastructure.rv_frozen): - # Normal distribution - if scipydist.dist.name == "norm": - return RandomVariable( - dtype=float, - distribution=Normal( - mean=scipydist.mean(), - cov=scipydist.var(), - random_state=scipydist.random_state, - ), - ) - # Multivariate distributions (implemented in this package) - elif isinstance(scipydist, scipy.stats._multivariate.multi_rv_frozen): - # Multivariate normal - if scipydist.__class__.__name__ == "multivariate_normal_frozen": - return RandomVariable( - shape=scipydist.mean.shape, - dtype=scipydist.mean.dtype, - distribution=Normal( - mean=scipydist.mean, - cov=scipydist.cov, - random_state=scipydist.random_state, - ), - ) - # Generic distributions - if hasattr(scipydist, "pmf"): - pdf = getattr(scipydist, "pmf", None) - logpdf = getattr(scipydist, "logpmf", None) - else: - pdf = getattr(scipydist, "pdf", None) - logpdf = getattr(scipydist, "logpdf", None) - rvsample = np.squeeze(scipydist.rvs(1)) - if np.shape(rvsample) == (): - rvdtype = type(rvsample) - rvshape = () - else: - rvdtype = rvsample.dtype - rvshape = rvsample.shape - return RandomVariable( - dtype=rvdtype, - shape=rvshape, - distribution=Distribution( - parameters={}, - pdf=pdf, - logpdf=logpdf, - cdf=getattr(scipydist, "cdf", None), - logcdf=getattr(scipydist, "logcdf", None), - sample=getattr(scipydist, "rvs", None), - mean=getattr(scipydist, "mean", None), - cov=getattr(scipydist, "var", None), - random_state=getattr(scipydist, "random_state", None), - ), - ) diff --git a/src/probnum/utils/__init__.py b/src/probnum/utils/__init__.py index 324e36032f..a41395e356 100644 --- a/src/probnum/utils/__init__.py +++ b/src/probnum/utils/__init__.py @@ -1,5 +1,6 @@ from .arrayutils import * from .fctutils import * +from .randomutils import * from .scalarutils import * # Public classes and functions. Order is reflected in documentation. 
@@ -7,7 +8,9 @@
     "atleast_1d",
     "atleast_2d",
     "as_colvec",
+    "as_numpy_scalar",
     "assert_is_1d_ndarray",
     "assert_is_2d_ndarray",
     "assert_evaluates_to_scalar",
+    "derive_random_seed",
 ]
diff --git a/src/probnum/utils/randomutils.py b/src/probnum/utils/randomutils.py
new file mode 100644
index 0000000000..a96dd2ce12
--- /dev/null
+++ b/src/probnum/utils/randomutils.py
@@ -0,0 +1,20 @@
+from typing import Union
+
+import numpy as np
+
+
+def derive_random_seed(*args: Union[np.random.RandomState, np.random.Generator]) -> int:
+    def _sample(rng: Union[np.random.RandomState, np.random.Generator]) -> int:
+        if isinstance(rng, np.random.RandomState):
+            return rng.randint(0, 2 ** 32, size=None, dtype=int)
+        elif isinstance(rng, np.random.Generator):
+            return rng.integers(0, 2 ** 32, size=None, dtype=int, endpoint=False)
+        else:
+            raise ValueError("Unsupported type of random number generator")
+
+    seed = _sample(args[0])
+
+    for arg in args[1:]:
+        seed ^= _sample(arg)
+
+    return seed
diff --git a/src/probnum/utils/scalarutils.py b/src/probnum/utils/scalarutils.py
index 1a5723da63..a1ff4ebf9a 100644
--- a/src/probnum/utils/scalarutils.py
+++ b/src/probnum/utils/scalarutils.py
@@ -3,8 +3,8 @@
 
 __all__ = ["as_numpy_scalar"]
 
 
-def as_numpy_scalar(x):
+def as_numpy_scalar(x, dtype=None):
     if not np.isscalar(x):
         raise ValueError("The given input is not a scalar")
 
-    return np.array([x])[0]
+    return np.asarray([x], dtype=dtype)[0]
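
A quick usage sketch for the two utilities above (an illustrative aside, not itself part of the patch; the generator names are made up):

    import numpy as np
    from probnum.utils import as_numpy_scalar, derive_random_seed

    rng1, rng2 = np.random.RandomState(42), np.random.RandomState(7)
    seed = derive_random_seed(rng1, rng2)  # XOR of one draw per generator
    half = as_numpy_scalar(0.5, dtype=np.float32)  # cast via the new dtype argument

diff --git a/tests/test_diffeq/test_ode/test_ivp.py b/tests/test_diffeq/test_ode/test_ivp.py
index e824f53d45..111a4a7a90 100644
--- a/tests/test_diffeq/test_ode/test_ivp.py
+++ b/tests/test_diffeq/test_ode/test_ivp.py
@@ -1,8 +1,7 @@
 import unittest
 import numpy as np
 from probnum.diffeq.ode import ivp
-from probnum.prob import RandomVariable
-from probnum.prob.distributions import Dirac
+from probnum.prob import Dirac
 from tests.testing import NumpyAssertions
 
 TEST_NDIM = 3
@@ -20,14 +19,14 @@ def test_logistic(self):
         """
         Test the logistic ODE convenience function.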
""" - rv = RandomVariable(distribution=Dirac(0.1)) + rv = Dirac(0.1) lg1 = ivp.logistic(self.tspan, rv) self.assertEqual(issubclass(type(lg1), ivp.IVP), True) lg2 = ivp.logistic(self.tspan, rv, params=(1.0, 1.0)) self.assertEqual(issubclass(type(lg2), ivp.IVP), True) def test_logistic_jacobian(self): - rv = RandomVariable(distribution=Dirac(0.1)) + rv = Dirac(0.1) lg1 = ivp.logistic(self.tspan, rv) random_direction = 1 + 0.1 * np.random.rand(lg1.ndim) random_point = 1 + np.random.rand(lg1.ndim) @@ -44,14 +43,14 @@ def test_logistic_jacobian(self): ) def test_fitzhughnagumo(self): - rv = RandomVariable(distribution=Dirac(np.ones(2))) + rv = Dirac(np.ones(2)) lg1 = ivp.fitzhughnagumo(self.tspan, rv) self.assertEqual(issubclass(type(lg1), ivp.IVP), True) lg2 = ivp.fitzhughnagumo(self.tspan, rv, params=(1.0, 1.0, 1.0, 1.0)) self.assertEqual(issubclass(type(lg2), ivp.IVP), True) def test_fitzhughnagumo_jacobian(self): - rv = RandomVariable(distribution=Dirac(np.ones(2))) + rv = Dirac(np.ones(2)) lg1 = ivp.fitzhughnagumo(self.tspan, rv) random_direction = 1 + 0.1 * np.random.rand(lg1.ndim) random_point = 1 + np.random.rand(lg1.ndim) @@ -68,14 +67,13 @@ def test_fitzhughnagumo_jacobian(self): ) def test_lotkavolterra(self): - rv = RandomVariable(distribution=Dirac(np.ones(2))) + rv = Dirac(np.ones(2)) lg1 = ivp.lotkavolterra(self.tspan, rv) - self.assertEqual(issubclass(type(lg1), ivp.IVP), True) lg2 = ivp.lotkavolterra(self.tspan, rv, params=(1.0, 1.0, 1.0, 1.0)) self.assertEqual(issubclass(type(lg2), ivp.IVP), True) def test_lotkavolterra_jacobian(self): - rv = RandomVariable(distribution=Dirac(np.ones(2))) + rv = Dirac(np.ones(2)) lg1 = ivp.lotkavolterra(self.tspan, rv) random_direction = 1 + 0.1 * np.random.rand(lg1.ndim) random_point = 1 + np.random.rand(lg1.ndim) @@ -104,7 +102,7 @@ def sol_(t): return np.exp(-t) * np.ones(TEST_NDIM) some_center = np.random.rand(TEST_NDIM) - rv = RandomVariable(distribution=Dirac(some_center)) + rv = Dirac(some_center) self.mockivp = ivp.IVP( (0.0, np.random.rand()), rv, rhs=rhs_, jac=jac_, sol=sol_ ) diff --git a/tests/test_diffeq/test_odefiltsmooth/test_ivp2filter.py b/tests/test_diffeq/test_odefiltsmooth/test_ivp2filter.py index 9111464273..70765c1c45 100644 --- a/tests/test_diffeq/test_odefiltsmooth/test_ivp2filter.py +++ b/tests/test_diffeq/test_odefiltsmooth/test_ivp2filter.py @@ -12,14 +12,14 @@ from probnum.diffeq import IBM, ivp2filter, lotkavolterra from probnum.filtsmooth import ExtendedKalman, UnscentedKalman -from probnum.prob import Dirac, RandomVariable +from probnum.prob import Dirac from tests.testing import NumpyAssertions class Ivp2FilterTestCase(unittest.TestCase, NumpyAssertions): def setUp(self): """We need a Prior object and an IVP object (with derivatives) to run the tests.""" - y0 = RandomVariable(distribution=Dirac(np.array([20.0, 15.0]))) + y0 = Dirac(np.array([20.0, 15.0])) self.ivp = lotkavolterra([0.4124, 1.15124], y0) self.prior = IBM(ordint=2, spatialdim=2, diffconst=1.7685) self.evlvar = 0.0005123121 @@ -51,14 +51,14 @@ def test_ekf0_initialdistribution(self): filtsmooth_object = ivp2filter.ivp2ekf0(self.ivp, self.prior, self.evlvar) expected_initval = np.array( [ - self.ivp.initrv.mean(), - self.ivp(self.ivp.t0, self.ivp.initrv.mean()), - self.ivp.jacobian(self.ivp.t0, self.ivp.initrv.mean()) - @ self.ivp(self.ivp.t0, self.ivp.initrv.mean()), + self.ivp.initrv.mean, + self.ivp(self.ivp.t0, self.ivp.initrv.mean), + self.ivp.jacobian(self.ivp.t0, self.ivp.initrv.mean) + @ self.ivp(self.ivp.t0, self.ivp.initrv.mean), ] ) 
self.assertAllClose( - filtsmooth_object.initialrandomvariable.mean(), expected_initval.T.flatten() + filtsmooth_object.initialrandomvariable.mean, expected_initval.T.flatten() ) @@ -88,14 +88,14 @@ def test_ekf1_initialdistribution(self): filtsmooth_object = ivp2filter.ivp2ekf1(self.ivp, self.prior, self.evlvar) expected_initval = np.array( [ - self.ivp.initrv.mean(), - self.ivp(self.ivp.t0, self.ivp.initrv.mean()), - self.ivp.jacobian(self.ivp.t0, self.ivp.initrv.mean()) - @ self.ivp(self.ivp.t0, self.ivp.initrv.mean()), + self.ivp.initrv.mean, + self.ivp(self.ivp.t0, self.ivp.initrv.mean), + self.ivp.jacobian(self.ivp.t0, self.ivp.initrv.mean) + @ self.ivp(self.ivp.t0, self.ivp.initrv.mean), ] ) self.assertAllClose( - filtsmooth_object.initialrandomvariable.mean(), expected_initval.T.flatten() + filtsmooth_object.initialrandomvariable.mean, expected_initval.T.flatten() ) @@ -125,12 +125,12 @@ def test_ukf_initialdistribution(self): filtsmooth_object = ivp2filter.ivp2ukf(self.ivp, self.prior, self.evlvar) expected_initval = np.array( [ - self.ivp.initrv.mean(), - self.ivp(self.ivp.t0, self.ivp.initrv.mean()), - self.ivp.jacobian(self.ivp.t0, self.ivp.initrv.mean()) - @ self.ivp(self.ivp.t0, self.ivp.initrv.mean()), + self.ivp.initrv.mean, + self.ivp(self.ivp.t0, self.ivp.initrv.mean), + self.ivp.jacobian(self.ivp.t0, self.ivp.initrv.mean) + @ self.ivp(self.ivp.t0, self.ivp.initrv.mean), ] ) self.assertAllClose( - filtsmooth_object.initialrandomvariable.mean(), expected_initval.T.flatten() + filtsmooth_object.initialrandomvariable.mean, expected_initval.T.flatten() ) diff --git a/tests/test_diffeq/test_odefiltsmooth/test_odefiltsmooth.py b/tests/test_diffeq/test_odefiltsmooth/test_odefiltsmooth.py index f288985e41..04bbd6a9a8 100644 --- a/tests/test_diffeq/test_odefiltsmooth/test_odefiltsmooth.py +++ b/tests/test_diffeq/test_odefiltsmooth/test_odefiltsmooth.py @@ -16,7 +16,7 @@ from probnum.diffeq.odefiltsmooth import probsolve_ivp from probnum.diffeq import ode -from probnum.prob import RandomVariable, Dirac +from probnum.prob import Dirac from tests.testing import NumpyAssertions @@ -28,7 +28,7 @@ class TestConvergenceOnLogisticODE(unittest.TestCase): def setUp(self): """Setup odesolver and solve a scalar ode""" - initrv = RandomVariable(distribution=Dirac(0.1 * np.ones(1))) + initrv = Dirac(0.1 * np.ones(1)) self.ivp = ode.logistic([0.0, 1.5], initrv) self.stps = [0.2, 0.1] @@ -130,7 +130,7 @@ class TestFirstIterations(unittest.TestCase, NumpyAssertions): """ def setUp(self): - initrv = RandomVariable(distribution=Dirac(0.1 * np.ones(1))) + initrv = Dirac(0.1 * np.ones(1)) self.ivp = ode.logistic([0.0, 1.5], initrv) self.step = 0.5 sol = probsolve_ivp( @@ -141,7 +141,7 @@ def setUp(self): def test_t0(self): exp_mean = np.array( - [self.ivp.initrv.mean(), self.ivp.rhs(0, self.ivp.initrv.mean())] + [self.ivp.initrv.mean, self.ivp.rhs(0, self.ivp.initrv.mean)] ) self.assertAllClose(self.ms[0], exp_mean[:, 0], rtol=1e-14) @@ -154,7 +154,7 @@ def test_t1(self): GaussianIVPFilter.solve() and not in Prop. 1 of Schober et al., 2019. 
""" - y0 = self.ivp.initrv.mean() + y0 = self.ivp.initrv.mean z0 = self.ivp.rhs(0, y0) z1 = self.ivp.rhs(0, y0 + self.step * z0) exp_mean = np.array([y0 + 0.5 * self.step * (z0 + z1), z1]) @@ -170,7 +170,7 @@ class TestAdaptivityOnLotkaVolterra(unittest.TestCase): def setUp(self): """Setup odesolver and solve a scalar ode""" - initrv = RandomVariable(distribution=Dirac(20 * np.ones(2))) + initrv = Dirac(20 * np.ones(2)) self.ivp = ode.lotkavolterra([0.0, 0.5], initrv) self.tol = 1e-2 @@ -199,7 +199,7 @@ class TestLotkaVolterraOtherPriors(unittest.TestCase): def setUp(self): """Setup odesolver and Lotka-Volterra IVP""" - initrv = RandomVariable(distribution=Dirac(20 * np.ones(2))) + initrv = Dirac(20 * np.ones(2)) self.ivp = ode.lotkavolterra([0.0, 0.5], initrv) self.tol = 1e-1 self.step = 0.1 @@ -272,7 +272,7 @@ class TestConvergenceOnLogisticODESmoother(unittest.TestCase): def setUp(self): """Setup odesolver and solve a scalar ode""" - initrv = RandomVariable(distribution=Dirac(0.1 * np.ones(1))) + initrv = Dirac(0.1 * np.ones(1)) self.ivp = ode.logistic([0.0, 1.5], initrv) self.stps = [0.2, 0.1] @@ -376,7 +376,7 @@ class TestAdaptivityOnLotkaVolterraSmoother(unittest.TestCase): def setUp(self): """Setup odesolver and solve a scalar ode""" - initrv = RandomVariable(distribution=Dirac(20 * np.ones(2))) + initrv = Dirac(20 * np.ones(2)) self.ivp = ode.lotkavolterra([0.0, 0.5], initrv) self.tol = 1e-2 @@ -405,7 +405,7 @@ class TestLotkaVolterraOtherPriorsSmoother(unittest.TestCase): def setUp(self): """Setup odesolver and Lotka-Volterra IVP""" - initdist = RandomVariable(distribution=Dirac(20 * np.ones(2))) + initdist = Dirac(20 * np.ones(2)) self.ivp = ode.lotkavolterra([0.0, 0.5], initdist) self.tol = 1e-1 self.step = 0.1 diff --git a/tests/test_diffeq/test_odefiltsmooth/test_prior.py b/tests/test_diffeq/test_odefiltsmooth/test_prior.py index 8d93550d8b..1e02343b11 100644 --- a/tests/test_diffeq/test_odefiltsmooth/test_prior.py +++ b/tests/test_diffeq/test_odefiltsmooth/test_prior.py @@ -10,8 +10,7 @@ import numpy as np -from probnum.prob import RandomVariable -from probnum.prob.distributions import Normal +from probnum.prob import Normal from probnum.diffeq.odefiltsmooth import prior from tests.testing import NumpyAssertions @@ -57,11 +56,11 @@ def setUp(self): def test_chapmankolmogorov(self): mean, cov = np.ones(self.ibm.ndim), np.eye(self.ibm.ndim) - initrv = RandomVariable(distribution=Normal(mean, cov)) + initrv = distribution = Normal(mean, cov) cke, __ = self.ibm.chapmankolmogorov(0.0, STEP, STEP, initrv) - self.assertAllClose(AH_22_IBM @ initrv.mean(), cke.mean(), 1e-14) + self.assertAllClose(AH_22_IBM @ initrv.mean, cke.mean, 1e-14) self.assertAllClose( - AH_22_IBM @ initrv.cov() @ AH_22_IBM.T + QH_22_IBM, cke.cov(), 1e-14 + AH_22_IBM @ initrv.cov @ AH_22_IBM.T + QH_22_IBM, cke.cov, 1e-14 ) @@ -73,12 +72,12 @@ def setUp(self): def test_chapmankolmogorov(self): mean, cov = np.ones(self.ibm.ndim), np.eye(self.ibm.ndim) - initrv = RandomVariable(distribution=Normal(mean, cov)) + initrv = Normal(mean, cov) cke, __ = self.ibm.chapmankolmogorov(0.0, STEP, STEP, initrv) - self.assertAllClose(AH_21_PRE @ initrv.mean(), cke.mean(), 1e-14) + self.assertAllClose(AH_21_PRE @ initrv.mean, cke.mean, 1e-14) self.assertAllClose( - AH_21_PRE @ initrv.cov() @ AH_21_PRE.T + QH_21_PRE, cke.cov(), 1e-14 + AH_21_PRE @ initrv.cov @ AH_21_PRE.T + QH_21_PRE, cke.cov, 1e-14 ) @@ -89,7 +88,7 @@ def setUp(self): def test_chapmankolmogorov(self): mean, cov = np.ones(self.ibm.ndim), np.eye(self.ibm.ndim) - 
-        initrv = RandomVariable(distribution=Normal(mean, cov))
+        initrv = Normal(mean, cov)
         self.ibm.chapmankolmogorov(0.0, STEP, STEP, initrv)
 
     def test_asymptotically_ibm(self):
@@ -145,5 +144,5 @@ def test_larger_shape(self):
 
     def test_chapmankolmogorov(self):
         mean, cov = np.ones(self.mat1.ndim), np.eye(self.mat1.ndim)
-        initrv = RandomVariable(distribution=Normal(mean, cov))
+        initrv = Normal(mean, cov)
         self.mat1.chapmankolmogorov(0.0, STEP, STEP, initrv)
diff --git a/tests/test_diffeq/test_odesolution.py b/tests/test_diffeq/test_odesolution.py
index c11e9ed3df..e82b4bf265 100644
--- a/tests/test_diffeq/test_odesolution.py
+++ b/tests/test_diffeq/test_odesolution.py
@@ -1,7 +1,7 @@
 import unittest
 import numpy as np
 
-from probnum.prob import RandomVariable, Dirac
+from probnum.prob import Dirac
 from probnum.prob._randomvariablelist import _RandomVariableList
 from probnum.diffeq import probsolve_ivp
 from probnum.diffeq.ode import lotkavolterra, logistic
@@ -11,7 +11,7 @@
 
 class TestODESolution(unittest.TestCase, NumpyAssertions):
     def setUp(self):
-        initrv = RandomVariable(distribution=Dirac(20 * np.ones(2)))
+        initrv = Dirac(20 * np.ones(2))
         self.ivp = lotkavolterra([0.0, 0.5], initrv)
         step = 0.1
         self.solution = probsolve_ivp(self.ivp, step=step)
@@ -28,11 +28,11 @@ def test_t(self):
         self.assertApproxEqual(self.solution.t[-1], self.ivp.tmax)
 
     def test_getitem(self):
-        self.assertArrayEqual(self.solution[0].mean(), self.solution.y[0].mean())
-        self.assertArrayEqual(self.solution[0].cov(), self.solution.y[0].cov())
+        self.assertArrayEqual(self.solution[0].mean, self.solution.y[0].mean)
+        self.assertArrayEqual(self.solution[0].cov, self.solution.y[0].cov)
 
-        self.assertArrayEqual(self.solution[-1].mean(), self.solution.y[-1].mean())
-        self.assertArrayEqual(self.solution[-1].cov(), self.solution.y[-1].cov())
+        self.assertArrayEqual(self.solution[-1].mean, self.solution.y[-1].mean)
+        self.assertArrayEqual(self.solution[-1].cov, self.solution.y[-1].cov)
 
-        self.assertArrayEqual(self.solution[:].mean(), self.solution.y[:].mean())
-        self.assertArrayEqual(self.solution[:].cov(), self.solution.y[:].cov())
+        self.assertArrayEqual(self.solution[:].mean, self.solution.y[:].mean)
+        self.assertArrayEqual(self.solution[:].cov, self.solution.y[:].cov)
@@ -65,21 +65,15 @@ def test_call_interpolation(self):
 
     def test_call_correctness(self):
         t = self.ivp.t0 + 1e-6
         self.assertAllClose(
-            self.solution[0].mean(), self.solution(t).mean(), atol=1e-4, rtol=0
+            self.solution[0].mean, self.solution(t).mean, atol=1e-4, rtol=0
         )
 
     def test_call_endpoints(self):
-        self.assertArrayEqual(
-            self.solution(self.ivp.t0).mean(), self.solution[0].mean()
-        )
-        self.assertArrayEqual(self.solution(self.ivp.t0).cov(), self.solution[0].cov())
+        self.assertArrayEqual(self.solution(self.ivp.t0).mean, self.solution[0].mean)
+        self.assertArrayEqual(self.solution(self.ivp.t0).cov, self.solution[0].cov)
 
-        self.assertArrayEqual(
-            self.solution(self.ivp.tmax).mean(), self.solution[-1].mean()
-        )
-        self.assertArrayEqual(
-            self.solution(self.ivp.tmax).cov(), self.solution[-1].cov()
-        )
+        self.assertArrayEqual(self.solution(self.ivp.tmax).mean, self.solution[-1].mean)
+        self.assertArrayEqual(self.solution(self.ivp.tmax).cov, self.solution[-1].cov)
 
     def test_call_extrapolation(self):
         t = 1.1 * self.ivp.tmax
@@ -91,7 +85,7 @@ class TestODESolutionHigherOrderPrior(TestODESolution):
     """Same as above, but higher-order prior to test for a different dimensionality"""
 
     def setUp(self):
-        initrv = RandomVariable(distribution=Dirac(20 * np.ones(2)))
+        initrv = Dirac(20 * np.ones(2))
         self.ivp = lotkavolterra([0.0, 0.5], initrv)
         step = 0.1
         self.solution = probsolve_ivp(self.ivp, which_prior="ibm3", step=step)
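
The pattern these tests exercise, sketched outside the patch (names here are illustrative; ivp as constructed in the tests above): indexing a probabilistic ODE solution at a single time point yields a random variable whose mean and cov are now properties, while slicing yields a _RandomVariableList whose stacked moments are exposed the same way:

    sol = probsolve_ivp(ivp, step=0.1)
    m0 = sol[0].mean   # mean of the state at the first time point
    ms = sol[:].mean   # means of all states, stacked along the first axis
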
@@ -101,7 +95,7 @@ class TestODESolutionOneDimODE(TestODESolution): """Same as above, but 1d IVP to test for a different dimensionality""" def setUp(self): - initrv = RandomVariable(distribution=Dirac(0.1 * np.ones(1))) + initrv = Dirac(0.1 * np.ones(1)) self.ivp = logistic([0.0, 1.5], initrv) step = 0.1 self.solution = probsolve_ivp(self.ivp, which_prior="ibm3", step=step) diff --git a/tests/test_filtsmooth/test_gaussfiltsmooth/filtsmooth_testcases.py b/tests/test_filtsmooth/test_gaussfiltsmooth/filtsmooth_testcases.py index 4a5528e5c2..73e7b5f099 100644 --- a/tests/test_filtsmooth/test_gaussfiltsmooth/filtsmooth_testcases.py +++ b/tests/test_filtsmooth/test_gaussfiltsmooth/filtsmooth_testcases.py @@ -12,7 +12,7 @@ generate_cd, DiscreteGaussianModel, ) -from probnum.prob import RandomVariable, Normal +from probnum.prob import Normal from tests.testing import NumpyAssertions __all__ = [ @@ -47,7 +47,7 @@ def setup_cartracking(self): self.measmod = DiscreteGaussianLTIModel( dynamat=self.measmat, forcevec=np.zeros(2), diffmat=self.measdiff ) - self.initrv = RandomVariable(distribution=Normal(self.mean, self.cov)) + self.initrv = Normal(self.mean, self.cov) self.tms = np.arange(0, 20, self.delta_t) self.states, self.obs = generate_dd( self.dynmod, self.measmod, self.initrv, self.tms @@ -76,7 +76,7 @@ def setup_ornsteinuhlenbeck(self): self.measmod = DiscreteGaussianLTIModel( dynamat=np.eye(1), forcevec=np.zeros(1), diffmat=self.r * np.eye(1) ) - self.initrv = RandomVariable(distribution=Normal(10 * np.ones(1), np.eye(1))) + self.initrv = Normal(10 * np.ones(1), np.eye(1)) self.tms = np.arange(0, 20, self.delta_t) self.states, self.obs = generate_cd( dynmod=self.dynmod, measmod=self.measmod, initrv=self.initrv, times=self.tms @@ -119,7 +119,7 @@ def dh(t, x): initcov = var * np.eye(2) self.dynamod = DiscreteGaussianModel(f, lambda t: q, df) self.measmod = DiscreteGaussianModel(h, lambda t: self.r, dh) - self.initrv = RandomVariable(distribution=Normal(initmean, initcov)) + self.initrv = Normal(initmean, initcov) self.tms = np.arange(0, 4, delta_t) self.q = q self.states, self.obs = generate_dd( diff --git a/tests/test_filtsmooth/test_gaussfiltsmooth/test_extendedkalman.py b/tests/test_filtsmooth/test_gaussfiltsmooth/test_extendedkalman.py index 8e16da08ec..410c1f1198 100644 --- a/tests/test_filtsmooth/test_gaussfiltsmooth/test_extendedkalman.py +++ b/tests/test_filtsmooth/test_gaussfiltsmooth/test_extendedkalman.py @@ -36,20 +36,20 @@ def test_initialdistribution(self): def test_predict(self): pred, __ = self.method.predict(0.0, self.delta_t, self.initrv) - self.assertEqual(pred.mean().ndim, 1) - self.assertEqual(pred.mean().shape[0], 4) - self.assertEqual(pred.cov().ndim, 2) - self.assertEqual(pred.cov().shape[0], 4) - self.assertEqual(pred.cov().shape[1], 4) + self.assertEqual(pred.mean.ndim, 1) + self.assertEqual(pred.mean.shape[0], 4) + self.assertEqual(pred.cov.ndim, 2) + self.assertEqual(pred.cov.shape[0], 4) + self.assertEqual(pred.cov.shape[1], 4) def test_update(self): - data = self.measmod.sample(0.0, self.initrv.mean()) + data = self.measmod.sample(0.0, self.initrv.mean) upd, __, __, __ = self.method.update(0.0, self.initrv, data) - self.assertEqual(upd.mean().ndim, 1) - self.assertEqual(upd.mean().shape[0], 4) - self.assertEqual(upd.cov().ndim, 2) - self.assertEqual(upd.cov().shape[0], 4) - self.assertEqual(upd.cov().shape[1], 4) + self.assertEqual(upd.mean.ndim, 1) + self.assertEqual(upd.mean.shape[0], 4) + self.assertEqual(upd.cov.ndim, 2) + self.assertEqual(upd.cov.shape[0], 4) 
+ self.assertEqual(upd.cov.shape[1], 4) def test_filtsmooth(self): """ @@ -115,8 +115,8 @@ def test_initialdistribution(self): def test_predict_shape(self): pred, __ = self.method.predict(0.0, self.delta_t, self.initrv) - self.assertEqual(pred.mean().shape, (1,)) - self.assertEqual(pred.cov().shape, (1, 1)) + self.assertEqual(pred.mean.shape, (1,)) + self.assertEqual(pred.cov.shape, (1, 1)) def test_predict_value(self): pred, __ = self.method.predict(0.0, self.delta_t, self.initrv) @@ -126,16 +126,16 @@ def test_predict_value(self): / (2 * self.lam) * (1 - scipy.linalg.expm(2 * self.drift * self.delta_t)) ) - expectedmean = np.squeeze(ah @ (self.initrv.mean() * np.ones(1))) - expectedcov = np.squeeze(ah @ (self.initrv.cov() * np.eye(1)) @ ah.T + qh) - self.assertApproxEqual(expectedmean, pred.mean()) - self.assertApproxEqual(expectedcov, pred.cov()) + expectedmean = np.squeeze(ah @ (self.initrv.mean * np.ones(1))) + expectedcov = np.squeeze(ah @ (self.initrv.cov * np.eye(1)) @ ah.T + qh) + self.assertApproxEqual(expectedmean, pred.mean) + self.assertApproxEqual(expectedcov, pred.cov) def test_update(self): - data = self.measmod.sample(0.0, self.initrv.mean() * np.ones(1)) + data = self.measmod.sample(0.0, self.initrv.mean * np.ones(1)) upd, __, __, __ = self.method.update(0.0, self.initrv, data) - self.assertEqual(upd.mean().shape, (1,)) - self.assertEqual(upd.cov().shape, (1, 1)) + self.assertEqual(upd.mean.shape, (1,)) + self.assertEqual(upd.cov.shape, (1, 1)) def test_smoother(self): """ diff --git a/tests/test_filtsmooth/test_gaussfiltsmooth/test_kalman.py b/tests/test_filtsmooth/test_gaussfiltsmooth/test_kalman.py index 1be02cad3c..3d679ac9b0 100644 --- a/tests/test_filtsmooth/test_gaussfiltsmooth/test_kalman.py +++ b/tests/test_filtsmooth/test_gaussfiltsmooth/test_kalman.py @@ -32,20 +32,20 @@ def test_initialdistribution(self): def test_predict(self): pred, __ = self.method.predict(0.0, self.delta_t, self.initrv) - self.assertEqual(pred.mean().ndim, 1) - self.assertEqual(pred.mean().shape[0], 4) - self.assertEqual(pred.cov().ndim, 2) - self.assertEqual(pred.cov().shape[0], 4) - self.assertEqual(pred.cov().shape[1], 4) + self.assertEqual(pred.mean.ndim, 1) + self.assertEqual(pred.mean.shape[0], 4) + self.assertEqual(pred.cov.ndim, 2) + self.assertEqual(pred.cov.shape[0], 4) + self.assertEqual(pred.cov.shape[1], 4) def test_update(self): - data = self.measmod.sample(0.0, self.initrv.mean()) + data = self.measmod.sample(0.0, self.initrv.mean) upd, __, __, __ = self.method.update(0.0, self.initrv, data) - self.assertEqual(upd.mean().ndim, 1) - self.assertEqual(upd.mean().shape[0], 4) - self.assertEqual(upd.cov().ndim, 2) - self.assertEqual(upd.cov().shape[0], 4) - self.assertEqual(upd.cov().shape[1], 4) + self.assertEqual(upd.mean.ndim, 1) + self.assertEqual(upd.mean.shape[0], 4) + self.assertEqual(upd.cov.ndim, 2) + self.assertEqual(upd.cov.shape[0], 4) + self.assertEqual(upd.cov.shape[1], 4) def test_filtsmooth(self): """ @@ -110,8 +110,8 @@ def test_initialdistribution(self): def test_predict_shape(self): pred, __ = self.method.predict(0.0, self.delta_t, self.initrv) - self.assertEqual(pred.mean().shape, (1,)) - self.assertEqual(pred.cov().shape, (1, 1)) + self.assertEqual(pred.mean.shape, (1,)) + self.assertEqual(pred.cov.shape, (1, 1)) def test_predict_value(self): pred, __ = self.method.predict(0.0, self.delta_t, self.initrv) @@ -121,16 +121,16 @@ def test_predict_value(self): / (2 * self.lam) * (1 - scipy.linalg.expm(2 * self.drift * self.delta_t)) ) - expectedmean = 
np.squeeze(ah @ (self.initrv.mean() * np.ones(1))) - expectedcov = np.squeeze(ah @ (self.initrv.cov() * np.eye(1)) @ ah.T + qh) - self.assertApproxEqual(expectedmean, pred.mean()) - self.assertApproxEqual(expectedcov, pred.cov()) + expectedmean = np.squeeze(ah @ (self.initrv.mean * np.ones(1))) + expectedcov = np.squeeze(ah @ (self.initrv.cov * np.eye(1)) @ ah.T + qh) + self.assertApproxEqual(expectedmean, pred.mean) + self.assertApproxEqual(expectedcov, pred.cov) def test_update(self): - data = np.array([self.measmod.sample(0.0, self.initrv.mean() * np.ones(1))]) + data = np.array([self.measmod.sample(0.0, self.initrv.mean * np.ones(1))]) upd, __, __, __ = self.method.update(0.0, self.initrv, data) - self.assertEqual(upd.mean().shape, (1,)) - self.assertEqual(upd.cov().shape, (1, 1)) + self.assertEqual(upd.mean.shape, (1,)) + self.assertEqual(upd.cov.shape, (1, 1)) def test_smoother(self): """ diff --git a/tests/test_filtsmooth/test_gaussfiltsmooth/test_kalmanposterior.py b/tests/test_filtsmooth/test_gaussfiltsmooth/test_kalmanposterior.py index b9a87848ce..ad1403af46 100644 --- a/tests/test_filtsmooth/test_gaussfiltsmooth/test_kalmanposterior.py +++ b/tests/test_filtsmooth/test_gaussfiltsmooth/test_kalmanposterior.py @@ -27,26 +27,16 @@ def test_locations(self): self.assertApproxEqual(self.posterior.locations[-1], self.tms[-1]) def test_getitem(self): - self.assertArrayEqual( - self.posterior[0].mean(), self.posterior.state_rvs[0].mean() - ) - self.assertArrayEqual( - self.posterior[0].cov(), self.posterior.state_rvs[0].cov() - ) + self.assertArrayEqual(self.posterior[0].mean, self.posterior.state_rvs[0].mean) + self.assertArrayEqual(self.posterior[0].cov, self.posterior.state_rvs[0].cov) self.assertArrayEqual( - self.posterior[-1].mean(), self.posterior.state_rvs[-1].mean() - ) - self.assertArrayEqual( - self.posterior[-1].cov(), self.posterior.state_rvs[-1].cov() + self.posterior[-1].mean, self.posterior.state_rvs[-1].mean ) + self.assertArrayEqual(self.posterior[-1].cov, self.posterior.state_rvs[-1].cov) - self.assertArrayEqual( - self.posterior[:].mean(), self.posterior.state_rvs[:].mean() - ) - self.assertArrayEqual( - self.posterior[:].cov(), self.posterior.state_rvs[:].cov() - ) + self.assertArrayEqual(self.posterior[:].mean, self.posterior.state_rvs[:].mean) + self.assertArrayEqual(self.posterior[:].cov, self.posterior.state_rvs[:].cov) def test_state_rvs(self): self.assertTrue(isinstance(self.posterior.state_rvs, _RandomVariableList)) @@ -67,21 +57,17 @@ def test_call_interpolation(self): def test_call_to_discrete(self): self.assertEqual(self.tms[0], 0) - self.assertArrayEqual(self.posterior(0.0).mean(), self.posterior[0].mean()) - self.assertArrayEqual(self.posterior(0.0).cov(), self.posterior[0].cov()) + self.assertArrayEqual(self.posterior(0.0).mean, self.posterior[0].mean) + self.assertArrayEqual(self.posterior(0.0).cov, self.posterior[0].cov) self.assertEqual(self.tms[-1], 19.8) - self.assertArrayEqual(self.posterior(19.8).mean(), self.posterior[-1].mean()) - self.assertArrayEqual(self.posterior(19.8).cov(), self.posterior[-1].cov()) + self.assertArrayEqual(self.posterior(19.8).mean, self.posterior[-1].mean) + self.assertArrayEqual(self.posterior(19.8).cov, self.posterior[-1].cov) + self.assertArrayEqual(self.posterior(self.tms[2]).mean, self.posterior[2].mean) + self.assertArrayEqual(self.posterior(self.tms[5]).mean, self.posterior[5].mean) self.assertArrayEqual( - self.posterior(self.tms[2]).mean(), self.posterior[2].mean() - ) - self.assertArrayEqual( - 
-            self.posterior(self.tms[5]).mean(), self.posterior[5].mean()
-        )
-        self.assertArrayEqual(
-            self.posterior(self.tms[10]).mean(), self.posterior[10].mean()
+            self.posterior(self.tms[5]).mean, self.posterior[5].mean
+        )
+        self.assertArrayEqual(
+            self.posterior(self.tms[10]).mean, self.posterior[10].mean
         )
 
     def test_call_extrapolation(self):
diff --git a/tests/test_filtsmooth/test_gaussfiltsmooth/test_unscentedkalman.py b/tests/test_filtsmooth/test_gaussfiltsmooth/test_unscentedkalman.py
index 4fc3d359e2..9f3145bf70 100644
--- a/tests/test_filtsmooth/test_gaussfiltsmooth/test_unscentedkalman.py
+++ b/tests/test_filtsmooth/test_gaussfiltsmooth/test_unscentedkalman.py
@@ -39,20 +39,20 @@ def test_initialdistribution(self):
 
     def test_predict(self):
         pred, __ = self.method.predict(0.0, self.delta_t, self.initrv)
-        self.assertEqual(pred.mean().ndim, 1)
-        self.assertEqual(pred.mean().shape[0], 4)
-        self.assertEqual(pred.cov().ndim, 2)
-        self.assertEqual(pred.cov().shape[0], 4)
-        self.assertEqual(pred.cov().shape[1], 4)
+        self.assertEqual(pred.mean.ndim, 1)
+        self.assertEqual(pred.mean.shape[0], 4)
+        self.assertEqual(pred.cov.ndim, 2)
+        self.assertEqual(pred.cov.shape[0], 4)
+        self.assertEqual(pred.cov.shape[1], 4)
 
     def test_update(self):
-        data = self.measmod.sample(0.0, self.initrv.mean())
+        data = self.measmod.sample(0.0, self.initrv.mean)
         upd, __, __, __ = self.method.update(0.0, self.initrv, data)
-        self.assertEqual(upd.mean().ndim, 1)
-        self.assertEqual(upd.mean().shape[0], 4)
-        self.assertEqual(upd.cov().ndim, 2)
-        self.assertEqual(upd.cov().shape[0], 4)
-        self.assertEqual(upd.cov().shape[1], 4)
+        self.assertEqual(upd.mean.ndim, 1)
+        self.assertEqual(upd.mean.shape[0], 4)
+        self.assertEqual(upd.cov.ndim, 2)
+        self.assertEqual(upd.cov.shape[0], 4)
+        self.assertEqual(upd.cov.shape[1], 4)
 
     def test_filtsmooth(self):
         """
@@ -121,8 +121,8 @@ def test_initialdistribution(self):
 
     def test_predict_shape(self):
         pred, __ = self.method.predict(0.0, self.delta_t, self.initrv)
-        self.assertEqual(pred.mean().shape, (1,))
-        self.assertEqual(pred.cov().shape, (1, 1))
+        self.assertEqual(pred.mean.shape, (1,))
+        self.assertEqual(pred.cov.shape, (1, 1))
 
     def test_predict_value(self):
         pred, __ = self.method.predict(0.0, self.delta_t, self.initrv)
@@ -132,16 +132,16 @@ def test_predict_value(self):
             / (2 * self.lam)
             * (1 - scipy.linalg.expm(2 * self.drift * self.delta_t))
         )
-        expectedmean = np.squeeze(ah @ (self.initrv.mean() * np.ones(1)))
-        expectedcov = np.squeeze(ah @ (self.initrv.cov() * np.eye(1)) @ ah.T + qh)
-        self.assertApproxEqual(expectedmean, pred.mean())
-        self.assertApproxEqual(expectedcov, pred.cov())
+        expectedmean = np.squeeze(ah @ (self.initrv.mean * np.ones(1)))
+        expectedcov = np.squeeze(ah @ (self.initrv.cov * np.eye(1)) @ ah.T + qh)
+        self.assertApproxEqual(expectedmean, pred.mean)
+        self.assertApproxEqual(expectedcov, pred.cov)
 
     def test_update(self):
-        data = self.measmod.sample(0.0, self.initrv.mean() * np.ones(1))
+        data = self.measmod.sample(0.0, self.initrv.mean * np.ones(1))
         upd, __, __, __ = self.method.update(0.0, self.initrv, data)
-        self.assertEqual(upd.mean().shape, (1,))
-        self.assertEqual(upd.cov().shape, (1, 1))
+        self.assertEqual(upd.mean.shape, (1,))
+        self.assertEqual(upd.cov.shape, (1, 1))
 
     def test_smoother(self):
         """
@@ -258,8 +258,7 @@ def test_filtsmooth(self):
 #
 # from probnum.filtsmooth.statespace import util
 # from probnum.filtsmooth.gaussfiltsmooth import unscentedkalman
-# from probnum.prob import RandomVariable
-# from probnum.prob.distributions import Normal
+# from probnum.prob import Normal
 # from probnum.filtsmooth.statespace.continuous.linearsdemodel import *
 # from probnum.filtsmooth.statespace.discrete.discretegaussianmodel import *
 #
@@ -293,7 +292,7 @@ def test_filtsmooth(self):
 #                                                DYNADIFF)
 #         self.measmod = DiscreteGaussianLTIModel(MEASMAT, np.zeros(len(MEASMAT)),
 #                                                 MEASDIFF)
-#         self.initrv = RandomVariable(distribution=Normal(MEAN, COV))
+#         self.initrv = Normal(MEAN, COV)
 #         self.ukf = unscentedkalman.UnscentedKalmanFilter(self.dynmod,
 #                                                          self.measmod,
 #                                                          self.initrv,
@@ -328,22 +327,22 @@ def test_filtsmooth(self):
 #         """
 #         """
 #         pred, __ = self.ukf.predict(0., DELTA_T, self.initrv)
-#         self.assertEqual(pred.mean().ndim, 1)
-#         self.assertEqual(pred.mean().shape[0], 4)
-#         self.assertEqual(pred.cov().ndim, 2)
-#         self.assertEqual(pred.cov().shape[0], 4)
-#         self.assertEqual(pred.cov().shape[1], 4)
+#         self.assertEqual(pred.mean.ndim, 1)
+#         self.assertEqual(pred.mean.shape[0], 4)
+#         self.assertEqual(pred.cov.ndim, 2)
+#         self.assertEqual(pred.cov.shape[0], 4)
+#         self.assertEqual(pred.cov.shape[1], 4)
 #
 #     def test_update(self):
 #         """
 #         """
-#         data = self.measmod.sample(0., self.initrv.mean()*np.ones(1))
+#         data = self.measmod.sample(0., self.initrv.mean*np.ones(1))
 #         upd, __, __, __ = self.ukf.update(0., self.initrv, data)
-#         self.assertEqual(upd.mean().ndim, 1)
-#         self.assertEqual(upd.mean().shape[0], 4)
-#         self.assertEqual(upd.cov().ndim, 2)
-#         self.assertEqual(upd.cov().shape[0], 4)
-#         self.assertEqual(upd.cov().shape[1], 4)
+#         self.assertEqual(upd.mean.ndim, 1)
+#         self.assertEqual(upd.mean.shape[0], 4)
+#         self.assertEqual(upd.cov.ndim, 2)
+#         self.assertEqual(upd.cov.shape[0], 4)
+#         self.assertEqual(upd.cov.shape[1], 4)
 #
 #     def test_filter(self):
 #         """
@@ -395,22 +394,22 @@ def test_filtsmooth(self):
 #         """
 #         """
 #         pred, __ = self.uks.predict(0., DELTA_T, self.initrv)
-#         self.assertEqual(pred.mean().ndim, 1)
-#         self.assertEqual(pred.mean().shape[0], 4)
-#         self.assertEqual(pred.cov().ndim, 2)
-#         self.assertEqual(pred.cov().shape[0], 4)
-#         self.assertEqual(pred.cov().shape[1], 4)
+#         self.assertEqual(pred.mean.ndim, 1)
+#         self.assertEqual(pred.mean.shape[0], 4)
+#         self.assertEqual(pred.cov.ndim, 2)
+#         self.assertEqual(pred.cov.shape[0], 4)
+#         self.assertEqual(pred.cov.shape[1], 4)
 #
 #     def test_update(self):
 #         """
 #         """
-#         data = self.measmod.sample(0., self.initrv.mean()*np.ones(1))
+#         data = self.measmod.sample(0., self.initrv.mean*np.ones(1))
 #         upd, __, __, __ = self.uks.update(0., self.initrv, data)
-#         self.assertEqual(upd.mean().ndim, 1)
-#         self.assertEqual(upd.mean().shape[0], 4)
-#         self.assertEqual(upd.cov().ndim, 2)
-#         self.assertEqual(upd.cov().shape[0], 4)
-#         self.assertEqual(upd.cov().shape[1], 4)
+#         self.assertEqual(upd.mean.ndim, 1)
+#         self.assertEqual(upd.mean.shape[0], 4)
+#         self.assertEqual(upd.cov.ndim, 2)
+#         self.assertEqual(upd.cov.shape[0], 4)
+#         self.assertEqual(upd.cov.shape[1], 4)
 #
 #     def test_smoother(self):
 #         """
@@ -482,7 +481,7 @@ def test_filtsmooth(self):
 #         self.diff = self.q * np.eye(1)
 #         self.dynmod = LTISDEModel(self.drift, self.force, self.disp, self.diff)
 #         self.measmod = DiscreteGaussianLTIModel(np.eye(1), np.zeros(1), r * np.eye(1))
-#         self.initrv = RandomVariable(distribution=Normal(10 * np.ones(1), np.eye(1)))
+#         self.initrv = Normal(10 * np.ones(1), np.eye(1))
 #         self.kf = unscentedkalman.UnscentedKalmanFilter(self.dynmod,
 #                                                         self.measmod,
 #                                                         self.initrv,
@@ -538,8 +537,8 @@ def test_filtsmooth(self):
 #         """
 #         """
 #         pred, __ = self.kf.predict(0., DELTA_T, self.initrv)
-#         self.assertEqual(np.isscalar(pred.mean()), True)
-#         self.assertEqual(np.isscalar(pred.cov()), True)
+#         self.assertEqual(np.isscalar(pred.mean), True)
+#         self.assertEqual(np.isscalar(pred.cov), True)
 #
 #     def test_predict_value(self):
 #         """
@@ -548,18 +547,18 @@ def test_filtsmooth(self):
 #         ah = scipy.linalg.expm(DELTA_T * self.drift)
 #         qh = self.q / (2 * self.lam) * (
 #                 1 - scipy.linalg.expm(2 * self.drift * DELTA_T))
-#         diff_mean = ah @ (self.initrv.mean()*np.ones(1)) - pred.mean()
-#         diff_covar = ah @ (self.initrv.cov()*np.eye(1)) @ ah.T + qh - (np.eye(1)*pred.cov())
+#         diff_mean = ah @ (self.initrv.mean*np.ones(1)) - pred.mean
+#         diff_covar = ah @ (self.initrv.cov*np.eye(1)) @ ah.T + qh - (np.eye(1)*pred.cov)
 #         self.assertLess(np.linalg.norm(diff_mean), 1e-14)
 #         self.assertLess(np.linalg.norm(diff_covar), 1e-14)
 #
 #     def test_update(self):
 #         """
 #         """
-#         data = np.array([self.measmod.sample(0., self.initrv.mean()*np.ones(1))])
+#         data = np.array([self.measmod.sample(0., self.initrv.mean*np.ones(1))])
 #         upd, __, __, __ = self.kf.update(0., self.initrv, data)
-#         self.assertEqual(np.isscalar(upd.mean()), True)
-#         self.assertEqual(np.isscalar(upd.cov()), True)
+#         self.assertEqual(np.isscalar(upd.mean), True)
+#         self.assertEqual(np.isscalar(upd.cov), True)
 #
 #     def test_filter(self):
 #         """
@@ -629,8 +628,8 @@ def test_filtsmooth(self):
 #         """
 #         """
 #         pred, __ = self.ks.predict(0., DELTA_T, self.initrv)
-#         self.assertEqual(np.isscalar(pred.mean()), True)
-#         self.assertEqual(np.isscalar(pred.cov()), True)
+#         self.assertEqual(np.isscalar(pred.mean), True)
+#         self.assertEqual(np.isscalar(pred.cov), True)
 #
 #     def test_predict_value(self):
 #         """
@@ -639,18 +638,18 @@ def test_filtsmooth(self):
 #         ah = scipy.linalg.expm(DELTA_T * self.drift)
 #         qh = self.q / (2 * self.lam) * (
 #                 1 - scipy.linalg.expm(2 * self.drift * DELTA_T))
-#         diff_mean = ah @ (self.initrv.mean()*np.ones(1)) - pred.mean()
-#         diff_covar = ah @ (self.initrv.cov()*np.eye(1)) @ ah.T + qh - (np.eye(1)*pred.cov())
+#         diff_mean = ah @ (self.initrv.mean*np.ones(1)) - pred.mean
+#         diff_covar = ah @ (self.initrv.cov*np.eye(1)) @ ah.T + qh - (np.eye(1)*pred.cov)
 #         self.assertLess(np.linalg.norm(diff_mean), 1e-14)
 #         self.assertLess(np.linalg.norm(diff_covar), 1e-14)
 #
 #     def test_update(self):
 #         """
 #         """
-#         data = np.array([self.measmod.sample(0., self.initrv.mean()*np.ones(1))])
+#         data = np.array([self.measmod.sample(0., self.initrv.mean*np.ones(1))])
 #         upd, __, __, __ = self.ks.update(0., self.initrv, data)
-#         self.assertEqual(np.isscalar(upd.mean()), True)
-#         self.assertEqual(np.isscalar(upd.cov()), True)
+#         self.assertEqual(np.isscalar(upd.mean), True)
+#         self.assertEqual(np.isscalar(upd.cov), True)
 #
 #     def test_smoother(self):
 #         """
@@ -713,7 +712,7 @@ def test_filtsmooth(self):
 #         initcov = var * np.eye(2)
 #         self.dynamod = DiscreteGaussianModel(f, lambda t: q)
 #         self.measmod = DiscreteGaussianModel(h, lambda t: self.r)
-#         self.initdist = RandomVariable(distribution=Normal(initmean, initcov))
+#         self.initdist = Normal(initmean, initcov)
 #         self.times = np.arange(0, 4, delta_t)
 #         self.q = q
 #         alpha, beta, kappa = np.random.rand(3)
diff --git a/tests/test_filtsmooth/test_statespace/test_continuous/test_continuousmodel.py b/tests/test_filtsmooth/test_statespace/test_continuous/test_continuousmodel.py
index c1ac830aca..f34258a312 100644
--- a/tests/test_filtsmooth/test_statespace/test_continuous/test_continuousmodel.py
+++ b/tests/test_filtsmooth/test_statespace/test_continuous/test_continuousmodel.py
@@ -2,8 +2,7 @@
 
 import numpy as np
 
-from probnum.prob import RandomVariable
-from probnum.prob.distributions import Normal, Dirac
+from probnum.prob import Normal, Dirac
 from probnum.filtsmooth.statespace.continuous import continuousmodel
 
 VISUALISE = False
@@ -61,8 +60,8 @@ def setUp(self):
 
     def test_sample(self):
         mean, cov = np.zeros(TEST_NDIM), np.eye(TEST_NDIM)
-        randvar = RandomVariable(distribution=Normal(mean, cov))
-        samp = self.mcm.sample(0.0, 1.0, 0.01, randvar.mean())
+        randvar = Normal(mean, cov)
+        samp = self.mcm.sample(0.0, 1.0, 0.01, randvar.mean)
         self.assertEqual(samp.ndim, 1)
         self.assertEqual(samp.shape[0], TEST_NDIM)
 
@@ -123,8 +122,8 @@ class TestDeterministicModel(unittest.TestCase):
 
     def setUp(self):
         dm = DeterministicModel()
-        randvar = RandomVariable(distribution=Dirac(np.ones(TEST_NDIM)))
-        self.samp = dm.sample(0.0, 1.0, 0.01, randvar.mean())
+        randvar = Dirac(np.ones(TEST_NDIM))
+        self.samp = dm.sample(0.0, 1.0, 0.01, randvar.mean)
 
     def test_sample_shape(self):
         self.assertEqual(self.samp.ndim, 1)
diff --git a/tests/test_filtsmooth/test_statespace/test_continuous/test_linearsdemodel.py b/tests/test_filtsmooth/test_statespace/test_continuous/test_linearsdemodel.py
index da1e3bb227..cdc06c3f2f 100644
--- a/tests/test_filtsmooth/test_statespace/test_continuous/test_linearsdemodel.py
+++ b/tests/test_filtsmooth/test_statespace/test_continuous/test_linearsdemodel.py
@@ -2,8 +2,7 @@
 
 import numpy as np
 
-from probnum.prob import RandomVariable
-from probnum.prob.distributions import Normal
+from probnum.prob import Normal
 from probnum.filtsmooth.statespace.continuous import linearsdemodel
 
 TEST_NDIM = 2
@@ -62,15 +61,15 @@ def test_chapmankolmogorov(self):
         is according to iteration.
         """
         mean, cov = np.ones(TEST_NDIM), np.eye(TEST_NDIM)
-        rvar = RandomVariable(distribution=Normal(mean, cov))
+        rvar = Normal(mean, cov)
         cke, __ = self.lm.chapmankolmogorov(0.0, 1.0, 1.0, rvar)
-        diff_mean = self.driftmat @ rvar.mean() + self.force - cke.mean() + rvar.mean()
+        diff_mean = self.driftmat @ rvar.mean + self.force - cke.mean + rvar.mean
         diff_cov = (
-            self.driftmat @ rvar.cov()
-            + rvar.cov() @ self.driftmat.T
+            self.driftmat @ rvar.cov
+            + rvar.cov @ self.driftmat.T
             + self.dispmat @ self.diffmat @ self.dispmat.T
-            + rvar.cov()
-            - cke.cov()
+            + rvar.cov
+            - cke.cov
         )
         self.assertLess(np.linalg.norm(diff_mean), 1e-14)
         self.assertLess(np.linalg.norm(diff_cov), 1e-14)
@@ -159,11 +158,11 @@ def test_chapmankolmogorov(self):
         is according to closed form of IBM kernels.
         """
         mean, cov = np.ones(TEST_NDIM), np.eye(TEST_NDIM)
-        rvar = RandomVariable(distribution=Normal(mean, cov))
+        rvar = Normal(mean, cov)
         delta = 0.1
         cke, __ = self.lti.chapmankolmogorov(0.0, 1.0, delta, rvar)
         ah, xih, qh = ibm_a(1.0), ibm_xi(1.0), ibm_q(1.0)
-        diff_mean = np.linalg.norm(ah @ rvar.mean() + xih - cke.mean())
-        diff_cov = np.linalg.norm(ah @ rvar.cov() @ ah.T + qh - cke.cov())
+        diff_mean = np.linalg.norm(ah @ rvar.mean + xih - cke.mean)
+        diff_cov = np.linalg.norm(ah @ rvar.cov @ ah.T + qh - cke.cov)
         self.assertLess(diff_mean, 1e-14)
         self.assertLess(diff_cov, 1e-14)
diff --git a/tests/test_linalg/test_linearsolvers/test_linearsolvers.py b/tests/test_linalg/test_linearsolvers/test_linearsolvers.py
index 080fd4afb8..2295b381f8 100644
--- a/tests/test_linalg/test_linearsolvers/test_linearsolvers.py
+++ b/tests/test_linalg/test_linearsolvers/test_linearsolvers.py
@@ -126,9 +126,9 @@ def test_symmetric_posterior_params(self):
         for matblinsolve in self.matblinsolvers:
             with self.subTest():
                 _, _, Ainv, _ = matblinsolve(A=A, b=b)
-                Ainv_mean = Ainv.mean().todense()
-                Ainv_cov_A = Ainv.cov().A.todense()
-                Ainv_cov_B = Ainv.cov().B.todense()
+                Ainv_mean = Ainv.mean.todense()
+                Ainv_cov_A = Ainv.cov.A.todense()
+                Ainv_cov_B = Ainv.cov.B.todense()
                 self.assertAllClose(Ainv_mean, Ainv_mean.T, rtol=1e-6)
                 self.assertAllClose(Ainv_cov_A, Ainv_cov_B, rtol=1e-6)
                 self.assertAllClose(Ainv_cov_A, Ainv_cov_A.T, rtol=1e-6)
@@ -146,7 +146,7 @@ def test_zero_rhs(self):
             with self.subTest():
                 for tol in tols:
                     x, _, _, info = plinsolve(A=A, b=b, atol=tol)
-                    self.assertAllClose(x.mean(), 0, atol=1e-15)
+                    self.assertAllClose(x.mean, 0, atol=1e-15)
 
     def test_multiple_rhs(self):
         """Linear system with matrix right hand side."""
@@ -164,7 +164,7 @@ def test_multiple_rhs(self):
                     B.shape,
                     msg="Shape of solution and right hand side do not match.",
                 )
-                self.assertAllClose(x.mean(), np.linalg.solve(A, B))
+                self.assertAllClose(x.mean, np.linalg.solve(A, B))
 
     def test_spd_matrix(self):
         """Random spd matrix."""
@@ -179,7 +179,7 @@ def test_spd_matrix(self):
             with self.subTest():
                 x, _, _, info = matblinsolve(A=A, b=b)
                 self.assertAllClose(
-                    x.mean(),
+                    x.mean,
                     x_true,
                     rtol=1e-6,
                    atol=1e-6,
@@ -195,7 +195,7 @@ def test_sparse_poisson(self):
             with self.subTest():
                 u_solver, Ahat, Ainvhat, info = plinsolve(A=A, b=f)
                 self.assertAllClose(
-                    u_solver.mean(),
+                    u_solver.mean,
                     u,
                     rtol=1e-5,
                     msg="Solution from probabilistic linear solver does"
@@ -211,7 +211,7 @@ def test_residual_matches_error(self):
                 x_est, Ahat, Ainvhat, info = plinsolve(A=A, b=b)
                 self.assertAlmostEqual(
                     info["resid_l2norm"],
-                    np.linalg.norm(A @ x_est.mean() - b),
+                    np.linalg.norm(A @ x_est.mean - b),
                     msg="Residual in output info does not match l2-error of solution estimate.",
                 )
 
@@ -225,7 +225,7 @@
     #             u_solver, Ahat, Ainvhat, info = matblinsolve(A=A, b=f)
    #
     #             # E[x] = E[A^-1] b
-    #             self.assertAllClose(u_solver.mean(), (Ainvhat @ f[:, None]).mean().ravel(), rtol=1e-5,
+    #             self.assertAllClose(u_solver.mean, (Ainvhat @ f[:, None]).mean.ravel(), rtol=1e-5,
     #                                 msg="Solution from matrix-based probabilistic linear solver does not match the " +
     #                                     "estimated inverse, i.e. x =/= Ainv @ b ")
@@ -260,13 +260,13 @@ def callback_postparams(xk, Ak, Ainvk, sk, yk, alphak, resid):
         Y = np.squeeze(np.array(Y)).T
 
         self.assertAllClose(
-            np.array(Ahat.cov().A.todense()) @ S,
+            np.array(Ahat.cov.A.todense()) @ S,
             np.zeros_like(S),
             atol=1e-6,
             msg="Uncertainty over A in explored space span(S) not zero.",
         )
         self.assertAllClose(
-            np.array(Ainvhat.cov().A.todense()) @ Y,
+            np.array(Ainvhat.cov.A.todense()) @ Y,
             np.zeros_like(S),
             atol=1e-6,
             msg="Uncertainty over Ainv in explored space span(Y) not zero.",
@@ -286,13 +286,12 @@ def test_posterior_covariance_posdef(self):
         # Check positive definiteness
         self.assertArrayLess(
             np.zeros(np.shape(A)[0]),
-            np.real_if_close(np.linalg.eigvals(Ahat.cov().A.todense())) + eps,
+            np.real_if_close(np.linalg.eigvals(Ahat.cov.A.todense())) + eps,
             msg="Covariance of A not positive semi-definite.",
         )
         self.assertArrayLess(
             np.zeros(np.shape(A)[0]),
-            np.real_if_close(np.linalg.eigvals(Ainvhat.cov().A.todense()))
-            + eps,
+            np.real_if_close(np.linalg.eigvals(Ainvhat.cov.A.todense())) + eps,
             msg="Covariance of Ainv not positive semi-definite.",
         )
 
@@ -308,14 +307,14 @@ def test_matrixprior(self):
 
         # Prior distributions on A
        covA = linops.SymmetricKronecker(A=np.eye(n))
-        Ainv0 = prob.RandomVariable(distribution=prob.Normal(mean=np.eye(n), cov=covA))
+        Ainv0 = prob.random_variable.Normal(mean=np.eye(n), cov=covA)
 
         for matblinsolve in self.matblinsolvers:
             with self.subTest():
                 x, Ahat, Ainvhat, info = matblinsolve(A=A, Ainv0=Ainv0, b=b)
 
                 self.assertAllClose(
-                    x.mean(),
+                    x.mean,
                     x_true,
                     rtol=1e-6,
                     atol=1e-6,
@@ -368,7 +367,7 @@ def callback_iterates_CG(xk):
 
         # Solve linear system
 
-        # Initial guess as chosen by PLS: x0 = Ainv.mean() @ b
+        # Initial guess as chosen by PLS: x0 = Ainv.mean @ b
         x0 = b
 
         # Conjugate gradient method
@@ -378,16 +377,12 @@ def callback_iterates_CG(xk):
         cg_iters_arr = np.array([x0] + cg_iterates)
 
         # Matrix priors (encoding weak symmetric posterior correspondence)
-        Ainv0 = prob.RandomVariable(
-            distribution=prob.Normal(
-                mean=linops.Identity(A.shape[1]),
-                cov=linops.SymmetricKronecker(A=linops.Identity(A.shape[1])),
-            )
+        Ainv0 = prob.random_variable.Normal(
+            mean=linops.Identity(A.shape[1]),
+            cov=linops.SymmetricKronecker(A=linops.Identity(A.shape[1])),
         )
-        A0 = prob.RandomVariable(
-            distribution=prob.Normal(
-                mean=linops.Identity(A.shape[1]), cov=linops.SymmetricKronecker(A)
-            )
+        A0 = prob.random_variable.Normal(
+            mean=linops.Identity(A.shape[1]), cov=linops.SymmetricKronecker(A)
         )
         for kwargs in [{"assume_A": "sympos", "rtol": 10 ** -6}]:
             with self.subTest():
@@ -398,7 +393,7 @@ def callback_iterates_PLS(
                     xk, Ak, Ainvk, sk, yk, alphak, resid, **kwargs
                 ):
-                    pls_iterates.append(xk.mean())
+                    pls_iterates.append(xk.mean)
 
                 # Probabilistic linear solver
                 xhat_pls, _, _, info_pls = linalg.problinsolve(
@@ -411,7 +406,7 @@ def callback_iterates_PLS(
                 )
                 pls_iters_arr = np.array([x0] + pls_iterates)
 
-                self.assertAllClose(xhat_pls.mean(), xhat_cg, rtol=10 ** -12)
+                self.assertAllClose(xhat_pls.mean, xhat_cg, rtol=10 ** -12)
                 self.assertAllClose(pls_iters_arr, cg_iters_arr, rtol=10 ** -12)
 
     def test_prior_distributions(self):
@@ -429,7 +424,7 @@ def test_iterative_covariance_trace_update(self):
                 )
                 self.assertAlmostEqual(
                     info["trace_sol_cov"],
-                    x_est.cov().trace(),
+                    x_est.cov.trace(),
                     msg="Iteratively computed trace not equal to trace of solution covariance.",
                 )
 
@@ -444,7 +439,7 @@ def test_uncertainty_calibration_error(self):
                     A=A, b=b, calibration=calib_method
                 )
                 self.assertLessEqual(
-                    (x_true - x_est.mean()).T @ A @ (x_true - x_est.mean()),
+                    (x_true - x_est.mean).T @ A @ (x_true - x_est.mean),
                     tol,
                     msg="Estimated solution not sufficiently close to true solution.",
                )
diff --git a/tests/test_prob/test_distributions/__init__.py b/tests/test_prob/test_distributions/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/tests/test_prob/test_distributions/test_distribution.py b/tests/test_prob/test_distributions/test_distribution.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/tests/test_prob/test_distributions/test_dirac.py b/tests/test_prob/test_random_variables/test_dirac.py
similarity index 81%
rename from tests/test_prob/test_distributions/test_dirac.py
rename to tests/test_prob/test_random_variables/test_dirac.py
index a32eb04f22..8d44a02442 100644
--- a/tests/test_prob/test_distributions/test_dirac.py
+++ b/tests/test_prob/test_random_variables/test_dirac.py
@@ -3,7 +3,8 @@
 
 import numpy as np
 
-from probnum import prob
+from probnum.prob import random_variable as rv
+from probnum import utils as _utils
 
 
 class TestDirac(unittest.TestCase):
@@ -20,9 +21,8 @@ def test_sample_shapes(self):
         for supp in self.supports:
             for sample_size in [1, (), 10, (4,), (3, 2)]:
                 with self.subTest():
-                    s = prob.Dirac(support=supp).sample(size=sample_size)
-                    # pylint: disable=consider-using-in
-                    if sample_size == 1 or sample_size == ():
+                    s = rv.Dirac(support=supp).sample(size=sample_size)
+                    if sample_size == ():
                         self.assertEqual(np.shape(supp), np.shape(s))
                     elif isinstance(sample_size, tuple):
                         self.assertEqual(sample_size + np.shape(supp), np.shape(s))
diff --git a/tests/test_prob/test_distributions/test_normal.py b/tests/test_prob/test_random_variables/test_normal.py
similarity index 59%
rename from tests/test_prob/test_distributions/test_normal.py
rename to tests/test_prob/test_random_variables/test_normal.py
index 9c773b1ee8..3701afb891 100644
--- a/tests/test_prob/test_distributions/test_normal.py
+++ b/tests/test_prob/test_random_variables/test_normal.py
@@ -62,7 +62,7 @@ def setUp(self):
         sparsemat = scipy.sparse.rand(m=m, n=n, density=0.1, random_state=1)
         self.normal_params = [
             # Univariate
-            (-1, 3),
+            (-1.0, 3.0),
             # Multivariate
             (np.random.uniform(size=10), np.eye(10)),
             (np.random.uniform(size=10), _random_spd_matrix(10)),
@@ -74,7 +74,7 @@ def setUp(self):
                 ).todense(),
             ),
             # Operatorvariate
-            (np.array([1, -5]), linops.MatrixMult(A=np.array([[2, 1], [1, -0.1]]))),
+            (np.array([1.0, -5.0]), linops.MatrixMult(A=np.array([[2, 1], [1, -0.1]]))),
             (linops.MatrixMult(A=np.array([[0, -5]])), linops.Identity(shape=(2, 2))),
             (
                 np.array([[1, 2], [-3, -0.4], [4, 1]]),
@@ -110,14 +110,14 @@ def test_scalarmult(self):
             itertools.product(self.normal_params, self.constants)
         ):
             with self.subTest():
-                normrv = const * prob.RandomVariable(
-                    distribution=prob.Normal(mean=mean, cov=cov)
-                )
+                normrv = const * prob.random_variable.Normal(mean=mean, cov=cov)
+                self.assertIsInstance(normrv, prob.RandomVariable)
+
                 if const != 0:
-                    self.assertIsInstance(normrv.distribution, prob.Normal)
+                    self.assertIsInstance(normrv, prob.random_variable.Normal)
                 else:
-                    self.assertIsInstance(normrv.distribution, prob.Dirac)
+                    self.assertIsInstance(normrv, prob.random_variable.Dirac)
 
     def test_addition_normal(self):
         """Add two random variables with a normal distribution"""
@@ -125,20 +125,21 @@ def test_addition_normal(self):
             itertools.product(self.normal_params, self.normal_params)
         ):
             with self.subTest():
-                normrv0 = prob.RandomVariable(
-                    distribution=prob.Normal(mean=mean0, cov=cov0)
-                )
-                normrv1 = prob.RandomVariable(
-                    distribution=prob.Normal(mean=mean1, cov=cov1)
-                )
-
-                if not isinstance(normrv1.distribution, type(normrv0.distribution)):
-                    continue
+                normrv0 = prob.Normal(mean=mean0, cov=cov0)
+                normrv1 = prob.Normal(mean=mean1, cov=cov1)
 
                 if normrv0.shape == normrv1.shape:
-                    self.assertIsInstance((normrv0 + normrv1).distribution, prob.Normal)
+                    try:
+                        newmean = mean0 + mean1
+                        newcov = cov0 + cov1
+                    except TypeError:
+                        continue
+
+                    self.assertIsInstance(
+                        normrv0 + normrv1, prob.random_variable.Normal
+                    )
                 else:
-                    with self.assertRaises(TypeError):
+                    with self.assertRaises(ValueError):
                         normrv_added = normrv0 + normrv1
 
     def test_rv_linop_kroneckercov(self):
         def mv(v):
 
         A = linops.LinearOperator(shape=(2, 2), matvec=mv)
         V = linops.Kronecker(A, A)
-        prob.RandomVariable(distribution=prob.Normal(mean=A, cov=V))
+        prob.Normal(mean=A, cov=V)
 
     def test_normal_dimension_mismatch(self):
         """Instantiating a normal distribution with mismatched mean and covariance should result in a ValueError."""
         for mean, cov in [
-            (0, [1, 2]),
+            (0, np.array([1, 2])),
             (np.array([1, 2]), np.array([1, 0])),
             (np.array([[-1, 0], [2, 1]]), np.eye(3)),
         ]:
@@ -184,13 +185,12 @@ def test_sample(self):
         """Draw samples and check all sample dimensions."""
         for mean, cov in self.normal_params:
             with self.subTest():
-                # TODO: check dimension of each realization in dist_sample
-                dist = prob.Normal(mean=mean, cov=cov, random_state=1)
-                dist_sample = dist.sample(size=5)
-                if not np.isscalar(dist.mean()):
-                    ndims_rv = len(mean.shape)
+                # TODO: check dimension of each realization in rv_sample
+                rv = prob.Normal(mean=mean, cov=cov, random_state=1)
+                rv_sample = rv.sample(size=5)
+                if not np.isscalar(rv.mean):
                     self.assertEqual(
-                        dist_sample.shape[-ndims_rv:],
+                        rv_sample.shape[-rv.ndim :],
                         mean.shape,
                         msg="Realization shape does not match mean shape.",
                     )
@@ -199,15 +199,13 @@ def test_sample_zero_cov(self):
         """Draw sample from distribution with zero covariance and check whether it equals the mean."""
         for mean, cov in self.normal_params:
             with self.subTest():
-                dist = prob.Normal(mean=mean, cov=0 * cov, random_state=1)
-                dist_sample = dist.sample(size=1)
+                rv = prob.Normal(mean=mean, cov=0 * cov, random_state=1)
+                rv_sample = rv.sample(size=1)
                 assert_str = "Draw with covariance zero does not match mean."
-                if isinstance(dist.mean(), linops.LinearOperator):
-                    self.assertAllClose(
-                        dist_sample, dist.mean().todense(), msg=assert_str
-                    )
+                if isinstance(rv.mean, linops.LinearOperator):
+                    self.assertAllClose(rv_sample, rv.mean.todense(), msg=assert_str)
                 else:
-                    self.assertAllClose(dist_sample, dist.mean(), msg=assert_str)
+                    self.assertAllClose(rv_sample, rv.mean, msg=assert_str)
 
     def test_symmetric_samples(self):
         """Samples from a normal distribution with symmetric Kronecker covariance of two symmetric matrices are
@@ -216,11 +214,11 @@
         n = 3
         A = np.random.uniform(size=(n, n))
         A = 0.5 * (A + A.T) + n * np.eye(n)
-        dist = prob.Normal(
+        rv = prob.Normal(
             mean=np.eye(A.shape[0]), cov=linops.SymmetricKronecker(A=A), random_state=1
         )
-        dist_sample = dist.sample(size=10)
-        for i, B in enumerate(dist_sample):
+        rv_sample = rv.sample(size=10)
+        for i, B in enumerate(rv_sample):
             self.assertAllClose(
                 B,
                 B.T,
@@ -234,52 +232,47 @@ def test_indexing(self):
         """
         Indexing with Python integers yields a univariate normal distribution.
""" for mean, cov in self.normal_params: - dist = prob.Normal(mean=mean, cov=cov) + rv = prob.Normal(mean=mean, cov=cov) - if isinstance( - dist, - ( - prob.distributions.normal._UnivariateNormal, - prob.distributions.normal._OperatorvariateNormal, # TODO: Implement slicing on linear operators - ), + if isinstance(rv.mean, linops.LinearOperator) or isinstance( + rv.cov, linops.LinearOperator ): + # TODO: Implement slicing on linear operators continue with self.subTest(): # Sample random index - idx = tuple(np.random.randint(dim_size) for dim_size in dist.shape) + idx = tuple(np.random.randint(dim_size) for dim_size in rv.shape) # Index into distribution - index_dist = dist[idx] + indexed_rv = rv[idx] - self.assertIsInstance( - index_dist, prob.distributions.normal._UnivariateNormal - ) + self.assertIsInstance(indexed_rv, prob.Normal) # Compare with expected parameter values - if len(dist.shape) == 1: - flat_idx = idx[0] + if rv.ndim == 0: + flat_idx = () + elif rv.ndim == 1: + flat_idx = (idx[0],) else: - assert len(dist.shape) == 2 + assert rv.ndim == 2 - flat_idx = idx[0] * dist.shape[1] + idx[1] + flat_idx = (idx[0] * rv.shape[1] + idx[1],) - self.assertEqual(index_dist.mean(), dist.mean()[idx]) - self.assertEqual(index_dist.var(), dist.var()[flat_idx]) - self.assertEqual(index_dist.cov(), dist.cov()[flat_idx, flat_idx]) + self.assertEqual(indexed_rv.shape, ()) + self.assertEqual(indexed_rv.mean, rv.mean[idx]) + self.assertEqual(indexed_rv.var, rv.var[idx]) + self.assertEqual(indexed_rv.cov, rv.cov[flat_idx + flat_idx]) def test_slicing(self): """ Slicing into a normal distribution yields a normal distribution of the same type """ for mean, cov in self.normal_params: - dist = prob.Normal(mean=mean, cov=cov) + rv = prob.Normal(mean=mean, cov=cov) - if isinstance( - dist, - ( - prob.distributions.normal._UnivariateNormal, - prob.distributions.normal._OperatorvariateNormal, # TODO: Implement slicing on linear operators - ), + if isinstance(rv.mean, linops.LinearOperator) or isinstance( + rv.cov, linops.LinearOperator ): + # TODO: Implement slicing on linear operators continue def _random_slice(dim_size): @@ -290,35 +283,34 @@ def _random_slice(dim_size): with self.subTest(): # Sample random slice objects for each dimension - slices = tuple(_random_slice(dim_size) for dim_size in dist.shape) + slices = tuple(_random_slice(dim_size) for dim_size in rv.shape) # Get slice from distribution - sliced_dist = dist[slices] - - self.assertIsInstance(sliced_dist, type(dist)) + sliced_rv = rv[slices] # Compare with expected parameter values - slice_mask = np.zeros_like(dist.mean(), dtype=np.bool) + slice_mask = np.zeros_like(rv.mean, dtype=np.bool) slice_mask[slices] = True slice_mask = slice_mask.ravel() - self.assertArrayEqual(sliced_dist.mean(), dist.mean()[slices]) - self.assertArrayEqual(sliced_dist.var(), dist.var()[slice_mask]) - self.assertArrayEqual( - sliced_dist.cov(), dist.cov()[np.ix_(slice_mask, slice_mask)] - ) + self.assertArrayEqual(sliced_rv.mean, rv.mean[slices]) + self.assertArrayEqual(sliced_rv.var, rv.var[slices]) + + if rv.ndim > 0: + self.assertArrayEqual( + sliced_rv.cov, rv.cov[np.ix_(slice_mask, slice_mask)] + ) + else: + self.assertArrayEqual(sliced_rv.cov, rv.cov) def test_array_indexing(self): """ Indexing with 1-dim integer arrays yields a multivariate normal. 
""" for mean, cov in self.normal_params: - dist = prob.Normal(mean=mean, cov=cov) + rv = prob.Normal(mean=mean, cov=cov) - if isinstance( - dist, - ( - prob.distributions.normal._UnivariateNormal, - prob.distributions.normal._OperatorvariateNormal, - ), + if rv.ndim == 0 or ( + isinstance(rv.mean, linops.LinearOperator) + or isinstance(rv.cov, linops.LinearOperator) ): continue @@ -329,111 +321,101 @@ def test_array_indexing(self): ) # Index into distribution - index_dist = dist[idcs] + indexed_rv = rv[idcs] - self.assertIsInstance( - index_dist, prob.distributions.normal._MultivariateNormal - ) + self.assertIsInstance(indexed_rv, prob.random_variable.Normal) # Compare with expected parameter values - if len(dist.shape) == 1: + if len(rv.shape) == 1: flat_idcs = idcs[0] else: - assert len(dist.shape) == 2 + assert len(rv.shape) == 2 - flat_idcs = idcs[0] * dist.shape[1] + idcs[1] + flat_idcs = idcs[0] * rv.shape[1] + idcs[1] - self.assertEqual(index_dist.shape, (10,)) + self.assertEqual(indexed_rv.shape, (10,)) - self.assertArrayEqual(index_dist.mean(), dist.mean()[idcs]) - self.assertArrayEqual(index_dist.var(), dist.var()[flat_idcs]) + self.assertArrayEqual(indexed_rv.mean, rv.mean[idcs]) + self.assertArrayEqual(indexed_rv.var, rv.var[idcs]) self.assertArrayEqual( - index_dist.cov(), dist.cov()[np.ix_(flat_idcs, flat_idcs)] + indexed_rv.cov, rv.cov[np.ix_(flat_idcs, flat_idcs)] ) def test_array_indexing_broadcast(self): """ Indexing with broadcasted integer arrays yields a matrixvariate normal. """ for mean, cov in self.normal_params: - dist = prob.Normal(mean=mean, cov=cov) - - if isinstance( - dist, - ( - prob.distributions.normal._UnivariateNormal, - prob.distributions.normal._MultivariateNormal, - prob.distributions.normal._OperatorvariateNormal, - ), + rv = prob.Normal(mean=mean, cov=cov) + + if rv.ndim != 2 or ( + isinstance(rv.mean, linops.LinearOperator) + or isinstance(rv.cov, linops.LinearOperator) ): + # TODO: Implement slicing on linear operators continue - assert len(dist.shape) == 2 + assert rv.ndim == 2 with self.subTest(): # Sample random indices idcs = np.ix_( *tuple( - np.random.randint(dim_shape, size=10) - for dim_shape in dist.shape + np.random.randint(dim_shape, size=10) for dim_shape in rv.shape ) ) # Index into distribution - index_dist = dist[idcs] + indexed_rv = rv[idcs] - self.assertIsInstance( - index_dist, prob.distributions.normal._MatrixvariateNormal - ) - self.assertEqual(index_dist.shape, (10, 10)) + self.assertIsInstance(indexed_rv, prob.random_variable.Normal) + self.assertEqual(indexed_rv.shape, (10, 10)) # Compare with expected parameter values flat_idcs = np.broadcast_arrays(*idcs) - flat_idcs = flat_idcs[0] * dist.shape[1] + flat_idcs[1] + flat_idcs = flat_idcs[0] * rv.shape[1] + flat_idcs[1] flat_idcs = flat_idcs.ravel() - self.assertArrayEqual(index_dist.mean(), dist.mean()[idcs]) - self.assertArrayEqual(index_dist.var(), dist.var()[flat_idcs]) + self.assertArrayEqual(indexed_rv.mean, rv.mean[idcs]) + self.assertArrayEqual(indexed_rv.var, rv.var[idcs]) self.assertArrayEqual( - index_dist.cov(), dist.cov()[np.ix_(flat_idcs, flat_idcs)] + indexed_rv.cov, rv.cov[np.ix_(flat_idcs, flat_idcs)] ) def test_masking(self): """ Masking a multivariate or matrixvariate normal yields a multivariate normal. 
""" for mean, cov in self.normal_params: - dist = prob.Normal(mean=mean, cov=cov) + rv = prob.Normal(mean=mean, cov=cov) - if isinstance( - dist, - ( - prob.distributions.normal._UnivariateNormal, - prob.distributions.normal._OperatorvariateNormal, - ), + if isinstance(rv.mean, linops.LinearOperator) or isinstance( + rv.cov, linops.LinearOperator ): continue with self.subTest(): # Sample random indices idcs = tuple( - np.random.randint(dim_shape, size=10) for dim_shape in mean.shape + np.random.randint(dim_shape, size=10) for dim_shape in rv.shape ) - mask = np.zeros_like(dist.mean(), dtype=np.bool) + mask = np.zeros_like(rv.mean, dtype=np.bool) mask[idcs] = True # Mask distribution - index_dist = dist[mask] + masked_rv = rv[mask] - self.assertIsInstance( - index_dist, prob.distributions.normal._MultivariateNormal - ) + self.assertIsInstance(masked_rv, prob.random_variable.Normal) # Compare with expected parameter values flat_mask = mask.flatten() - self.assertArrayEqual(index_dist.mean(), dist.mean()[mask]) - self.assertArrayEqual(index_dist.var(), dist.var()[flat_mask]) - self.assertArrayEqual( - index_dist.cov(), dist.cov()[np.ix_(flat_mask, flat_mask)] - ) + self.assertArrayEqual(masked_rv.mean, rv.mean[mask]) + self.assertArrayEqual(masked_rv.var, rv.var[mask]) + + if rv.ndim == 0: + self.assertArrayEqual(masked_rv.cov, rv.cov) + else: + self.assertArrayEqual( + masked_rv.cov, rv.cov[np.ix_(flat_mask, flat_mask)] + ) class UnivariateNormalTestCase(unittest.TestCase, NumpyAssertions): @@ -456,23 +438,16 @@ def test_reshape_newaxis(self): elif method == "reshape": newdist = dist.reshape(newshape) - self.assertIsInstance( - newdist, - prob.distributions.normal._MultivariateNormal - if ndim == 1 - else prob.distributions.normal._MatrixvariateNormal, - ) - self.assertEqual(newdist.shape, newshape) - self.assertEqual(np.squeeze(newdist.mean()), dist.mean()) - self.assertEqual(np.squeeze(newdist.cov()), dist.cov()) + self.assertEqual(np.squeeze(newdist.mean), dist.mean) + self.assertEqual(np.squeeze(newdist.cov), dist.cov) def test_transpose(self): dist = prob.Normal(*self.params) dist_t = dist.transpose() - self.assertArrayEqual(dist_t.mean(), dist.mean()) - self.assertArrayEqual(dist_t.cov(), dist.cov()) + self.assertArrayEqual(dist_t.mean, dist.mean) + self.assertArrayEqual(dist_t.cov, dist.cov) # Test sampling dist.random_state = 42 @@ -489,58 +464,54 @@ def setUp(self): self.params = (np.random.uniform(size=10), _random_spd_matrix(10)) def test_newaxis(self): - dist = prob.Normal(*self.params) - - matrix_dist = dist[:, np.newaxis] + vector_rv = prob.Normal(*self.params) - self.assertIsInstance( - matrix_dist, prob.distributions.normal._MatrixvariateNormal - ) + matrix_rv = vector_rv[:, np.newaxis] - self.assertEqual(matrix_dist.shape, (10, 1)) - self.assertArrayEqual(np.squeeze(matrix_dist.mean()), dist.mean()) - self.assertArrayEqual(matrix_dist.cov(), dist.cov()) + self.assertEqual(matrix_rv.shape, (10, 1)) + self.assertArrayEqual(np.squeeze(matrix_rv.mean), vector_rv.mean) + self.assertArrayEqual(matrix_rv.cov, vector_rv.cov) def test_reshape(self): - dist = prob.Normal(*self.params) + rv = prob.Normal(*self.params) newshape = (5, 2) - dist_reshape = dist.reshape(newshape) + reshaped_rv = rv.reshape(newshape) - self.assertArrayEqual(dist_reshape.mean(), dist.mean().reshape(newshape)) - self.assertArrayEqual(dist_reshape.cov(), dist.cov()) + self.assertArrayEqual(reshaped_rv.mean, rv.mean.reshape(newshape)) + self.assertArrayEqual(reshaped_rv.cov, rv.cov) # Test sampling - 
dist.random_state = 42 - dist_sample = dist.sample(size=5) + rv.random_state = 42 + dist_sample = rv.sample(size=5) - dist_reshape.random_state = 42 - dist_reshape_sample = dist_reshape.sample(size=5) + reshaped_rv.random_state = 42 + dist_reshape_sample = reshaped_rv.sample(size=5) self.assertArrayEqual( dist_reshape_sample, dist_sample.reshape((-1,) + newshape) ) def test_transpose(self): - dist = prob.Normal(*self.params) - dist_t = dist.transpose() + rv = prob.Normal(*self.params) + transposed_rv = rv.transpose() - self.assertArrayEqual(dist_t.mean(), dist.mean()) - self.assertArrayEqual(dist_t.cov(), dist.cov()) + self.assertArrayEqual(transposed_rv.mean, rv.mean) + self.assertArrayEqual(transposed_rv.cov, rv.cov) # Test sampling - dist.random_state = 42 - dist_sample = dist.sample(size=5) + rv.random_state = 42 + dist_sample = rv.sample(size=5) - dist_t.random_state = 42 - dist_t_sample = dist_t.sample(size=5) + transposed_rv.random_state = 42 + dist_t_sample = transposed_rv.sample(size=5) self.assertArrayEqual(dist_t_sample, dist_sample) class MatrixvariateNormalTestCase(unittest.TestCase, NumpyAssertions): def test_reshape(self): - dist = prob.Normal( + rv = prob.Normal( mean=np.random.uniform(size=(4, 3)), cov=linops.Kronecker( A=_random_spd_matrix(4), B=_random_spd_matrix(3) @@ -548,29 +519,29 @@ def test_reshape(self): ) newshape = (2, 6) - dist_reshape = dist.reshape(newshape) + reshaped_rv = rv.reshape(newshape) - self.assertArrayEqual(dist_reshape.mean(), dist.mean().reshape(newshape)) - self.assertArrayEqual(dist_reshape.cov(), dist.cov()) + self.assertArrayEqual(reshaped_rv.mean, rv.mean.reshape(newshape)) + self.assertArrayEqual(reshaped_rv.cov, rv.cov) # Test sampling - dist.random_state = 42 - dist_sample = dist.sample(size=5) + rv.random_state = 42 + dist_sample = rv.sample(size=5) - dist_reshape.random_state = 42 - dist_reshape_sample = dist_reshape.sample(size=5) + reshaped_rv.random_state = 42 + dist_reshape_sample = reshaped_rv.sample(size=5) self.assertArrayEqual( dist_reshape_sample, dist_sample.reshape((-1,) + newshape) ) def test_transpose(self): - dist = prob.Normal( + rv = prob.Normal( mean=np.random.uniform(size=(2, 2)), cov=_random_spd_matrix(4), ) - dist_t = dist.transpose() + transposed_rv = rv.transpose() - self.assertArrayEqual(dist_t.mean(), dist.mean().T) + self.assertArrayEqual(transposed_rv.mean, rv.mean.T) # Test covariance for ii, ij in itertools.product(range(2), range(2)): @@ -578,7 +549,7 @@ def test_transpose(self): idx = (2 * ii + ij, 2 * ji + jj) idx_t = (2 * ij + ii, 2 * jj + ji) - self.assertEqual(dist_t.cov()[idx_t], dist.cov()[idx]) + self.assertEqual(transposed_rv.cov[idx_t], rv.cov[idx]) # Sadly, sampling is not stable w.r.t. 
permutations of variables diff --git a/tests/test_prob/test_randomvariable.py b/tests/test_prob/test_randomvariable.py index 33ee9c2d96..6663170268 100644 --- a/tests/test_prob/test_randomvariable.py +++ b/tests/test_prob/test_randomvariable.py @@ -33,20 +33,13 @@ def setUp(self): self.matrices2d = [np.array([[1, 2], [3, 2]]), np.array([[0, 0], [1.0, -4.3]])] self.linops2d = [linops.MatrixMult(A=np.array([[1, 2], [4, 5]]))] self.randvars2d = [ - prob.RandomVariable( - distribution=prob.Normal( - mean=np.array([1, 2]), cov=np.array([[2, 0], [0, 5]]) - ) - ) + prob.Normal(mean=np.array([1, 2]), cov=np.array([[2, 0], [0, 5]])) ] self.randvars2x2 = [ - prob.RandomVariable( - shape=(2, 2), - distribution=prob.Normal( - mean=np.array([[-2, 0.3], [0, 1]]), - cov=linops.SymmetricKronecker(A=np.eye(2), B=np.ones((2, 2))), - ), - ) + prob.Normal( + mean=np.array([[-2, 0.3], [0, 1]]), + cov=linops.SymmetricKronecker(A=np.eye(2), B=np.ones((2, 2))), + ), ] self.scipyrvs = [ @@ -158,8 +151,8 @@ def test_rv_vector_product(self): x = np.array([[1], [-4]]) y = rv @ x X = np.kron(np.eye(rv.shape[0]), x) - truemean = rv.mean() @ x - truecov = X.T @ rv.cov().todense() @ X + truemean = rv.mean @ x + truecov = X.T @ rv.cov.todense() @ X self.assertIsInstance( y, prob.RandomVariable, @@ -169,10 +162,10 @@ def test_rv_vector_product(self): y.shape, (2, 1), "Shape of resulting random variable incorrect." ) self.assertAllClose( - y.mean(), truemean, msg="Means of random variables do not match." + y.mean, truemean, msg="Means of random variables do not match." ) self.assertAllClose( - y.cov().todense(), + y.cov.todense(), truecov, msg="Covariances of random variables do not match.", ) diff --git a/tox.ini b/tox.ini index 79b48f26a4..16e3a60620 100644 --- a/tox.ini +++ b/tox.ini @@ -58,7 +58,7 @@ commands = pylint src/probnum/diffeq --disable="protected-access,abstract-class-instantiated,too-many-locals,too-few-public-methods,too-many-arguments,unused-argument,missing-module-docstring,missing-function-docstring" pylint src/probnum/filtsmooth --disable="duplicate-code,protected-access,no-self-use,too-many-locals,arguments-differ,too-many-arguments,unused-argument,missing-module-docstring,missing-function-docstring" pylint src/probnum/linalg --disable="attribute-defined-outside-init,too-many-statements,too-many-instance-attributes,too-complex,protected-access,too-many-lines,no-self-use,too-many-locals,redefined-builtin,arguments-differ,abstract-method,too-many-arguments,too-many-branches,duplicate-code,unused-argument,fixme,missing-module-docstring" - pylint src/probnum/prob --disable="too-many-instance-attributes,broad-except,arguments-differ,abstract-method,too-many-arguments,protected-access,duplicate-code,unused-argument,fixme,missing-module-docstring,missing-function-docstring" + pylint src/probnum/prob --disable="protected-access,fixme,missing-module-docstring,missing-function-docstring" pylint src/probnum/quad --disable="attribute-defined-outside-init,too-few-public-methods,redefined-builtin,arguments-differ,unused-argument,missing-module-docstring" pylint src/probnum/utils --disable="duplicate-code,missing-module-docstring,missing-function-docstring" pylint tests --disable="line-too-long,duplicate-code,missing-class-docstring,unnecessary-pass,unused-variable,protected-access,attribute-defined-outside-init,no-self-use,abstract-class-instantiated,too-many-arguments,too-many-instance-attributes,too-many-locals,unused-argument,fixme,missing-module-docstring,missing-function-docstring"
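
Note on the migration pattern: every hunk above applies the same two-step change. First, distributions are random variables now, so the wrapper construction `prob.RandomVariable(distribution=prob.Normal(...))` collapses to a direct `prob.Normal(...)` (also reachable as `prob.random_variable.Normal`). Second, `mean`, `cov`, `var` and `std` change from methods to properties, so every call site drops its parentheses. A minimal before/after sketch under these assumptions (the `mean` and `cov` values are placeholders, and `from probnum import prob` is assumed to expose the refactored API at this revision):

    import numpy as np
    from probnum import prob

    mean = np.array([1.0, 2.0])
    cov = np.array([[2.0, 0.0], [0.0, 5.0]])

    # Old API (removed by this patch): a distribution wrapped in a
    # RandomVariable, with moments exposed as methods.
    #   rv = prob.RandomVariable(distribution=prob.Normal(mean=mean, cov=cov))
    #   m, C = rv.mean(), rv.cov()

    # New API: Normal *is* a RandomVariable, and moments are properties.
    rv = prob.Normal(mean=mean, cov=cov)
    assert isinstance(rv, prob.RandomVariable)
    m, C = rv.mean, rv.cov        # property access, no call parentheses
    samples = rv.sample(size=5)   # sampling keeps its method form

The same mechanical rewrite explains why indexing, slicing and arithmetic on a `Normal` now return `prob.Normal` instances directly instead of the former private `_UnivariateNormal`/`_MultivariateNormal`/`_MatrixvariateNormal` subclasses checked by the old tests.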