From 6713a2400e72ad2c6cc835822ac7d89c2fcf6673 Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Thu, 13 Aug 2020 18:17:39 +0200 Subject: [PATCH 01/44] initial pylint config --- pyproject.toml | 10 +++++++++- tox.ini | 6 ++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 3a9d98fbd..24ff82bbc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -20,4 +20,12 @@ exclude = ''' | build | dist )/ -''' \ No newline at end of file +''' + + +[tool.pylint.messages_control] +# From black: bad-continuation and bad-whitespace +disable = "bad-continuation, bad-whitespace" + +[tool.pylint.format] +max-line-length = "88" diff --git a/tox.ini b/tox.ini index f84f11228..aac433820 100644 --- a/tox.ini +++ b/tox.ini @@ -45,3 +45,9 @@ changedir = benchmarks commands = asv machine --yes asv dev + +[testenv:pylint] +basepython = python3 +description = Code linting with pylint +deps = pylint +commands = pylint src test From 5eff86e4f1170e799df65f4706acb68bbbcbb24f Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Mon, 17 Aug 2020 21:09:58 +0200 Subject: [PATCH 02/44] Unrelated small tox fix (unused line) --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index aac433820..057e0d90e 100644 --- a/tox.ini +++ b/tox.ini @@ -12,7 +12,6 @@ deps = pytest-cov commands = pytest --doctest-modules --cov=probnum --no-cov-on-fail --cov-report=xml -whitelist_externals = make [testenv:docs] description = Invoke sphinx-build to build the HTML docs @@ -21,6 +20,7 @@ passenv = HOME deps = -r{toxinidir}/docs/requirements.txt changedir = docs +whitelist_externals = make commands = make clean make html From acb56ec6c9e4cd9a8f7275e2fa8094b94677433b Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Thu, 13 Aug 2020 18:32:33 +0200 Subject: [PATCH 03/44] added some more exceptions to pylint, which will influence my order of working --- pyproject.toml | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 24ff82bbc..e02937c81 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -24,8 +24,23 @@ exclude = ''' [tool.pylint.messages_control] -# From black: bad-continuation and bad-whitespace -disable = "bad-continuation, bad-whitespace" +# From black: bad-continuation, bad-whitespace +# Exceptions we want to have: invalid-name +# Unclear to me: no-else-return, no-else-raise +disable = """ +bad-continuation, +bad-whitespace, + +invalid-name, + +no-else-return +no-else-raise + +line-too-long, +fixme, +too-many-arguments +missing-module-docstring +""" [tool.pylint.format] max-line-length = "88" From 21212cdd023dee00d49b3e1ef3a1bdf58ed34095 Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Thu, 13 Aug 2020 18:33:04 +0200 Subject: [PATCH 04/44] renamed github workflow and added pylint --- .github/workflows/{black.yml => linting.yml} | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) rename .github/workflows/{black.yml => linting.yml} (54%) diff --git a/.github/workflows/black.yml b/.github/workflows/linting.yml similarity index 54% rename from .github/workflows/black.yml rename to .github/workflows/linting.yml index 3cb0ad6b0..de5be332f 100644 --- a/.github/workflows/black.yml +++ b/.github/workflows/linting.yml @@ -12,4 +12,12 @@ jobs: run: pip install tox - name: Run black through tox run: tox -e black - + pylint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v2 + - name: Install tox + run: pip install tox + - 
name: Run pylint through tox + run: tox -e pylint From 7d5ef3d16750f5e7289cb123f9ff8632ffafa4f5 Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Thu, 13 Aug 2020 18:33:21 +0200 Subject: [PATCH 05/44] added black and pylint to the default tox envs --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 057e0d90e..8ef27f5f8 100644 --- a/tox.ini +++ b/tox.ini @@ -4,7 +4,7 @@ # and then run "tox" from this directory. [tox] -envlist = py3, docs, benchmarks +envlist = py3, docs, benchmarks, black, pylint [testenv] deps = From 82cb7a96600a62fdaaca91fbcb2cede22d537cb2 Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Mon, 17 Aug 2020 21:14:43 +0200 Subject: [PATCH 06/44] Added some more pylint exceptions for now --- pyproject.toml | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index e02937c81..a839da965 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,13 +33,16 @@ bad-whitespace, invalid-name, -no-else-return -no-else-raise +no-else-return, +no-else-raise, line-too-long, fixme, -too-many-arguments -missing-module-docstring +too-many-arguments, +missing-module-docstring, +unused-wildcard-import, +abstract-method, +unused-argument, """ [tool.pylint.format] From 2063aaedb6ecbf3452226e8528c7b140bfea5e51 Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Mon, 17 Aug 2020 21:21:01 +0200 Subject: [PATCH 07/44] Fixed "useless-super-delegation" --- src/probnum/quad/bayesian/bayesquadrature.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/src/probnum/quad/bayesian/bayesquadrature.py b/src/probnum/quad/bayesian/bayesquadrature.py index fc187175d..727668d04 100644 --- a/src/probnum/quad/bayesian/bayesquadrature.py +++ b/src/probnum/quad/bayesian/bayesquadrature.py @@ -119,9 +119,6 @@ class BayesianQuadrature(Quadrature): This class is designed to be subclassed by implementations of Bayesian quadrature with an :meth:`integrate` method. """ - def __init__(self): - super().__init__() - def integrate(self, fun, fun0, domain, nevals, **kwargs): """ Integrate the function ``fun``. @@ -150,9 +147,6 @@ class VanillaBayesianQuadrature(BayesianQuadrature): Vanilla Bayesian quadrature in 1D. """ - def __init__(self): - super().__init__() - def integrate(self, fun, fun0, domain, nevals, **kwargs): """ Integrate the function ``fun``. @@ -197,9 +191,6 @@ class WSABIBayesianQuadrature(BayesianQuadrature): Warped Sequential Active Bayesian Integration (WSABI). """ - def __init__(self): - super().__init__() - def integrate(self, fun, fun0, domain, nevals, **kwargs): """ Integrate the function ``fun``. 
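The removals in the patch above all follow the pattern that pylint reports as "useless-super-delegation": an __init__ that only forwards unchanged arguments to super().__init__() adds nothing, because Python falls back to the inherited initializer whenever the override is absent. A minimal sketch of the pattern, with hypothetical class names rather than probnum code:

    class Base:
        def __init__(self):
            self.calls = []

    class Redundant(Base):
        def __init__(self):              # flagged: useless-super-delegation
            super().__init__()

    class Lean(Base):
        pass                             # inherits Base.__init__ unchanged

    # Both subclasses are initialised identically.
    assert Redundant().calls == Lean().calls == []
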
From 433d04a9910c9bb4c6c1b1178dc6186859f6a4f5 Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Mon, 17 Aug 2020 21:22:51 +0200 Subject: [PATCH 08/44] Fixed "unused-import" --- src/probnum/filtsmooth/statespace/statespace.py | 3 --- src/probnum/linalg/linearsolvers/matrixbased.py | 2 -- src/probnum/quad/bayesian/bayesquadrature.py | 1 - 3 files changed, 6 deletions(-) diff --git a/src/probnum/filtsmooth/statespace/statespace.py b/src/probnum/filtsmooth/statespace/statespace.py index 4beba9e8b..ad0f66c1c 100644 --- a/src/probnum/filtsmooth/statespace/statespace.py +++ b/src/probnum/filtsmooth/statespace/statespace.py @@ -5,11 +5,8 @@ or discrete-discrete (_dd) models """ -from abc import ABC, abstractmethod import numpy as np -from probnum.prob import RandomVariable, Distribution - def generate_cd(dynmod, measmod, initrv, times, _nsteps=5): """ diff --git a/src/probnum/linalg/linearsolvers/matrixbased.py b/src/probnum/linalg/linearsolvers/matrixbased.py index 8ec8a454a..ab60719cb 100644 --- a/src/probnum/linalg/linearsolvers/matrixbased.py +++ b/src/probnum/linalg/linearsolvers/matrixbased.py @@ -8,8 +8,6 @@ import abc import numpy as np -import scipy.sparse -import scipy.sparse.linalg import GPy from probnum import prob diff --git a/src/probnum/quad/bayesian/bayesquadrature.py b/src/probnum/quad/bayesian/bayesquadrature.py index 727668d04..5ee508f0f 100644 --- a/src/probnum/quad/bayesian/bayesquadrature.py +++ b/src/probnum/quad/bayesian/bayesquadrature.py @@ -7,7 +7,6 @@ the integral. """ -import numpy as np from probnum.quad.quadrature import Quadrature From 55e35e395af6d6a87d5b02b7c027e3b04f68bafb Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Mon, 17 Aug 2020 21:27:04 +0200 Subject: [PATCH 09/44] Fixed "empty-docstring" --- src/probnum/diffeq/__init__.py | 3 --- src/probnum/diffeq/ode/__init__.py | 3 --- src/probnum/diffeq/odefiltsmooth/ivpfiltsmooth.py | 1 - src/probnum/filtsmooth/gaussfiltsmooth/extendedkalman.py | 9 --------- src/probnum/filtsmooth/gaussfiltsmooth/kalman.py | 2 -- .../filtsmooth/gaussfiltsmooth/unscentedkalman.py | 9 --------- .../filtsmooth/statespace/continuous/linearsdemodel.py | 6 ------ .../statespace/discrete/discretegaussianmodel.py | 9 --------- .../filtsmooth/statespace/discrete/discretemodel.py | 5 ----- 9 files changed, 47 deletions(-) diff --git a/src/probnum/diffeq/__init__.py b/src/probnum/diffeq/__init__.py index b5ad95c63..05a3afa36 100644 --- a/src/probnum/diffeq/__init__.py +++ b/src/probnum/diffeq/__init__.py @@ -1,6 +1,3 @@ -""" -""" - from .ode import * from .odefiltsmooth import * from .steprule import * diff --git a/src/probnum/diffeq/ode/__init__.py b/src/probnum/diffeq/ode/__init__.py index 5bd734335..f9a71955c 100644 --- a/src/probnum/diffeq/ode/__init__.py +++ b/src/probnum/diffeq/ode/__init__.py @@ -1,6 +1,3 @@ -""" -""" - from .ivp import * from .ode import * diff --git a/src/probnum/diffeq/odefiltsmooth/ivpfiltsmooth.py b/src/probnum/diffeq/odefiltsmooth/ivpfiltsmooth.py index a988ee5f7..754d348e6 100644 --- a/src/probnum/diffeq/odefiltsmooth/ivpfiltsmooth.py +++ b/src/probnum/diffeq/odefiltsmooth/ivpfiltsmooth.py @@ -172,5 +172,4 @@ def _suggest_step(self, step, errorest): @property def prior(self): - """ """ return self.gfilt.dynamicmodel diff --git a/src/probnum/filtsmooth/gaussfiltsmooth/extendedkalman.py b/src/probnum/filtsmooth/gaussfiltsmooth/extendedkalman.py index 90887a258..0d712db88 100644 --- a/src/probnum/filtsmooth/gaussfiltsmooth/extendedkalman.py +++ b/src/probnum/filtsmooth/gaussfiltsmooth/extendedkalman.py @@ 
-68,21 +68,16 @@ def __init__(self, dynamod, measmod, initrv, **kwargs): super().__init__(dynamod, measmod, initrv) def predict(self, start, stop, randvar, **kwargs): - """ """ step = (stop - start) / self.cke_nsteps return self.dynamicmodel.chapmankolmogorov(start, stop, step, randvar, **kwargs) def update(self, time, randvar, data, **kwargs): - """ """ return _discrete_extkalman_update( time, randvar, data, self.measurementmodel, **kwargs ) class _DiscDiscExtendedKalman(ExtendedKalman): - """ - """ - def __init__(self, dynamod, measmod, initrv, **kwargs): """ Checks that dynamod and measmod are linear and moves on. @@ -98,7 +93,6 @@ def __init__(self, dynamod, measmod, initrv, **kwargs): super().__init__(dynamod, measmod, initrv) def predict(self, start, stop, randvar, **kwargs): - """ """ mean, covar = randvar.mean(), randvar.cov() if np.isscalar(mean) and np.isscalar(covar): mean, covar = mean * np.ones(1), covar * np.eye(1) @@ -110,15 +104,12 @@ def predict(self, start, stop, randvar, **kwargs): return RandomVariable(distribution=Normal(mpred, cpred)), crosscov def update(self, time, randvar, data, **kwargs): - """ """ return _discrete_extkalman_update( time, randvar, data, self.measurementmodel, **kwargs ) def _discrete_extkalman_update(time, randvar, data, measmod, **kwargs): - """ - """ mpred, cpred = randvar.mean(), randvar.cov() if np.isscalar(mpred) and np.isscalar(cpred): mpred, cpred = mpred * np.ones(1), cpred * np.eye(1) diff --git a/src/probnum/filtsmooth/gaussfiltsmooth/kalman.py b/src/probnum/filtsmooth/gaussfiltsmooth/kalman.py index 22e1ec605..35d876964 100644 --- a/src/probnum/filtsmooth/gaussfiltsmooth/kalman.py +++ b/src/probnum/filtsmooth/gaussfiltsmooth/kalman.py @@ -77,12 +77,10 @@ def __init__(self, dynamod, measmod, initrv, **kwargs): super().__init__(dynamod, measmod, initrv) def predict(self, start, stop, randvar, **kwargs): - """ """ step = (stop - start) / self.cke_nsteps return self.dynamicmodel.chapmankolmogorov(start, stop, step, randvar, **kwargs) def update(self, time, randvar, data, **kwargs): - """ """ return _discrete_kalman_update( time, randvar, data, self.measurementmodel, **kwargs ) diff --git a/src/probnum/filtsmooth/gaussfiltsmooth/unscentedkalman.py b/src/probnum/filtsmooth/gaussfiltsmooth/unscentedkalman.py index d60f96f0d..ca39e23db 100644 --- a/src/probnum/filtsmooth/gaussfiltsmooth/unscentedkalman.py +++ b/src/probnum/filtsmooth/gaussfiltsmooth/unscentedkalman.py @@ -40,14 +40,12 @@ def __new__(cls, dynamod, measmod, initrv, alpha, beta, kappa, **kwargs): def _cont_disc(dynamod, measmod): - """ """ dyna_is_cont = issubclass(type(dynamod), ContinuousModel) meas_is_disc = issubclass(type(measmod), DiscreteModel) return dyna_is_cont and meas_is_disc def _disc_disc(dynamod, measmod): - """ """ dyna_is_disc = issubclass(type(dynamod), DiscreteModel) meas_is_disc = issubclass(type(measmod), DiscreteModel) return dyna_is_disc and meas_is_disc @@ -135,15 +133,12 @@ def _predict_nonlinear(self, start, randvar, **kwargs): return RandomVariable(distribution=Normal(mpred, cpred)), crosscov def update(self, time, randvar, data, **kwargs): - """ """ return _discrete_unskalman_update( time, randvar, data, self.measmod, self.ut, **kwargs ) def _discrete_unskalman_update(time, randvar, data, measmod, ut, **kwargs): - """ - """ if issubclass(type(measmod), DiscreteGaussianLinearModel): return _update_discrete_linear(time, randvar, data, measmod, **kwargs) else: @@ -151,8 +146,6 @@ def _discrete_unskalman_update(time, randvar, data, measmod, ut, **kwargs): 
def _update_discrete_linear(time, randvar, data, measmod, **kwargs): - """ - """ mpred, cpred = randvar.mean(), randvar.cov() if np.isscalar(mpred) and np.isscalar(cpred): mpred, cpred = mpred * np.ones(1), cpred * np.eye(1) @@ -167,8 +160,6 @@ def _update_discrete_linear(time, randvar, data, measmod, **kwargs): def _update_discrete_nonlinear(time, randvar, data, measmod, ut, **kwargs): - """ - """ mpred, cpred = randvar.mean(), randvar.cov() if np.isscalar(mpred) and np.isscalar(cpred): mpred, cpred = mpred * np.ones(1), cpred * np.eye(1) diff --git a/src/probnum/filtsmooth/statespace/continuous/linearsdemodel.py b/src/probnum/filtsmooth/statespace/continuous/linearsdemodel.py index c06c8d3db..0d1ae61ee 100644 --- a/src/probnum/filtsmooth/statespace/continuous/linearsdemodel.py +++ b/src/probnum/filtsmooth/statespace/continuous/linearsdemodel.py @@ -180,20 +180,14 @@ def __init__(self, driftmatrix, force, dispmatrix, diffmatrix): @property def driftmatrix(self): - """ - """ return self._driftmatrix @property def force(self): - """ - """ return self._force @property def dispersionmatrix(self): - """ - """ return self._dispmatrix def chapmankolmogorov(self, start, stop, step, randvar, **kwargs): diff --git a/src/probnum/filtsmooth/statespace/discrete/discretegaussianmodel.py b/src/probnum/filtsmooth/statespace/discrete/discretegaussianmodel.py index a1e637fa6..9f3e139ed 100644 --- a/src/probnum/filtsmooth/statespace/discrete/discretegaussianmodel.py +++ b/src/probnum/filtsmooth/statespace/discrete/discretegaussianmodel.py @@ -90,8 +90,6 @@ def pdf(self, loc, time, state, **kwargs): @property def ndim(self): - """ - """ return len(self.diffusionmatrix(0.0)) @@ -101,9 +99,6 @@ class DiscreteGaussianLinearModel(DiscreteGaussianModel): """ def __init__(self, dynamatfct, forcefct, diffmatfct): - """ - """ - def dynafct(t, x, **kwargs): return dynamatfct(t, **kwargs) @ x + forcefct(t, **kwargs) @@ -121,8 +116,6 @@ def dynamicsmatrix(self, time, **kwargs): return self.jacobian(time, None, **kwargs) def force(self, time, **kwargs): - """ - """ return self.forcefct(time, **kwargs) @@ -133,8 +126,6 @@ class DiscreteGaussianLTIModel(DiscreteGaussianLinearModel): """ def __init__(self, dynamat, forcevec, diffmat): - """ - """ super().__init__( lambda t, **kwargs: dynamat, lambda t, **kwargs: forcevec, diff --git a/src/probnum/filtsmooth/statespace/discrete/discretemodel.py b/src/probnum/filtsmooth/statespace/discrete/discretemodel.py index 80fcf17f8..a7f109580 100644 --- a/src/probnum/filtsmooth/statespace/discrete/discretemodel.py +++ b/src/probnum/filtsmooth/statespace/discrete/discretemodel.py @@ -1,6 +1,3 @@ -""" -""" - from abc import ABC, abstractmethod @@ -37,6 +34,4 @@ def pdf(self, loc, time, state, **kwargs): @property @abstractmethod def ndim(self): - """ - """ raise NotImplementedError From e66918d1e834952e12034991026fe8a60ec8465c Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Mon, 17 Aug 2020 21:30:22 +0200 Subject: [PATCH 10/44] More exceptions for now --- pyproject.toml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index a839da965..b692daf16 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -43,6 +43,12 @@ missing-module-docstring, unused-wildcard-import, abstract-method, unused-argument, +too-many-branches, +arguments-differ, +redefined-builtin, +too-few-public-methods, +too-many-locals, +missing-function-docstring """ [tool.pylint.format] From c830000186ba35aab3d06005f1c13816b2ecf5e4 Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: 
Mon, 17 Aug 2020 21:31:27 +0200 Subject: [PATCH 11/44] Fixed "trailing-whitespace" --- src/probnum/diffeq/odefiltsmooth/ivp2filter.py | 5 +++-- src/probnum/diffeq/odefiltsmooth/prior.py | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/probnum/diffeq/odefiltsmooth/ivp2filter.py b/src/probnum/diffeq/odefiltsmooth/ivp2filter.py index 5bcff3e36..fa6ae4b74 100644 --- a/src/probnum/diffeq/odefiltsmooth/ivp2filter.py +++ b/src/probnum/diffeq/odefiltsmooth/ivp2filter.py @@ -4,6 +4,7 @@ """ import numpy as np + from probnum.filtsmooth import * from probnum.filtsmooth.statespace.discrete import DiscreteGaussianModel from probnum.prob import RandomVariable @@ -14,7 +15,7 @@ def ivp2ekf0(ivp, prior, evlvar): """ Computes measurement model and initial distribution for KF based on IVP and prior. - + **Initialdistribution:** Conditions the initial distribution of the Gaussian filter @@ -51,7 +52,7 @@ def ivp2ekf0(ivp, prior, evlvar): model, :math:`H_0` and :math:`H_1` become :math:`H_0 P^{-1}` and :math:`H_1 P^{-1}` which has to be taken into account. In this case, - + - EKF0 thinks :math:`J_g(m) = H_1 P^{-1}` - EKF1 thinks :math:`J_g(m) = H_1 P^{-1} - J_f(t, H_0 P^{-1} m(t)) (H_0 P^{-1})^\\top` - UKF again thinks: ''What is a Jacobian?'' diff --git a/src/probnum/diffeq/odefiltsmooth/prior.py b/src/probnum/diffeq/odefiltsmooth/prior.py index e0f474bda..83919e630 100644 --- a/src/probnum/diffeq/odefiltsmooth/prior.py +++ b/src/probnum/diffeq/odefiltsmooth/prior.py @@ -42,7 +42,7 @@ class ODEPrior(LTISDEModel): matrix that maps to filtering iteration to the Nordsieck vector, .. math:: P = \\text{diag }(1, h, h^2, ..., h^q). - + Here, :math:`h` is some expected average step size. Note that we ignored the factorials in this matrix. Our setting makes it easy to recover "no preconditioning" by choosing :math:`h=1`. 
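The disable list maintained in pyproject.toml and the inline "# pylint: disable=..." comments that appear in later patches of this series suppress messages at two different granularities: a name listed under [tool.pylint.messages_control] is silenced project-wide, while an inline comment exempts a single occurrence. A minimal sketch (hypothetical module, not taken from probnum) of how the two levels interact:

    """Sketch: project-wide vs. per-line pylint suppression."""


    def covered_by_config(first, second, third, fourth, fifth, sixth):
        # "too-many-arguments" is on the project-wide disable list, so no
        # inline annotation is needed even though six parameters exceed
        # pylint's default limit of five.
        return first + second + third + fourth + fifth + sixth


    def covered_inline(size):
        """Exempt one comparison without widening the global configuration."""
        if size == 1 or size == ():  # pylint: disable=consider-using-in
            return 0
        return size

Later patches in the series follow the same economy: once a message category is fixed across the code base, its name is removed from the project-wide list again (as happens for "unused-wildcard-import" and "line-too-long" further down).
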
From 1f633c3c664b806918365545126ee8d30d92d559 Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Mon, 17 Aug 2020 21:34:04 +0200 Subject: [PATCH 12/44] Fixed "unneeded-not" --- src/probnum/linalg/linearsolvers/matrixbased.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/probnum/linalg/linearsolvers/matrixbased.py b/src/probnum/linalg/linearsolvers/matrixbased.py index ab60719cb..7a903a0f7 100644 --- a/src/probnum/linalg/linearsolvers/matrixbased.py +++ b/src/probnum/linalg/linearsolvers/matrixbased.py @@ -461,13 +461,13 @@ def _compute_trace_Ainv_covfactor0(self, Y, unc_scale): if isinstance(self.Ainv_covfactor0, linops.ScalarMult): # Scalar prior mean - if self.is_calib_covclass and k > 0 and (not unc_scale == 0): + if self.is_calib_covclass and k > 0 and unc_scale != 0: _trace = self.Ainv_covfactor0.scalar * k else: _trace = self.Ainv_covfactor0.trace() else: # General prior mean - if self.is_calib_covclass and k > 0 and (not unc_scale == 0): + if self.is_calib_covclass and k > 0 and unc_scale != 0: # General prior mean with calibration covariance class _trace = np.trace( np.linalg.solve( From bdf419c7ced5a1445322132b7ade0b41f4e19648 Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Mon, 17 Aug 2020 21:38:49 +0200 Subject: [PATCH 13/44] Fixed "consider-using-in" --- src/probnum/diffeq/odefiltsmooth/odefiltsmooth.py | 6 +++--- src/probnum/prob/distributions/dirac.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/probnum/diffeq/odefiltsmooth/odefiltsmooth.py b/src/probnum/diffeq/odefiltsmooth/odefiltsmooth.py index 057d1afa7..3b96fc982 100644 --- a/src/probnum/diffeq/odefiltsmooth/odefiltsmooth.py +++ b/src/probnum/diffeq/odefiltsmooth/odefiltsmooth.py @@ -357,11 +357,11 @@ def _string2filter(_ivp, _prior, _method, **kwargs): evlvar = kwargs["evlvar"] else: evlvar = 0.0 - if _method == "ekf0" or _method == "eks0": + if _method in ("ekf0", "eks0"): return ivp2filter.ivp2ekf0(_ivp, _prior, evlvar) - elif _method == "ekf1" or _method == "eks1": + elif _method in ("ekf1", "eks1"): return ivp2filter.ivp2ekf1(_ivp, _prior, evlvar) - elif _method == "ukf" or _method == "uks": + elif _method in ("ukf", "uks"): return ivp2filter.ivp2ukf(_ivp, _prior, evlvar) else: raise ValueError("Type of filter not supported.") diff --git a/src/probnum/prob/distributions/dirac.py b/src/probnum/prob/distributions/dirac.py index 8ab4887fc..030e9e27f 100644 --- a/src/probnum/prob/distributions/dirac.py +++ b/src/probnum/prob/distributions/dirac.py @@ -77,7 +77,7 @@ def cov(self): def sample(self, size=(), seed=None): ndims = len(self.shape) - if size == 1 or size == (): + if size == 1 or size == (): # pylint: disable=consider-using-in return self.parameters["support"] elif isinstance(size, int) and ndims == 0: return np.tile(A=self.parameters["support"], reps=size) From cbc113c5da9e926a2a3ae77019baf66cf3b25bfd Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Mon, 17 Aug 2020 21:38:57 +0200 Subject: [PATCH 14/44] Fixed "consider-merging-isinstance" --- src/probnum/prob/randomvariable.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/probnum/prob/randomvariable.py b/src/probnum/prob/randomvariable.py index 6cb35f360..b89a30f81 100644 --- a/src/probnum/prob/randomvariable.py +++ b/src/probnum/prob/randomvariable.py @@ -394,8 +394,12 @@ def asrandvar(obj): shape=obj.shape, dtype=obj.dtype, distribution=Dirac(support=obj) ) # Scipy random variable - elif isinstance(obj, scipy.stats._distn_infrastructure.rv_frozen) or isinstance( 
- obj, scipy.stats._multivariate.multi_rv_frozen + elif isinstance( + obj, + ( + scipy.stats._distn_infrastructure.rv_frozen, + scipy.stats._multivariate.multi_rv_frozen, + ), ): return _scipystats_to_rv(scipydist=obj) else: From 84cc9af62e61d5764696b5f4346957a2df2ac345 Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Mon, 17 Aug 2020 21:42:26 +0200 Subject: [PATCH 15/44] Fixed "inconsistent-return-statements" --- src/probnum/filtsmooth/gaussfiltsmooth/kalmanposterior.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/probnum/filtsmooth/gaussfiltsmooth/kalmanposterior.py b/src/probnum/filtsmooth/gaussfiltsmooth/kalmanposterior.py index 7b7795336..167b97b38 100644 --- a/src/probnum/filtsmooth/gaussfiltsmooth/kalmanposterior.py +++ b/src/probnum/filtsmooth/gaussfiltsmooth/kalmanposterior.py @@ -88,10 +88,10 @@ def __call__(self, t, smoothed=True): else: return pred_rv - elif t > self.locations[-1]: - if smoothed: - warn("`smoothed=True` is ignored for extrapolation.") - return self._predict_to_loc(t) + # else: t > self.locations[-1]: + if smoothed: + warn("`smoothed=True` is ignored for extrapolation.") + return self._predict_to_loc(t) def _predict_to_loc(self, loc): """Predict states at location `loc` from the closest, previous state""" From 9f528929affefac76720bafdc9b9f354ebfe347f Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Mon, 17 Aug 2020 21:43:44 +0200 Subject: [PATCH 16/44] Fixed "simplifiable-if-statement" --- src/probnum/diffeq/steprule.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/probnum/diffeq/steprule.py b/src/probnum/diffeq/steprule.py index 89a1437bc..1310dad08 100644 --- a/src/probnum/diffeq/steprule.py +++ b/src/probnum/diffeq/steprule.py @@ -88,7 +88,4 @@ def suggest(self, laststep, errorest, **kwargs): return step def is_accepted(self, proposedstep, errorest, **kwargs): - if errorest * proposedstep < self.tol_per_step: - return True - else: - return False + return errorest * proposedstep < self.tol_per_step From 624b05ed33980e3f4d6c60d47700609e64b73412 Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Mon, 17 Aug 2020 21:51:26 +0200 Subject: [PATCH 17/44] Fixed "unidiomatic-typecheck" --- src/probnum/filtsmooth/statespace/continuous/continuousmodel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/probnum/filtsmooth/statespace/continuous/continuousmodel.py b/src/probnum/filtsmooth/statespace/continuous/continuousmodel.py index eb1f5061b..af2f550ec 100644 --- a/src/probnum/filtsmooth/statespace/continuous/continuousmodel.py +++ b/src/probnum/filtsmooth/statespace/continuous/continuousmodel.py @@ -49,7 +49,7 @@ def sample(self, start, stop, step, initstate, **kwargs): Returns a single element at the end of the time, not the entire array! 
""" - if type(initstate) != np.ndarray: + if not isinstance(initstate, np.ndarray): raise ValueError("Init state is not array!") times = np.arange(start, stop, step) currstate = initstate From c65f535373de6d24feca820f64134f70b2e986e8 Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Mon, 17 Aug 2020 21:51:42 +0200 Subject: [PATCH 18/44] Fixed "super-init-not-called" --- src/probnum/linalg/linops/linearoperators.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/probnum/linalg/linops/linearoperators.py b/src/probnum/linalg/linops/linearoperators.py index d9013a220..576524e6b 100644 --- a/src/probnum/linalg/linops/linearoperators.py +++ b/src/probnum/linalg/linops/linearoperators.py @@ -405,6 +405,7 @@ class Diagonal(LinearOperator): # TODO: should this be an operator itself or a function of a LinearOperator? # - a function allows subclasses (e.g. MatrixMult) to implement more efficient versions than n products e_i A e_i def __init__(self, Op): + # pylint: disable=super-init-not-called raise NotImplementedError From d1854000164bf2f36487357e08a29fc9c613a4f8 Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Mon, 17 Aug 2020 22:03:22 +0200 Subject: [PATCH 19/44] Fixed "wildcard-import" --- src/probnum/diffeq/odefiltsmooth/ivp2filter.py | 2 +- src/probnum/diffeq/odefiltsmooth/ivpfiltsmooth.py | 1 - .../filtsmooth/gaussfiltsmooth/extendedkalman.py | 9 +++++++-- .../filtsmooth/gaussfiltsmooth/gaussfiltsmooth.py | 4 +++- src/probnum/filtsmooth/gaussfiltsmooth/kalman.py | 9 +++++++-- .../filtsmooth/gaussfiltsmooth/unscentedkalman.py | 12 +++++++++--- 6 files changed, 27 insertions(+), 10 deletions(-) diff --git a/src/probnum/diffeq/odefiltsmooth/ivp2filter.py b/src/probnum/diffeq/odefiltsmooth/ivp2filter.py index fa6ae4b74..c9d4b6161 100644 --- a/src/probnum/diffeq/odefiltsmooth/ivp2filter.py +++ b/src/probnum/diffeq/odefiltsmooth/ivp2filter.py @@ -5,7 +5,7 @@ import numpy as np -from probnum.filtsmooth import * +from probnum.filtsmooth import ExtendedKalman, UnscentedKalman from probnum.filtsmooth.statespace.discrete import DiscreteGaussianModel from probnum.prob import RandomVariable from probnum.prob.distributions import Normal, Dirac diff --git a/src/probnum/diffeq/odefiltsmooth/ivpfiltsmooth.py b/src/probnum/diffeq/odefiltsmooth/ivpfiltsmooth.py index 754d348e6..acde3c754 100644 --- a/src/probnum/diffeq/odefiltsmooth/ivpfiltsmooth.py +++ b/src/probnum/diffeq/odefiltsmooth/ivpfiltsmooth.py @@ -5,7 +5,6 @@ from probnum.prob.distributions import Normal from probnum.diffeq import odesolver from probnum.diffeq.odefiltsmooth.prior import ODEPrior -from probnum.filtsmooth import * from probnum.diffeq.odesolution import ODESolution diff --git a/src/probnum/filtsmooth/gaussfiltsmooth/extendedkalman.py b/src/probnum/filtsmooth/gaussfiltsmooth/extendedkalman.py index 0d712db88..71ec35660 100644 --- a/src/probnum/filtsmooth/gaussfiltsmooth/extendedkalman.py +++ b/src/probnum/filtsmooth/gaussfiltsmooth/extendedkalman.py @@ -4,10 +4,15 @@ """ import numpy as np -from probnum.filtsmooth.gaussfiltsmooth.gaussfiltsmooth import * +from probnum.filtsmooth.gaussfiltsmooth.gaussfiltsmooth import GaussFiltSmooth from probnum.prob import RandomVariable from probnum.prob.distributions import Normal -from probnum.filtsmooth.statespace import * +from probnum.filtsmooth.statespace import ( + ContinuousModel, + DiscreteModel, + LinearSDEModel, + DiscreteGaussianModel, +) class ExtendedKalman(GaussFiltSmooth): diff --git a/src/probnum/filtsmooth/gaussfiltsmooth/gaussfiltsmooth.py 
b/src/probnum/filtsmooth/gaussfiltsmooth/gaussfiltsmooth.py index 90f9d50a1..2faccd105 100644 --- a/src/probnum/filtsmooth/gaussfiltsmooth/gaussfiltsmooth.py +++ b/src/probnum/filtsmooth/gaussfiltsmooth/gaussfiltsmooth.py @@ -1,10 +1,12 @@ """ Gaussian filtering. """ +from abc import ABC, abstractmethod + import numpy as np from probnum.prob import RandomVariable, Normal -from probnum.filtsmooth.bayesfiltsmooth import * +from probnum.filtsmooth.bayesfiltsmooth import BayesFiltSmooth from probnum.filtsmooth.gaussfiltsmooth.kalmanposterior import KalmanPosterior diff --git a/src/probnum/filtsmooth/gaussfiltsmooth/kalman.py b/src/probnum/filtsmooth/gaussfiltsmooth/kalman.py index 35d876964..fc7eade19 100644 --- a/src/probnum/filtsmooth/gaussfiltsmooth/kalman.py +++ b/src/probnum/filtsmooth/gaussfiltsmooth/kalman.py @@ -4,9 +4,14 @@ """ import numpy as np -from probnum.filtsmooth.gaussfiltsmooth.gaussfiltsmooth import * +from probnum.filtsmooth.gaussfiltsmooth.gaussfiltsmooth import GaussFiltSmooth from probnum.prob import RandomVariable, Normal -from probnum.filtsmooth.statespace import * +from probnum.filtsmooth.statespace import ( + ContinuousModel, + DiscreteModel, + LinearSDEModel, + DiscreteGaussianLinearModel, +) class Kalman(GaussFiltSmooth): diff --git a/src/probnum/filtsmooth/gaussfiltsmooth/unscentedkalman.py b/src/probnum/filtsmooth/gaussfiltsmooth/unscentedkalman.py index ca39e23db..eaf26c10e 100644 --- a/src/probnum/filtsmooth/gaussfiltsmooth/unscentedkalman.py +++ b/src/probnum/filtsmooth/gaussfiltsmooth/unscentedkalman.py @@ -7,10 +7,16 @@ import numpy as np -from probnum.filtsmooth.gaussfiltsmooth.gaussfiltsmooth import * +from probnum.filtsmooth.gaussfiltsmooth.gaussfiltsmooth import GaussFiltSmooth from probnum.prob import RandomVariable, Normal -from probnum.filtsmooth.statespace import * -from probnum.filtsmooth.gaussfiltsmooth.unscentedtransform import * +from probnum.filtsmooth.gaussfiltsmooth.unscentedtransform import UnscentedTransform +from probnum.filtsmooth.statespace import ( + ContinuousModel, + DiscreteModel, + LinearSDEModel, + DiscreteGaussianModel, + DiscreteGaussianLinearModel, +) class UnscentedKalman(GaussFiltSmooth): From 2762eb9fbae8fe75313a09d3c586d55f6ffe8ace Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Mon, 17 Aug 2020 22:16:36 +0200 Subject: [PATCH 20/44] Fixed "anomalous-backslash-in-string" --- src/probnum/diffeq/odefiltsmooth/ivp2filter.py | 8 ++++---- src/probnum/diffeq/odefiltsmooth/prior.py | 2 +- .../filtsmooth/gaussfiltsmooth/unscentedtransform.py | 2 +- .../filtsmooth/statespace/continuous/linearsdemodel.py | 6 +++--- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/probnum/diffeq/odefiltsmooth/ivp2filter.py b/src/probnum/diffeq/odefiltsmooth/ivp2filter.py index c9d4b6161..53266624d 100644 --- a/src/probnum/diffeq/odefiltsmooth/ivp2filter.py +++ b/src/probnum/diffeq/odefiltsmooth/ivp2filter.py @@ -22,12 +22,12 @@ def ivp2ekf0(ivp, prior, evlvar): onto the initial values. - If preconditioning is set to ``False``, it conditions - the initial distribution :math:`\mathcal{N}(0, I)` + the initial distribution :math:`\\mathcal{N}(0, I)` on the initial values :math:`(x_0, f(t_0, x_0), ...)` using as many available deri vatives as possible. - If preconditioning is set to ``True``, it conditions - the initial distribution :math:`\mathcal{N}(0, P P^\\top)` + the initial distribution :math:`\\mathcal{N}(0, P P^\\top)` on the initial values :math:`(x_0, f(t_0, x_0), ...)` using as many available derivatives as possible. 
Note that the projection matrices :math:`H_0` and :math:`H_1` @@ -36,7 +36,7 @@ def ivp2ekf0(ivp, prior, evlvar): **Measurement model:** - Returns a measurement model :math:`\mathcal{N}(g(m), R)` + Returns a measurement model :math:`\\mathcal{N}(g(m), R)` involving computing the discrepancy .. math:: g(m) = H_1 m(t) - f(t, H_0 m(t)). @@ -161,7 +161,7 @@ def diff(t, **kwargs): def _initialdistribution(ivp, prior): """ - Conditions initialdistribution :math:`\mathcal{N}(0, P P^\\top)` + Conditions initialdistribution :math:`\\mathcal{N}(0, P P^\\top)` on the initial values :math:`(x_0, f(t_0, x_0), ...)` using as many available derivatives as possible. diff --git a/src/probnum/diffeq/odefiltsmooth/prior.py b/src/probnum/diffeq/odefiltsmooth/prior.py index 83919e630..d2f8fdbda 100644 --- a/src/probnum/diffeq/odefiltsmooth/prior.py +++ b/src/probnum/diffeq/odefiltsmooth/prior.py @@ -394,7 +394,7 @@ def _driftmat_matern(ordint, spatialdim, lengthscale): def _dispmat(ordint, spatialdim, diffconst): """ - Returns I_D \otimes L + Returns I_D \\otimes L diffconst = sigma**2 """ dispvec = diffconst * np.eye(ordint + 1)[:, -1] diff --git a/src/probnum/filtsmooth/gaussfiltsmooth/unscentedtransform.py b/src/probnum/filtsmooth/gaussfiltsmooth/unscentedtransform.py index 9ea8582d8..a1b9b60bb 100644 --- a/src/probnum/filtsmooth/gaussfiltsmooth/unscentedtransform.py +++ b/src/probnum/filtsmooth/gaussfiltsmooth/unscentedtransform.py @@ -95,7 +95,7 @@ def propagate(self, time, sigmapts, modelfct): Time :math:`t` which is passed on to the modelfunction. sigmapts : np.ndarray, shape=(2 N+1, N) Sigma points (N is the spatial dimension of the dynamic model) - modelfct : callable, signature=(t, x, \*\*kwargs) + modelfct : callable, signature=(t, x, **kwargs) Function through which to propagate Returns diff --git a/src/probnum/filtsmooth/statespace/continuous/linearsdemodel.py b/src/probnum/filtsmooth/statespace/continuous/linearsdemodel.py index 0d1ae61ee..dee581c99 100644 --- a/src/probnum/filtsmooth/statespace/continuous/linearsdemodel.py +++ b/src/probnum/filtsmooth/statespace/continuous/linearsdemodel.py @@ -25,15 +25,15 @@ class LinearSDEModel(continuousmodel.ContinuousModel): Parameters ---------- - driftmatrixfct : callable, signature=(t, \*\*kwargs) + driftmatrixfct : callable, signature=(t, **kwargs) This is F = F(t). The evaluations of this function are called the drift(matrix) of the SDE. Returns np.ndarray with shape=(n, n) - forcfct : callable, signature=(t, \*\*kwargs) + forcfct : callable, signature=(t, **kwargs) This is u = u(t). Evaluations of this function are called the force(vector) of the SDE. Returns np.ndarray with shape=(n,) - dispmatrixfct : callable, signature=(t, \*\*kwargs) + dispmatrixfct : callable, signature=(t, **kwargs) This is L = L(t). Evaluations of this function are called the dispersion(matrix) of the SDE. 
Returns np.ndarray with shape=(n, s) From 21c97c7d6c97f962e89a304cd0e1733764ecf6e5 Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Mon, 17 Aug 2020 22:19:19 +0200 Subject: [PATCH 21/44] Many more disabled messages, but all checks pass --- pyproject.toml | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index b692daf16..83ff3079f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -48,7 +48,18 @@ arguments-differ, redefined-builtin, too-few-public-methods, too-many-locals, -missing-function-docstring +missing-function-docstring, +wrong-import-order, +no-self-use, +too-many-lines, +unused-variable, +broad-except, +abstract-class-instantiated, +protected-access, + +too-many-instance-attributes, +too-many-statements, +attribute-defined-outside-init, """ [tool.pylint.format] From a5f7ceea9a0dde28c8b649c7f76d1629109cabfc Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Mon, 17 Aug 2020 22:19:38 +0200 Subject: [PATCH 22/44] Fixed "unused-wildcard-import" --- pyproject.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 83ff3079f..5e90081de 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -40,7 +40,6 @@ line-too-long, fixme, too-many-arguments, missing-module-docstring, -unused-wildcard-import, abstract-method, unused-argument, too-many-branches, From 8834b33afe8edc065ae2b4d57f268c08887bf96d Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Mon, 17 Aug 2020 22:57:31 +0200 Subject: [PATCH 23/44] Fixed "line-too-long" --- pyproject.toml | 1 - src/probnum/diffeq/__init__.py | 3 +- src/probnum/diffeq/ode/ode.py | 2 +- .../diffeq/odefiltsmooth/ivp2filter.py | 2 +- src/probnum/diffeq/odefiltsmooth/prior.py | 3 +- .../gaussfiltsmooth/gaussfiltsmooth.py | 5 +- src/probnum/linalg/__init__.py | 3 +- .../linalg/linearsolvers/linearsolvers.py | 189 ++++++---- .../linalg/linearsolvers/matrixbased.py | 353 ++++++++++-------- .../linalg/linearsolvers/solutionbased.py | 21 +- src/probnum/linalg/linops/__init__.py | 5 +- src/probnum/linalg/linops/kronecker.py | 91 +++-- src/probnum/linalg/linops/linearoperators.py | 76 ++-- src/probnum/prob/__init__.py | 6 +- .../prob/distributions/distribution.py | 81 ++-- src/probnum/prob/distributions/normal.py | 14 +- src/probnum/prob/randomvariable.py | 45 ++- src/probnum/quad/__init__.py | 7 +- src/probnum/quad/bayesian/bayesquadrature.py | 18 +- src/probnum/quad/polynomial/clenshawcurtis.py | 13 +- .../quad/polynomial/polynomialquadrature.py | 6 +- src/probnum/utils/arrayutils.py | 4 +- 22 files changed, 553 insertions(+), 395 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 5e90081de..e83c80bad 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,7 +36,6 @@ invalid-name, no-else-return, no-else-raise, -line-too-long, fixme, too-many-arguments, missing-module-docstring, diff --git a/src/probnum/diffeq/__init__.py b/src/probnum/diffeq/__init__.py index 05a3afa36..af6c8b486 100644 --- a/src/probnum/diffeq/__init__.py +++ b/src/probnum/diffeq/__init__.py @@ -27,7 +27,8 @@ "ODESolution", ] -# Set correct module paths (for superclasses). Corrects links and module paths in documentation. +# Set correct module paths (for superclasses). +# Corrects links and module paths in documentation. 
ODE.__module__ = "probnum.diffeq" ODESolver.__module__ = "probnum.diffeq" StepRule.__module__ = "probnum.diffeq" diff --git a/src/probnum/diffeq/ode/ode.py b/src/probnum/diffeq/ode/ode.py index f23268d62..49541794f 100644 --- a/src/probnum/diffeq/ode/ode.py +++ b/src/probnum/diffeq/ode/ode.py @@ -102,7 +102,7 @@ def hessian(self, t, y, **kwargs): since for any directions :math:`v_1, v_2` the outcome of :math:`H_f(t_0, y_0) \\cdot v_1 \\cdot v_2` is expected to contain the incline of :math:`f_i` in direction :math:`(v_1, v_2)`. - """ + """ # pylint: disable=line-too-long if self._hess is None: raise NotImplementedError else: diff --git a/src/probnum/diffeq/odefiltsmooth/ivp2filter.py b/src/probnum/diffeq/odefiltsmooth/ivp2filter.py index 53266624d..10ac65678 100644 --- a/src/probnum/diffeq/odefiltsmooth/ivp2filter.py +++ b/src/probnum/diffeq/odefiltsmooth/ivp2filter.py @@ -66,7 +66,7 @@ def ivp2ekf0(ivp, prior, evlvar): evlvar : float, measurement variance; in the literature, this is "R" - """ + """ # pylint: disable=line-too-long measmod = _measmod_ekf0(ivp, prior, evlvar) initrv = _initialdistribution(ivp, prior) return ExtendedKalman(prior, measmod, initrv) diff --git a/src/probnum/diffeq/odefiltsmooth/prior.py b/src/probnum/diffeq/odefiltsmooth/prior.py index d2f8fdbda..993824eca 100644 --- a/src/probnum/diffeq/odefiltsmooth/prior.py +++ b/src/probnum/diffeq/odefiltsmooth/prior.py @@ -283,7 +283,8 @@ def _trans_ibm(self, start, stop): step = stop - start # This seems like the faster solution compared to fully vectorising. - # I suspect it is because np.math.factorial is much faster than scipy.special.factorial + # I suspect it is because np.math.factorial is much faster than + # scipy.special.factorial ah_1d = np.diag(np.ones(self.ordint + 1), 0) for i in range(self.ordint): offdiagonal = ( diff --git a/src/probnum/filtsmooth/gaussfiltsmooth/gaussfiltsmooth.py b/src/probnum/filtsmooth/gaussfiltsmooth/gaussfiltsmooth.py index 2faccd105..beeaea3e1 100644 --- a/src/probnum/filtsmooth/gaussfiltsmooth/gaussfiltsmooth.py +++ b/src/probnum/filtsmooth/gaussfiltsmooth/gaussfiltsmooth.py @@ -16,7 +16,7 @@ class GaussFiltSmooth(BayesFiltSmooth, ABC): """ def __init__(self, dynamod, measmod, initrv): - """ Check that the initial distribution is Gaussian. """ + """Check that the initial distribution is Gaussian.""" if not issubclass(type(initrv.distribution), Normal): raise ValueError( "Gaussian filters/smoothers need initial " @@ -121,7 +121,8 @@ def filter_step(self, start, stop, randvar, data, **kwargs): stop : float Predict TO this time point. randvar : RandomVariable - Predict based on this random variable. For instance, this can be the result of a previous call to filter_step. + Predict based on this random variable. For instance, this can be the result + of a previous call to filter_step. data : array_like Compute the update based on this data. diff --git a/src/probnum/linalg/__init__.py b/src/probnum/linalg/__init__.py index d74446916..c224c29bf 100644 --- a/src/probnum/linalg/__init__.py +++ b/src/probnum/linalg/__init__.py @@ -1,7 +1,8 @@ """ Linear Algebra. -This package implements common operations and (probabilistic) numerical methods for linear algebra. +This package implements common operations and (probabilistic) numerical methods for +linear algebra. 
""" from probnum.linalg.linearsolvers import * diff --git a/src/probnum/linalg/linearsolvers/linearsolvers.py b/src/probnum/linalg/linearsolvers/linearsolvers.py index cae760802..217256be8 100644 --- a/src/probnum/linalg/linearsolvers/linearsolvers.py +++ b/src/probnum/linalg/linearsolvers/linearsolvers.py @@ -1,9 +1,10 @@ """ Probabilistic numerical methods for solving linear systems. -This module provides routines to solve linear systems of equations in a Bayesian framework. This means that a prior -distribution over elements of the linear system can be provided and is updated with information collected by the solvers -to return a posterior distribution. +This module provides routines to solve linear systems of equations in a Bayesian +framework. This means that a prior distribution over elements of the linear system can +be provided and is updated with information collected by the solvers to return a +posterior distribution. """ import warnings @@ -42,32 +43,40 @@ def problinsolve( .. math:: Ax=b, - where :math:`A \\in \\mathbb{R}^{n \\times n}` and :math:`b \\in \\mathbb{R}^{n}`. They return a probability measure - which quantifies uncertainty in the output arising from finite computational resources. This solver can take prior - information either on the linear operator :math:`A` or its inverse :math:`H=A^{-1}` in - the form of a random variable ``A0`` or ``Ainv0`` and outputs a posterior belief over :math:`A` or :math:`H`. This - code implements the method described in Wenger et al. [1]_ based on the work in Hennig et al. [2]_. + where :math:`A \\in \\mathbb{R}^{n \\times n}` and :math:`b \\in \\mathbb{R}^{n}`. + They return a probability measure which quantifies uncertainty in the output arising + from finite computational resources. This solver can take prior information either + on the linear operator :math:`A` or its inverse :math:`H=A^{-1}` in the form of a + random variable ``A0`` or ``Ainv0`` and outputs a posterior belief over :math:`A` or + :math:`H`. This code implements the method described in Wenger et al. [1]_ based on + the work in Hennig et al. [2]_. Parameters ---------- A : array-like or LinearOperator, shape=(n,n) - A square linear operator (or matrix). Only matrix-vector products :math:`Av` are used internally. + A square linear operator (or matrix). Only matrix-vector products :math:`Av` are + used internally. b : array_like or RandomVariable, shape=(n,) or (n, nrhs) - Right-hand side vector, matrix or random variable in :math:`A x = b`. For multiple right hand sides, ``nrhs`` - problems are solved sequentially with the posteriors over the matrices acting as priors for subsequent solves. - If the right-hand-side is assumed to be noisy, every iteration of the solver samples a realization from ``b``. + Right-hand side vector, matrix or random variable in :math:`A x = b`. For + multiple right hand sides, ``nrhs`` problems are solved sequentially with the + posteriors over the matrices acting as priors for subsequent solves. + If the right-hand-side is assumed to be noisy, every iteration of the solver + samples a realization from ``b``. A0 : array-like or LinearOperator or RandomVariable, shape=(n,n), optional - A square matrix, linear operator or random variable representing the prior belief over the linear operator - :math:`A`. If an array or linear operator is given, a prior distribution is chosen automatically. 
- Ainv0 : array-like or LinearOperator or RandomVariable, shape=(n,n), optional - A square matrix, linear operator or random variable representing the prior belief over the inverse - :math:`H=A^{-1}`. This can be viewed as taking the form of a pre-conditioner. If an array or linear operator is + A square matrix, linear operator or random variable representing the prior + belief over the linear operator :math:`A`. If an array or linear operator is given, a prior distribution is chosen automatically. + Ainv0 : array-like or LinearOperator or RandomVariable, shape=(n,n), optional + A square matrix, linear operator or random variable representing the prior + belief over the inverse :math:`H=A^{-1}`. This can be viewed as taking the form + of a pre-conditioner. If an array or linear operator is given, a prior + distribution is chosen automatically. x0 : array-like, or RandomVariable, shape=(n,) or (n, nrhs) - Optional. Prior belief for the solution of the linear system. Will be ignored if ``Ainv0`` is given. + Optional. Prior belief for the solution of the linear system. Will be ignored if + ``Ainv0`` is given. assume_A : str, default="sympos" - Assumptions on the linear operator which can influence solver choice and behavior. The available options are - (combinations of) + Assumptions on the linear operator which can influence solver choice and + behavior. The available options are (combinations of) ==================== ========= generic matrix ``gen`` @@ -77,22 +86,25 @@ def problinsolve( ==================== ========= maxiter : int, optional - Maximum number of iterations. Defaults to :math:`10n`, where :math:`n` is the dimension of :math:`A`. + Maximum number of iterations. Defaults to :math:`10n`, where :math:`n` is the + dimension of :math:`A`. atol : float, optional Absolute convergence tolerance. rtol : float, optional Relative convergence tolerance. callback : function, optional - User-supplied function called after each iteration of the linear solver. It is called as - ``callback(xk, Ak, Ainvk, sk, yk, alphak, resid, **kwargs)`` and can be used to return quantities from the - iteration. Note that depending on the function supplied, this can slow down the solver considerably. + User-supplied function called after each iteration of the linear solver. It is + called as ``callback(xk, Ak, Ainvk, sk, yk, alphak, resid, **kwargs)`` and can + be used to return quantities from the iteration. Note that depending on the + function supplied, this can slow down the solver considerably. kwargs : optional Optional keyword arguments passed onto the solver iteration. Returns ------- x : RandomVariable, shape=(n,) or (n, nrhs) - Approximate solution :math:`x` to the linear system. Shape of the return matches the shape of ``b``. + Approximate solution :math:`x` to the linear system. Shape of the return matches + the shape of ``b``. A : RandomVariable, shape=(n,n) Posterior belief over the linear operator. Ainv : RandomVariable, shape=(n,n) @@ -111,14 +123,18 @@ def problinsolve( Notes ----- - For a specific class of priors the posterior mean of :math:`x_k=Hb` coincides with the iterates of the conjugate - gradient method. The matrix-based view taken here recovers the solution-based inference of :func:`bayescg` [3]_. + For a specific class of priors the posterior mean of :math:`x_k=Hb` coincides with + the iterates of the conjugate gradient method. The matrix-based view taken here + recovers the solution-based inference of :func:`bayescg` [3]_. References ---------- - .. [1] Wenger, J. 
and Hennig, P., Probabilistic Linear Solvers for Machine Learning, 2020 - .. [2] Hennig, P., Probabilistic Interpretation of Linear Solvers, *SIAM Journal on Optimization*, 2015, 25, 234-260 - .. [3] Bartels, S. et al., Probabilistic Linear Solvers: A Unifying View, *Statistics and Computing*, 2019 + .. [1] Wenger, J. and Hennig, P., Probabilistic Linear Solvers for Machine Learning, + 2020 + .. [2] Hennig, P., Probabilistic Interpretation of Linear Solvers, *SIAM Journal on + Optimization*, 2015, 25, 234-260 + .. [3] Bartels, S. et al., Probabilistic Linear Solvers: A Unifying View, + *Statistics and Computing*, 2019 See Also -------- @@ -166,7 +182,8 @@ def problinsolve( maxiter = n * 10 if nrhs > 1: - # Iteratively solve for multiple right hand sides (with posteriors as new priors) + # Iteratively solve for multiple right hand sides (with posteriors as new + # priors) for i in range(nrhs): if i > 0: x = None # Only use prior information on Ainv for multiple rhs @@ -208,37 +225,43 @@ def bayescg(A, b, x0=None, maxiter=None, atol=None, rtol=None, callback=None): """ Conjugate Gradients using prior information on the solution of the linear system. - In the setting where :math:`A` is a symmetric positive-definite matrix, this solver takes prior information - on the solution and outputs a posterior belief over :math:`x`. This code implements the - method described in Cockayne et al. [1]_. + In the setting where :math:`A` is a symmetric positive-definite matrix, this solver + takes prior information on the solution and outputs a posterior belief over + :math:`x`. This code implements the method described in Cockayne et al. [1]_. - Note that the solution-based view of BayesCG and the matrix-based view of :meth:`problinsolve` correspond [2]_. + Note that the solution-based view of BayesCG and the matrix-based view of + :meth:`problinsolve` correspond [2]_. Parameters ---------- A : array-like or LinearOperator, shape=(n,n) - A square linear operator (or matrix). Only matrix-vector products :math:`Av` are used internally. + A square linear operator (or matrix). Only matrix-vector products :math:`Av` are + used internally. b : array_like, shape=(n,) or (n, nrhs) Right-hand side vector or matrix in :math:`A x = b`. x0 : array-like or RandomVariable, shape=(n,) or or (n, nrhs) Prior belief over the solution of the linear system. maxiter : int - Maximum number of iterations. Defaults to :math:`10n`, where :math:`n` is the dimension of :math:`A`. + Maximum number of iterations. Defaults to :math:`10n`, where :math:`n` is the + dimension of :math:`A`. atol : float, optional - Absolute residual tolerance. If :math:`\\lVert r_i \\rVert = \\lVert Ax_i - b \\rVert < \\text{atol}`, the - iteration terminates. + Absolute residual tolerance. If :math:`\\lVert r_i \\rVert = \\lVert Ax_i - b + \\rVert < \\text{atol}`, the iteration terminates. rtol : float, optional - Relative residual tolerance. If :math:`\\lVert r_i \\rVert < \\text{rtol} \\lVert b \\rVert`, the - iteration terminates. + Relative residual tolerance. If :math:`\\lVert r_i \\rVert < \\text{rtol} + \\lVert b \\rVert`, the iteration terminates. callback : function, optional - User-supplied function called after each iteration of the linear solver. It is called as - ``callback(xk, sk, yk, alphak, resid, **kwargs)`` and can be used to return quantities from the iteration. Note - that depending on the function supplied, this can slow down the solver. + User-supplied function called after each iteration of the linear solver. 
It is + called as ``callback(xk, sk, yk, alphak, resid, **kwargs)`` and can be used to + return quantities from the iteration. Note that depending on the function + supplied, this can slow down the solver. References ---------- - .. [1] Cockayne, J. et al., A Bayesian Conjugate Gradient Method, *Bayesian Analysis*, 2019, 14, 937-1012 - .. [2] Bartels, S. et al., Probabilistic Linear Solvers: A Unifying View, *Statistics and Computing*, 2019 + .. [1] Cockayne, J. et al., A Bayesian Conjugate Gradient Method, *Bayesian + Analysis*, 2019, 14, 937-1012 + .. [2] Bartels, S. et al., Probabilistic Linear Solvers: A Unifying View, + *Statistics and Computing*, 2019 See Also -------- @@ -270,22 +293,25 @@ def _check_linear_system(A, b, A0=None, Ainv0=None, x0=None): """ Check linear system compatibility. - Raises an exception if the input arguments are not of the right type or not compatible. + Raises an exception if the input arguments are not of the right type or not + compatible. Parameters ---------- A : array-like or LinearOperator, shape=(n,n) - A square linear operator (or matrix). Only matrix-vector products :math:`Av` are used internally. + A square linear operator (or matrix). Only matrix-vector products :math:`Av` are + used internally. b : array_like, shape=(n,) or (n, nrhs) Right-hand side vector or matrix in :math:`A x = b`. A0 : array-like or LinearOperator or RandomVariable, shape=(n,n), optional - A square matrix, linear operator or random variable representing the prior belief over the linear operator - :math:`A`. + A square matrix, linear operator or random variable representing the prior + belief over the linear operator :math:`A`. Ainv0 : array-like or LinearOperator or RandomVariable, shape=(n,n), optional - A square matrix, linear operator or random variable representing the prior belief over the inverse - :math:`H=A^{-1}`. + A square matrix, linear operator or random variable representing the prior + belief over the inverse :math:`H=A^{-1}`. x0 : array-like, or RandomVariable, shape=(n,) or (n, nrhs) - Optional. Prior belief for the solution of the linear system. Will be ignored if ``Ainv0`` is given. + Optional. Prior belief for the solution of the linear system. Will be ignored if + ``Ainv0`` is given. Raises ------ @@ -312,7 +338,8 @@ def _check_linear_system(A, b, A0=None, Ainv0=None, x0=None): raise ValueError("The prior belief over A must be a random variable.") if Ainv0 is not None and not isinstance(Ainv0, linop_types): raise ValueError( - "The inverse of A must be either an array, a linear operator or a random variable of either." + "The inverse of A must be either an array, a linear operator or " + "a random variable of either." ) if x0 is not None and not isinstance(x0, vector_types): raise ValueError("The initial guess for the solution must be a (sparse) array.") @@ -322,7 +349,8 @@ def _check_linear_system(A, b, A0=None, Ainv0=None, x0=None): isinstance(A0, prob.RandomVariable) or isinstance(Ainv0, prob.RandomVariable) ) and isinstance(x0, prob.RandomVariable): raise ValueError( - "Cannot specify distributions on the linear operator and the solution simultaneously." + "Cannot specify distributions on the linear operator and the solution " + "simultaneously." ) # Dimension mismatch @@ -353,11 +381,13 @@ def _preprocess_linear_system(A, b, x0=None): Parameters ---------- A : array-like or LinearOperator, shape=(n,n) - A square linear operator (or matrix). Only matrix-vector products :math:`Av` are used internally. + A square linear operator (or matrix). 
Only matrix-vector products :math:`Av` are + used internally. b : array_like, shape=(n,) or (n, nrhs) Right-hand side vector or matrix in :math:`A x = b`. x0 : array-like, or RandomVariable, shape=(n,) or (n, nrhs) - Optional. Prior belief for the solution of the linear system. Will be ignored if ``Ainv0`` is given. + Optional. Prior belief for the solution of the linear system. Will be ignored if + ``Ainv0`` is given. Returns ------- @@ -366,7 +396,8 @@ def _preprocess_linear_system(A, b, x0=None): b : array-like, shape=(n,) or (n, nrhs) Right-hand-side of the linear system. x0 : array-like, or RandomVariable, shape=(n,) or (n, nrhs) - Optional. Prior belief for the solution of the linear system. Will be ignored if ``Ainv0`` is given. + Optional. Prior belief for the solution of the linear system. Will be ignored if + ``Ainv0`` is given. """ # Transform linear system to correct dimensions if not isinstance(b, prob.RandomVariable): @@ -379,26 +410,28 @@ def _preprocess_linear_system(A, b, x0=None): def _init_solver(A, b, A0, Ainv0, x0, assume_A): """ - Selects and initializes an appropriate instance of the probabilistic linear solver based on the system properties - and prior information given. + Selects and initializes an appropriate instance of the probabilistic linear solver + based on the system properties and prior information given. Parameters ---------- A : array-like or LinearOperator, shape=(n,n) - A square linear operator (or matrix). Only matrix-vector products :math:`Av` are used internally. + A square linear operator (or matrix). Only matrix-vector products :math:`Av` are + used internally. b : array_like, shape=(n,) or (n, nrhs) Right-hand side vector or matrix in :math:`A x = b`. A0 : array-like or LinearOperator or RandomVariable, shape=(n,n), optional - A square matrix, linear operator or random variable representing the prior belief over the linear operator - :math:`A`. + A square matrix, linear operator or random variable representing the prior + belief over the linear operator :math:`A`. Ainv0 : array-like or LinearOperator or RandomVariable, shape=(n,n), optional - A square matrix, linear operator or random variable representing the prior belief over the inverse - :math:`H=A^{-1}`. + A square matrix, linear operator or random variable representing the prior + belief over the inverse :math:`H=A^{-1}`. x0 : array-like, or RandomVariable, shape=(n,) or (n, nrhs) - Optional. Prior belief for the solution of the linear system. Will be ignored if ``Ainv0`` is given. + Optional. Prior belief for the solution of the linear system. Will be ignored if + ``Ainv0`` is given. assume_A : str - Assumptions on the linear operator, which can influence solver choice or behavior. The available options are - (combinations of) + Assumptions on the linear operator, which can influence solver choice or + behavior. The available options are (combinations of) ==================== ========= generic matrix ``gen`` @@ -410,13 +443,15 @@ def _init_solver(A, b, A0, Ainv0, x0, assume_A): Returns ------- linear_solver : ProbabilisticLinearSolver - A type of probabilistic linear solver implementing the solve method for linear systems. + A type of probabilistic linear solver implementing the solve method for linear + systems. """ # Choose matrix based view if not clear from arguments if (Ainv0 is not None or A0 is not None) and isinstance(x0, prob.RandomVariable): warnings.warn( - "Cannot use prior uncertainty on both the matrix (inverse) and the solution. The latter will be ignored." 
+ "Cannot use prior uncertainty on both the matrix (inverse) and the " + "solution. The latter will be ignored." ) x0 = x0.mean() @@ -435,8 +470,8 @@ def _init_solver(A, b, A0, Ainv0, x0, assume_A): and "noise" in assume_A ): warnings.warn( - "A is assumed to be noisy, but is neither a random variable nor a linear operator. Use exact " - "probabilistic linear solver instead." + "A is assumed to be noisy, but is neither a random variable nor a " + "linear operator. Use exact probabilistic linear solver instead." ) # Solution-based view @@ -461,7 +496,8 @@ def _postprocess(info, A): """ Postprocess the linear system and its solution. - Raises exceptions or warnings based on the properties of the linear system and the solver iteration. + Raises exceptions or warnings based on the properties of the linear system and the + solver iteration. Parameters ---------- @@ -494,9 +530,10 @@ def _postprocess(info, A): # Ill-conditioned matrix A if rel_cond is not None and 1 / rel_cond < machine_eps: warnings.warn( - "Ill-conditioned matrix detected (estimated rcond={:.6g}). Results are likely inaccurate.".format( - rel_cond - ), + ( + "Ill-conditioned matrix detected (estimated rcond={:.6g}). " + "Results are likely inaccurate." + ).format(rel_cond), scipy.linalg.LinAlgWarning, stacklevel=3, ) diff --git a/src/probnum/linalg/linearsolvers/matrixbased.py b/src/probnum/linalg/linearsolvers/matrixbased.py index 7a903a0f7..9fe3225f4 100644 --- a/src/probnum/linalg/linearsolvers/matrixbased.py +++ b/src/probnum/linalg/linearsolvers/matrixbased.py @@ -1,8 +1,8 @@ """ Matrix-based probabilistic linear solvers. -Implementations of matrix-based linear solvers which perform inference on the matrix or its inverse given linear -observations. +Implementations of matrix-based linear solvers which perform inference on the matrix or +its inverse given linear observations. """ import warnings import abc @@ -18,15 +18,16 @@ class ProbabilisticLinearSolver(abc.ABC): """ An abstract base class for probabilistic linear solvers. - This class is designed to be subclassed with new (probabilistic) linear solvers, which implement a ``.solve()`` - method. Objects of this type are instantiated in wrapper functions such as :meth:``problinsolve``. + This class is designed to be subclassed with new (probabilistic) linear solvers, + which implement a ``.solve()`` method. Objects of this type are instantiated in + wrapper functions such as :meth:``problinsolve``. Parameters ---------- A : array-like or LinearOperator or RandomVariable, shape=(n,n) A square matrix or linear operator. A prior distribution can be provided as a - :class:`~probnum.prob.RandomVariable`. If an array or linear operator is given, a prior distribution is - chosen automatically. + :class:`~probnum.prob.RandomVariable`. If an array or linear operator is given, + a prior distribution is chosen automatically. b : RandomVariable, shape=(n,) or (n, nrhs) Right-hand side vector, matrix or RandomVariable of :math:`A x = b`. """ @@ -40,7 +41,8 @@ def has_converged(self, iter, maxiter, **kwargs): """ Check convergence of a linear solver. - Evaluates a set of convergence criteria based on its input arguments to decide whether the iteration has converged. + Evaluates a set of convergence criteria based on its input arguments to decide + whether the iteration has converged. Parameters ---------- @@ -59,7 +61,7 @@ def has_converged(self, iter, maxiter, **kwargs): # maximum iterations if iter >= maxiter: warnings.warn( - message="Iteration terminated. 
Solver reached the maximum number of iterations." + "Iteration terminated. Solver reached the maximum number of iterations." ) return True, "maxiter" else: @@ -72,17 +74,19 @@ def solve(self, callback=None, **kwargs): Parameters ---------- callback : function, optional - User-supplied function called after each iteration of the linear solver. It is called as - ``callback(xk, Ak, Ainvk, sk, yk, alphak, resid, **kwargs)`` and can be used to return quantities from the - iteration. Note that depending on the function supplied, this can slow down the solver. + User-supplied function called after each iteration of the linear solver. It + is called as ``callback(xk, Ak, Ainvk, sk, yk, alphak, resid, **kwargs)`` + and can be used to return quantities from the iteration. Note that depending + on the function supplied, this can slow down the solver. kwargs - Key-word arguments adjusting the behaviour of the ``solve`` iteration. These are usually convergence - criteria. + Key-word arguments adjusting the behaviour of the ``solve`` iteration. These + are usually convergence criteria. Returns ------- x : RandomVariable, shape=(n,) or (n, nrhs) - Approximate solution :math:`x` to the linear system. Shape of the return matches the shape of ``b``. + Approximate solution :math:`x` to the linear system. Shape of the return + matches the shape of ``b``. A : RandomVariable, shape=(n,n) Posterior belief over the linear operator. Ainv : RandomVariable, shape=(n,n) @@ -102,7 +106,8 @@ class MatrixBasedSolver(ProbabilisticLinearSolver, abc.ABC): ---------- A : array-like or LinearOperator or RandomVariable, shape=(n,n) A square matrix or linear operator. A prior distribution can be provided as a - :class:`~probnum.prob.RandomVariable`. If an array or linear operator is given, a prior distribution is + :class:`~probnum.prob.RandomVariable`. If an array or linear operator is given, + a prior distribution is chosen automatically. b : RandomVariable, shape=(n,) or (n, nrhs) Right-hand side vector, matrix or RandomVariable of :math:`A x = b`. @@ -119,15 +124,17 @@ def _get_prior_params(self, A0, Ainv0, x0, b): Parameters ---------- A0 : array-like or LinearOperator or RandomVariable, shape=(n,n), optional - A square matrix, linear operator or random variable representing the prior belief over the linear operator - :math:`A`. If an array or linear operator is given, a prior distribution is chosen automatically. - Ainv0 : array-like or LinearOperator or RandomVariable, shape=(n,n), optional - A square matrix, linear operator or random variable representing the prior belief over the inverse - :math:`H=A^{-1}`. This can be viewed as taking the form of a pre-conditioner. If an array or linear operator is + A square matrix, linear operator or random variable representing the prior + belief over the linear operator :math:`A`. If an array or linear operator is given, a prior distribution is chosen automatically. + Ainv0 : array-like or LinearOperator or RandomVariable, shape=(n,n), optional + A square matrix, linear operator or random variable representing the prior + belief over the inverse :math:`H=A^{-1}`. This can be viewed as taking the + form of a pre-conditioner. If an array or linear operator is given, a prior + distribution is chosen automatically. x0 : array-like, or RandomVariable, shape=(n,) or (n, nrhs) - Optional. Prior belief for the solution of the linear system. Will be ignored if ``A0`` or ``Ainv0`` is - given. + Optional. Prior belief for the solution of the linear system. 
Will be + ignored if ``A0`` or ``Ainv0`` is given. b : array_like, shape=(n,) or (n, nrhs) Right-hand side vector or matrix in :math:`A x = b`. """ @@ -135,10 +142,12 @@ def _get_prior_params(self, A0, Ainv0, x0, b): def _construct_symmetric_matrix_prior_means(self, A, x0, b): """ - Create matrix prior means from an initial guess for the solution of the linear system. + Create matrix prior means from an initial guess for the solution of the linear + system. - Constructs a matrix-variate prior mean for H from ``x0`` and ``b`` such that :math:`H_0b = x_0`, :math:`H_0` - symmetric positive definite and :math:`A_0 = H_0^{-1}`. + Constructs a matrix-variate prior mean for H from ``x0`` and ``b`` such that + :math:`H_0b = x_0`, :math:`H_0` symmetric positive definite and + :math:`A_0 = H_0^{-1}`. Parameters ---------- @@ -152,11 +161,14 @@ def _construct_symmetric_matrix_prior_means(self, A, x0, b): Returns ------- A0_mean : linops.LinearOperator - Mean of the matrix-variate prior distribution on the system matrix :math:`A`. + Mean of the matrix-variate prior distribution on the system matrix + :math:`A`. Ainv0_mean : linops.LinearOperator - Mean of the matrix-variate prior distribution on the inverse of the system matrix :math:`H = A^{-1}`. + Mean of the matrix-variate prior distribution on the inverse of the system + matrix :math:`H = A^{-1}`. """ - # Check inner product between x0 and b; if negative or zero, choose better initialization + # Check inner product between x0 and b; if negative or zero, choose better + # initialization bx0 = np.squeeze(b.T @ x0) bb = np.linalg.norm(b) ** 2 if bx0 < 0: @@ -226,7 +238,8 @@ class SymmetricMatrixBasedSolver(MatrixBasedSolver): """ Symmetric matrix-based probabilistic linear solver. - Implements the solve iteration of the symmetric matrix-based probabilistic linear solver described in [1]_ and [2]_. + Implements the solve iteration of the symmetric matrix-based probabilistic linear + solver described in [1]_ and [2]_. Parameters ---------- @@ -235,14 +248,17 @@ class SymmetricMatrixBasedSolver(MatrixBasedSolver): b : array_like, shape=(n,) or (n, nrhs) Right-hand side vector or matrix in :math:`A x = b`. A0 : array-like or LinearOperator or RandomVariable, shape=(n, n), optional - A square matrix, linear operator or random variable representing the prior belief over the linear operator - :math:`A`. If an array or linear operator is given, a prior distribution is chosen automatically. - Ainv0 : array-like or LinearOperator or RandomVariable, shape=(n,n), optional - A square matrix, linear operator or random variable representing the prior belief over the inverse - :math:`H=A^{-1}`. This can be viewed as taking the form of a pre-conditioner. If an array or linear operator is + A square matrix, linear operator or random variable representing the prior + belief over the linear operator :math:`A`. If an array or linear operator is given, a prior distribution is chosen automatically. + Ainv0 : array-like or LinearOperator or RandomVariable, shape=(n,n), optional + A square matrix, linear operator or random variable representing the prior + belief over the inverse :math:`H=A^{-1}`. This can be viewed as taking the form + of a pre-conditioner. If an array or linear operator is given, a prior + distribution is chosen automatically. x0 : array-like, or RandomVariable, shape=(n,) or (n, nrhs) - Optional. Prior belief for the solution of the linear system. Will be ignored if ``Ainv0`` is given. + Optional. Prior belief for the solution of the linear system. 
Will be ignored if + ``Ainv0`` is given. Returns ------- @@ -257,12 +273,15 @@ class SymmetricMatrixBasedSolver(MatrixBasedSolver): References ---------- - .. [1] Wenger, J. and Hennig, P., Probabilistic Linear Solvers for Machine Learning, 2020 - .. [2] Hennig, P., Probabilistic Interpretation of Linear Solvers, *SIAM Journal on Optimization*, 2015, 25, 234-260 + .. [1] Wenger, J. and Hennig, P., Probabilistic Linear Solvers for Machine Learning, + 2020 + .. [2] Hennig, P., Probabilistic Interpretation of Linear Solvers, *SIAM Journal on + Optimization*, 2015, 25, 234-260 See Also -------- - NoisySymmetricMatrixBasedSolver : Class implementing the noisy symmetric probabilistic linear solver. + NoisySymmetricMatrixBasedSolver : + Class implementing the noisy symmetric probabilistic linear solver. """ def __init__(self, A, b, A0=None, Ainv0=None, x0=None): @@ -311,15 +330,17 @@ def _get_prior_params(self, A0, Ainv0, x0, b): Parameters ---------- A0 : array-like or LinearOperator or RandomVariable, shape=(n,n), optional - A square matrix, linear operator or random variable representing the prior belief over the linear operator - :math:`A`. If an array or linear operator is given, a prior distribution is chosen automatically. - Ainv0 : array-like or LinearOperator or RandomVariable, shape=(n,n), optional - A square matrix, linear operator or random variable representing the prior belief over the inverse - :math:`H=A^{-1}`. This can be viewed as taking the form of a pre-conditioner. If an array or linear operator is - given, a prior distribution is chosen automatically. + A square matrix, linear operator or random variable representing the prior + belief over the linear operator :math:`A`. If an array or linear operator is + given, a prior distribution is chosen automatically. Ainv0 : array-like or + LinearOperator or RandomVariable, shape=(n,n), optional A square matrix, + linear operator or random variable representing the prior belief over the + inverse :math:`H=A^{-1}`. This can be viewed as taking the form of a + pre-conditioner. If an array or linear operator is given, a prior + distribution is chosen automatically. x0 : array-like, or RandomVariable, shape=(n,) or (n, nrhs) - Optional. Prior belief for the solution of the linear system. Will be ignored if ``A0`` or ``Ainv0`` is - given. + Optional. Prior belief for the solution of the linear system. Will be + ignored if ``A0`` or ``Ainv0`` is given. b : array_like, shape=(n,) or (n, nrhs) Right-hand side vector or matrix in :math:`A x = b`. @@ -328,13 +349,13 @@ def _get_prior_params(self, A0, Ainv0, x0, b): A0_mean : array-like or LinearOperator, shape=(n,n) Prior mean of the linear operator :math:`A`. A0_covfactor : array-like or LinearOperator, shape=(n,n) - Factor :math:`W^A` of the symmetric Kronecker product prior covariance :math:`W^A \\otimes_s W^A` of - :math:`A`. + Factor :math:`W^A` of the symmetric Kronecker product prior covariance + :math:`W^A \\otimes_s W^A` of :math:`A`. Ainv0_mean : array-like or LinearOperator, shape=(n,n) Prior mean of the linear operator :math:`H`. Ainv0_covfactor : array-like or LinearOperator, shape=(n,n) - Factor :math:`W^H` of the symmetric Kronecker product prior covariance :math:`W^H \\otimes_s W^H` of - :math:`H`. + Factor :math:`W^H` of the symmetric Kronecker product prior covariance + :math:`W^H \\otimes_s W^H` of :math:`H`. 
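
        Notes
        -----
        A minimal sketch, assuming the :class:`SymmetricKronecker` operator from
        ``probnum.linalg.linops``, of the prior covariance such a factor induces
        (the names below are illustrative and not this method's return values):

        .. code-block:: python

            from probnum.linalg import linops

            W_A = linops.Identity(5)                # stand-in covariance factor
            cov_A = linops.SymmetricKronecker(W_A)  # covariance W^A (x)_s W^A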
""" self.is_calib_covclass = False # No matrix priors specified @@ -378,15 +399,16 @@ def _get_prior_params(self, A0, Ainv0, x0, b): A0_mean = Ainv0.inv() except AttributeError: warnings.warn( - message="Prior specified only for Ainv. Inverting prior mean naively. " - + "This operation is computationally costly! Specify an inverse prior (mean) instead." + "Prior specified only for Ainv. Inverting prior mean naively. " + "This operation is computationally costly! Specify an inverse " + "prior (mean) instead." ) A0_mean = np.linalg.inv(Ainv0.mean()) except NotImplementedError: A0_mean = linops.Identity(self.n) warnings.warn( - message="Prior specified only for Ainv. Automatic prior mean inversion not implemented, " - + "falling back to standard normal prior." + "Prior specified only for Ainv. Automatic prior mean inversion " + "not implemented, falling back to standard normal prior." ) # Symmetric posterior correspondence A0_covfactor = self.A @@ -410,15 +432,16 @@ def _get_prior_params(self, A0, Ainv0, x0, b): Ainv0_mean = A0.inv() except AttributeError: warnings.warn( - message="Prior specified only for A. Inverting prior mean naively. " - + "This operation is computationally costly! Specify an inverse prior (mean)." + "Prior specified only for A. Inverting prior mean naively. " + "This operation is computationally costly! " + "Specify an inverse prior (mean)." ) Ainv0_mean = np.linalg.inv(A0.mean()) except NotImplementedError: Ainv0_mean = linops.Identity(self.n) warnings.warn( - message="Prior specified only for A. " - + "Automatic prior mean inversion failed, falling back to standard normal prior." + "Prior specified only for A. Automatic prior mean inversion " + "failed, falling back to standard normal prior." ) # Symmetric posterior correspondence Ainv0_covfactor = Ainv0_mean @@ -484,14 +507,16 @@ def _compute_trace_Ainv_covfactor0(self, Y, unc_scale): def _compute_trace_solution_covariance(self, bWb, Wb): """ - Computes the trace of the solution covariance :math:`\\tr(\\operatorname{Cov}[x])` + Computes the trace of the solution covariance + :math:`\\tr(\\operatorname{Cov}[x])` Parameters ---------- bWb : float Inner product of right hand side and the inverse covariance factor. Wb : np.ndarray - Matrix-vector product between the inverse covariance factor and the right hand side. + Matrix-vector product between the inverse covariance factor and the right + hand side. Returns ------- @@ -505,7 +530,8 @@ def has_converged(self, iter, maxiter, resid=None, atol=None, rtol=None): """ Check convergence of a linear solver. - Evaluates a set of convergence criteria based on its input arguments to decide whether the iteration has converged. + Evaluates a set of convergence criteria based on its input arguments to decide + whether the iteration has converged. Parameters ---------- @@ -514,7 +540,8 @@ def has_converged(self, iter, maxiter, resid=None, atol=None, rtol=None): maxiter : int Maximum number of iterations resid : array-like - Residual vector :math:`\\lVert r_i \\rVert = \\lVert Ax_i - b \\rVert` of the current iteration. + Residual vector :math:`\\lVert r_i \\rVert = \\lVert Ax_i - b \\rVert` of + the current iteration. atol : float Absolute residual tolerance. Stops if :math:`\\min(\\lVert r_i \\rVert, \\sqrt{\\operatorname{tr}(\\operatorname{Cov}(x))}) \\leq \\text{atol}`. @@ -528,11 +555,11 @@ def has_converged(self, iter, maxiter, resid=None, atol=None, rtol=None): True if the method has converged. convergence_criterion : str Convergence criterion which caused termination. 
- """ + """ # pylint: disable=line-too-long # maximum iterations if iter >= maxiter: warnings.warn( - message="Iteration terminated. Solver reached the maximum number of iterations." + "Iteration terminated. Solver reached the maximum number of iterations." ) return True, "maxiter" # residual below error tolerance @@ -554,9 +581,10 @@ def _calibrate_uncertainty(self, S, sy, method): """ Calibrate uncertainty based on the Rayleigh coefficients - A regression model for the log-Rayleigh coefficient is built based on the collected observations. The degrees of - freedom in the kernels of A and H are set according to the predicted log-Rayleigh coefficient for the - remaining unexplored dimensions. + A regression model for the log-Rayleigh coefficient is built based on the + collected observations. The degrees of freedom in the kernels of A and H are set + according to the predicted log-Rayleigh coefficient for the remaining unexplored + dimensions. Parameters ---------- @@ -565,7 +593,8 @@ def _calibrate_uncertainty(self, S, sy, method): sy : np.ndarray Array of inner products ``s_i'As_i`` method : str - Type of calibration method to use based on the Rayleigh quotient. Available calibration procedures are + Type of calibration method to use based on the Rayleigh quotient. Available + calibration procedures are ==================================== ================== Most recent Rayleigh quotient ``adhoc`` Running (weighted) mean ``weightedmean`` @@ -584,9 +613,8 @@ def _calibrate_uncertainty(self, S, sy, method): iters = np.arange(self.iter_ + 1) logR = np.log(sy) - np.log(np.einsum("nk,nk->k", S, S)) - if ( - self.iter_ > 1 - ): # only calibrate if enough iterations for a regression model have been performed + # only calibrate if enough iterations for a regression model have been performed + if self.iter_ > 1: if method == "adhoc": logR_pred = logR[-1] elif method == "weightedmean": @@ -595,8 +623,9 @@ def _calibrate_uncertainty(self, S, sy, method): deprecation_rate, self.iter_ + 1 ) ** np.arange(self.iter_ + 1) elif method == "gpkern": - # GP mean function via Weyl's result on spectra of Gram matrices for differentiable kernels - # ln(sigma(n)) ~= theta_0 - theta_1 ln(n) + # GP mean function via Weyl's result on spectra of Gram matrices for + # differentiable kernels + # ln(sigma(n)) ~= theta_0 - theta_1 ln(n) lnmap = GPy.core.Mapping(1, 1) lnmap.f = lambda n: np.log(n + 10 ** -16) lnmap.update_gradients = lambda a, b: None @@ -628,16 +657,19 @@ def _calibrate_uncertainty(self, S, sy, method): def _get_calibration_covariance_update_terms(self, phi=None, psi=None): """ - For the calibration covariance class set the calibration update terms of the covariance in the null spaces - of span(S) and span(Y) based on the degrees of freedom. + For the calibration covariance class set the calibration update terms of the + covariance in the null spaces of span(S) and span(Y) based on the degrees of + freedom. """ # Search directions and observations as arrays S = np.hstack(self.search_dir_list) Y = np.hstack(self.obs_list) def get_null_space_map(V, unc_scale): - """Returns a function mapping to the null space of span(V), scaling with a single degree of freedom - and mapping back.""" + """ + Returns a function mapping to the null space of span(V), scaling with a + single degree of freedom and mapping back. 
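
            In matrix form, for full-rank ``V`` and a scalar ``unc_scale``
            :math:`\\phi`, the map constructed below is

            .. math:: y \\mapsto \\phi (I - V (V^\\top V)^{-1} V^\\top) y.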
+ """ def null_space_proj(x): try: @@ -646,15 +678,18 @@ def null_space_proj(x): except np.linalg.LinAlgError: return np.zeros_like(x) - # For a scalar uncertainty scale projecting to the null space twice is equivalent to projecting once + # For a scalar uncertainty scale projecting to the null space twice is + # equivalent to projecting once return lambda y: unc_scale * null_space_proj(y) - # Compute calibration term in the A view as a linear operator with scaling from degrees of freedom + # Compute calibration term in the A view as a linear operator with scaling from + # degrees of freedom calibration_term_A = linops.LinearOperator( shape=(self.n, self.n), matvec=get_null_space_map(V=S, unc_scale=phi) ) - # Compute calibration term in the Ainv view as a linear operator with scaling from degrees of freedom + # Compute calibration term in the Ainv view as a linear operator with scaling + # from degrees of freedom calibration_term_Ainv = linops.LinearOperator( shape=(self.n, self.n), matvec=get_null_space_map(V=Y, unc_scale=psi) ) @@ -662,7 +697,9 @@ def null_space_proj(x): return calibration_term_A, calibration_term_Ainv def _get_output_randvars(self, Y_list, sy_list, phi=None, psi=None): - """Return output random variables x, A, Ainv from their means and covariances.""" + """ + Return output random variables x, A, Ainv from their means and covariances. + """ if self.iter_ > 0: # Observations and inner products in A-space between actions @@ -682,8 +719,9 @@ def _matvec(x): def _matvec(x): # Term in covariance class: A_0^{-1}Y(Y'A_0^{-1}Y)^{-1}Y'A_0^{-1} - # TODO: for efficiency ensure that we dont have to compute (Y.T Y)^{-1} two times! For a scalar mean - # this is the same as in the null space projection + # TODO: for efficiency ensure that we dont have to compute + # (Y.T Y)^{-1} two times! For a scalar mean this is the same as in + # the null space projection YAinv0Y_inv_YAinv0x = np.linalg.solve( Y.T @ (self.Ainv_mean0 @ Y), Y.T @ (self.Ainv_mean0 @ x) ) @@ -693,7 +731,8 @@ def _matvec(x): shape=(self.n, self.n), matvec=_matvec ) - # Set degrees of freedom based on uncertainty calibration in unexplored space + # Set degrees of freedom based on uncertainty calibration in unexplored + # space ( calibration_term_A, calibration_term_Ainv, @@ -757,7 +796,9 @@ def _mv(x): return x, A, Ainv def _mean_update(self, u, v): - """Linear operator implementing the symmetric rank 2 mean update (+= uv' + vu').""" + """ + Linear operator implementing the symmetric rank 2 mean update (+= uv' + vu'). + """ def mv(x): return u @ (v.T @ x) + v @ (u.T @ x) @@ -765,7 +806,9 @@ def mv(x): return linops.LinearOperator(shape=(self.n, self.n), matvec=mv, matmat=mv) def _covariance_update(self, u, Ws): - """Linear operator implementing the symmetric rank 2 kernels update (-= Ws u^T).""" + """ + Linear operator implementing the symmetric rank 2 kernels update (-= Ws u^T). + """ def mv(x): return Ws @ (u.T @ x) @@ -781,9 +824,10 @@ def solve( Parameters ---------- callback : function, optional - User-supplied function called after each iteration of the linear solver. It is called as - ``callback(xk, Ak, Ainvk, sk, yk, alphak, resid)`` and can be used to return quantities from the - iteration. Note that depending on the function supplied, this can slow down the solver. + User-supplied function called after each iteration of the linear solver. It + is called as ``callback(xk, Ak, Ainvk, sk, yk, alphak, resid)`` and can be + used to return quantities from the iteration. 
Note that depending on the + function supplied, this can slow down the solver. maxiter : int Maximum number of iterations atol : float @@ -793,8 +837,8 @@ def solve( Relative residual tolerance. Stops if :math:`\\min(\\lVert r_i \\rVert, \\sqrt{\\operatorname{tr}(\\operatorname{Cov}(x))}) \\leq \\text{rtol} \\lVert b \\rVert`. calibration : str or float, default=False - If supplied calibrates the output via the given procedure or uncertainty scale. Available calibration - procedures / choices are + If supplied calibrates the output via the given procedure or uncertainty + scale. Available calibration procedures / choices are ==================================== ================ No calibration None @@ -807,14 +851,15 @@ def solve( Returns ------- x : RandomVariable, shape=(n,) or (n, nrhs) - Approximate solution :math:`x` to the linear system. Shape of the return matches the shape of ``b``. + Approximate solution :math:`x` to the linear system. Shape of the return + matches the shape of ``b``. A : RandomVariable, shape=(n,n) Posterior belief over the linear operator.calibrate Ainv : RandomVariable, shape=(n,n) Posterior belief over the linear operator inverse :math:`H=A^{-1}`. info : dict Information on convergence of the solver. - """ + """ # pylint: disable=line-too-long # Initialization self.iter_ = 0 resid = self.A @ self.x_mean - self.b @@ -974,8 +1019,9 @@ class NoisySymmetricMatrixBasedSolver(MatrixBasedSolver): """ Solver iteration of the noisy symmetric probabilistic linear solver. - Implements the solve iteration of the symmetric matrix-based probabilistic linear solver taking into account noisy - matrix-vector products :math:`y_k = (A + E_k)s_k` as described in [1]_ and [2]_. + Implements the solve iteration of the symmetric matrix-based probabilistic linear + solver taking into account noisy matrix-vector products :math:`y_k = (A + E_k)s_k` + as described in [1]_ and [2]_. Parameters ---------- @@ -984,14 +1030,17 @@ class NoisySymmetricMatrixBasedSolver(MatrixBasedSolver): b : array_like, shape=(n,) or (n, nrhs) Right-hand side vector or matrix in :math:`A x = b`. A0 : array-like or LinearOperator or RandomVariable, shape=(n, n), optional - A square matrix, linear operator or random variable representing the prior belief over the linear operator - :math:`A`. If an array or linear operator is given, a prior distribution is chosen automatically. - Ainv0 : array-like or LinearOperator or RandomVariable, shape=(n,n), optional - A square matrix, linear operator or random variable representing the prior belief over the inverse - :math:`H=A^{-1}`. This can be viewed as taking the form of a pre-conditioner. If an array or linear operator is + A square matrix, linear operator or random variable representing the prior + belief over the linear operator :math:`A`. If an array or linear operator is given, a prior distribution is chosen automatically. + Ainv0 : array-like or LinearOperator or RandomVariable, shape=(n,n), optional + A square matrix, linear operator or random variable representing the prior + belief over the inverse :math:`H=A^{-1}`. This can be viewed as taking the form + of a pre-conditioner. If an array or linear operator is given, a prior + distribution is chosen automatically. x0 : array-like, or RandomVariable, shape=(n,) or (n, nrhs) - Optional. Prior belief for the solution of the linear system. Will be ignored if ``Ainv0`` is given. + Optional. Prior belief for the solution of the linear system. Will be ignored if + ``Ainv0`` is given. 
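
    Notes
    -----
    A minimal sketch of the noisy observation model :math:`y_k = (A + E_k) s_k`
    mentioned above (all quantities are illustrative stand-ins):

    .. code-block:: python

        import numpy as np

        rng = np.random.default_rng(0)
        n, eps2 = 5, 0.01                        # dimension and noise scale
        A = np.diag(np.arange(1.0, n + 1))       # stand-in "true" system matrix
        s = np.ones(n)                           # a search direction
        E = np.sqrt(eps2) * rng.standard_normal((n, n))
        y = (A + E) @ s                          # noisy matrix-vector product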
Returns ------- @@ -1006,12 +1055,15 @@ class NoisySymmetricMatrixBasedSolver(MatrixBasedSolver): References ---------- - .. [1] Wenger, J., de Roos, F. and Hennig, P., Probabilistic Solution of Noisy Linear Systems, 2020 - .. [2] Hennig, P., Probabilistic Interpretation of Linear Solvers, *SIAM Journal on Optimization*, 2015, 25, 234-260 + .. [1] Wenger, J., de Roos, F. and Hennig, P., Probabilistic Solution of Noisy + Linear Systems, 2020 + .. [2] Hennig, P., Probabilistic Interpretation of Linear Solvers, *SIAM Journal on + Optimization*, 2015, 25, 234-260 See Also -------- - SymmetricMatrixBasedSolver : Class implementing the symmetric probabilistic linear solver. + SymmetricMatrixBasedSolver : + Class implementing the symmetric probabilistic linear solver. """ def __init__(self, A, b, A0=None, Ainv0=None, x0=None): @@ -1070,15 +1122,17 @@ def _get_prior_params(self, A0, Ainv0, x0, b): Parameters ---------- A0 : array-like or LinearOperator or RandomVariable, shape=(n,n), optional - A square matrix, linear operator or random variable representing the prior belief over the linear operator - :math:`A`. If an array or linear operator is given, a prior distribution is chosen automatically. - Ainv0 : array-like or LinearOperator or RandomVariable, shape=(n,n), optional - A square matrix, linear operator or random variable representing the prior belief over the inverse - :math:`H=A^{-1}`. This can be viewed as taking the form of a pre-conditioner. If an array or linear operator is + A square matrix, linear operator or random variable representing the prior + belief over the linear operator :math:`A`. If an array or linear operator is given, a prior distribution is chosen automatically. + Ainv0 : array-like or LinearOperator or RandomVariable, shape=(n,n), optional + A square matrix, linear operator or random variable representing the prior + belief over the inverse :math:`H=A^{-1}`. This can be viewed as taking the + form of a pre-conditioner. If an array or linear operator is given, a prior + distribution is chosen automatically. x0 : array-like, or RandomVariable, shape=(n,) - Optional. Prior belief for the solution of the linear system. Will be ignored if ``A0`` or ``Ainv0`` is - given. + Optional. Prior belief for the solution of the linear system. Will be + ignored if ``A0`` or ``Ainv0`` is given. b : RandomVariable, shape=(n,) or (n, nrhs) Right-hand side random variable `b` in :math:`A x = b`. @@ -1087,13 +1141,13 @@ def _get_prior_params(self, A0, Ainv0, x0, b): A0_mean : array-like or LinearOperator, shape=(n,n) Prior mean of the linear operator :math:`A`. A0_covfactor : array-like or LinearOperator, shape=(n,n) - Factor :math:`W^A` of the symmetric Kronecker product prior covariance :math:`W^A \\otimes_s W^A` of - :math:`A`. + Factor :math:`W^A` of the symmetric Kronecker product prior covariance + :math:`W^A \\otimes_s W^A` of :math:`A`. Ainv0_mean : array-like or LinearOperator, shape=(n,n) Prior mean of the linear operator :math:`H`. Ainv0_covfactor : array-like or LinearOperator, shape=(n,n) - Factor :math:`W^H` of the symmetric Kronecker product prior covariance :math:`W^H \\otimes_s W^H` of - :math:`H`. + Factor :math:`W^H` of the symmetric Kronecker product prior covariance + :math:`W^H \\otimes_s W^H` of :math:`H`. b_mean : array-like, shape=(n,nrhs) Prior mean of the right hand side :math:`b`. 
""" @@ -1109,9 +1163,9 @@ def _get_prior_params(self, A0, Ainv0, x0, b): Ainv0_covfactor = linops.Identity(shape=self.n) # Standard normal covariance A0_mean = linops.Identity(shape=self.n) - A0_covfactor = linops.Identity( - shape=self.n - ) # TODO: should this be a sample from A to achieve symm. posterior correspondence? + A0_covfactor = linops.Identity(shape=self.n) + # TODO: should this be a sample from A to achieve symm. posterior + # correspondence? return A0_mean, A0_covfactor, Ainv0_mean, Ainv0_covfactor, b_mean # Construct matrix priors from initial guess x0 elif isinstance(x0, np.ndarray): @@ -1125,9 +1179,9 @@ def _get_prior_params(self, A0, Ainv0, x0, b): ) Ainv0_covfactor = Ainv0_mean # Standard normal covariance - A0_covfactor = linops.Identity( - shape=self.n - ) # TODO: should this be a sample from A to achieve symm. posterior correspondence? + A0_covfactor = linops.Identity(shape=self.n) + # TODO: should this be a sample from A to achieve symm. posterior + # correspondence? return A0_mean, A0_covfactor, Ainv0_mean, Ainv0_covfactor, b_mean elif isinstance(x0, prob.RandomVariable): raise NotImplementedError @@ -1149,20 +1203,21 @@ def _get_prior_params(self, A0, Ainv0, x0, b): A0_mean = Ainv0.inv() except AttributeError: warnings.warn( - message="Prior specified only for Ainv. Inverting prior mean naively. " - + "This operation is computationally costly! Specify an inverse prior (mean) instead." + "Prior specified only for Ainv. Inverting prior mean naively. " + "This operation is computationally costly! Specify an inverse " + "prior (mean) instead." ) A0_mean = np.linalg.inv(Ainv0.mean()) except NotImplementedError: A0_mean = linops.Identity(self.n) warnings.warn( - message="Prior specified only for Ainv. Automatic prior mean inversion not implemented, " - + "falling back to standard normal prior." + "Prior specified only for Ainv. Automatic prior mean inversion " + "not implemented, falling back to standard normal prior." ) # Standard normal covariance - A0_covfactor = linops.Identity( - shape=self.n - ) # TODO: should this be a sample from A to achieve symm. posterior correspondence? + A0_covfactor = linops.Identity(shape=self.n) + # TODO: should this be a sample from A to achieve symm. posterior + # correspondence? return A0_mean, A0_covfactor, Ainv0_mean, Ainv0_covfactor, b_mean # Prior on A specified @@ -1182,15 +1237,16 @@ def _get_prior_params(self, A0, Ainv0, x0, b): Ainv0_mean = A0.inv() except AttributeError: warnings.warn( - message="Prior specified only for A. Inverting prior mean naively. " - + "This operation is computationally costly! Specify an inverse prior (mean) instead." + "Prior specified only for A. Inverting prior mean naively. " + "This operation is computationally costly! Specify an inverse " + "prior (mean) instead." ) Ainv0_mean = np.linalg.inv(A0.mean()) except NotImplementedError: Ainv0_mean = linops.Identity(self.n) warnings.warn( - message="Prior specified only for A. " - + "Automatic prior mean inversion failed, falling back to standard normal prior." + "Prior specified only for A. Automatic prior mean inversion " + "failed, falling back to standard normal prior." ) # Symmetric posterior correspondence Ainv0_covfactor = Ainv0_mean @@ -1211,7 +1267,8 @@ def has_converged(self, iter, maxiter, atol=None, rtol=None): """ Check convergence of a linear solver. - Evaluates a set of convergence criteria based on its input arguments to decide whether the iteration has converged. 
+ Evaluates a set of convergence criteria based on its input arguments to decide + whether the iteration has converged. Parameters ---------- @@ -1221,12 +1278,13 @@ def has_converged(self, iter, maxiter, atol=None, rtol=None): Maximum number of iterations atol : float Absolute tolerance for the uncertainty about the solution estimate. Stops if - :math:`\\sqrt{\\text{tr}(\\Sigma)} \\leq \\text{atol}`, where :math:`\\Sigma` is the covariance of the - solution :math:`x`. + :math:`\\sqrt{\\text{tr}(\\Sigma)} \\leq \\text{atol}`, where + :math:`\\Sigma` is the covariance of the solution :math:`x`. rtol : float Relative tolerance for the uncertainty about the solution estimate. Stops if - :math:`\\sqrt{\\text{tr}(\\Sigma)} \\leq \\text{rtol} \\lVert x_i \\rVert`, where :math:`\\Sigma` is the - covariance of the solution :math`x` and :math:`x_i` its mean. + :math:`\\sqrt{\\text{tr}(\\Sigma)} \\leq \\text{rtol} \\lVert x_i \\rVert`, + where :math:`\\Sigma` is the covariance of the solution :math`x` and + :math:`x_i` its mean. Returns ------- @@ -1238,7 +1296,7 @@ def has_converged(self, iter, maxiter, atol=None, rtol=None): # maximum iterations if iter >= maxiter: warnings.warn( - message="Iteration terminated. Solver reached the maximum number of iterations." + "Iteration terminated. Solver reached the maximum number of iterations." ) return True, "maxiter" # uncertainty-based @@ -1268,26 +1326,29 @@ def solve( Parameters ---------- callback : function, optional - User-supplied function called after each iteration of the linear solver. It is called as - ``callback(xk, Ak, Ainvk, sk, yk, alphak, resid, noise_scale)`` and can be used to return quantities from the - iteration. Note that depending on the function supplied, this can slow down the solver. + User-supplied function called after each iteration of the linear solver. It + is called as ``callback(xk, Ak, Ainvk, sk, yk, alphak, resid, noise_scale)`` + and can be used to return quantities from the iteration. Note that depending + on the function supplied, this can slow down the solver. maxiter : int Maximum number of iterations atol : float Absolute tolerance for the uncertainty about the solution estimate. Stops if - :math:`\\sqrt{\\text{tr}(\\Sigma)} \\leq \\text{atol}`, where :math:`\\Sigma` is the covariance of the - solution :math:`x`. + :math:`\\sqrt{\\text{tr}(\\Sigma)} \\leq \\text{atol}`, where + :math:`\\Sigma` is the covariance of the solution :math:`x`. rtol : float Relative tolerance for the uncertainty about the solution estimate. Stops if - :math:`\\sqrt{\\text{tr}(\\Sigma)} \\leq \\text{rtol} \\lVert x_i \\rVert`, where :math:`\\Sigma` is the - covariance of the solution :math`x` and :math:`x_i` its mean. + :math:`\\sqrt{\\text{tr}(\\Sigma)} \\leq \\text{rtol} \\lVert x_i \\rVert`, + where :math:`\\Sigma` is the covariance of the solution :math`x` and + :math:`x_i` its mean. noise_scale : float Assumed (initial) noise scale :math:`\\varepsilon^2`. Returns ------- x : RandomVariable, shape=(n,) or (n, nrhs) - Approximate solution :math:`x` to the linear system. Shape of the return matches the shape of ``b``. + Approximate solution :math:`x` to the linear system. Shape of the return + matches the shape of ``b``. A : RandomVariable, shape=(n,n) Posterior belief over the linear operator. 
Ainv : RandomVariable, shape=(n,n) diff --git a/src/probnum/linalg/linearsolvers/solutionbased.py b/src/probnum/linalg/linearsolvers/solutionbased.py index a52392153..9496fb8d3 100644 --- a/src/probnum/linalg/linearsolvers/solutionbased.py +++ b/src/probnum/linalg/linearsolvers/solutionbased.py @@ -1,8 +1,8 @@ """ Solution-based probabilistic linear solvers. -Implementations of solution-based linear solvers which perform inference on the solution of a linear system given linear -observations. +Implementations of solution-based linear solvers which perform inference on the solution +of a linear system given linear observations. """ import warnings @@ -26,7 +26,8 @@ class SolutionBasedSolver(ProbabilisticLinearSolver): References ---------- - .. [1] Cockayne, J. et al., A Bayesian Conjugate Gradient Method, *Bayesian Analysis*, 2019, 14, 937-1012 + .. [1] Cockayne, J. et al., A Bayesian Conjugate Gradient Method, *Bayesian + Analysis*, 2019, 14, 937-1012 """ def __init__(self, A, b, x0=None): @@ -37,7 +38,8 @@ def has_converged(self, iter, maxiter, resid=None, atol=None, rtol=None): """ Check convergence of a linear solver. - Evaluates a set of convergence criteria based on its input arguments to decide whether the iteration has converged. + Evaluates a set of convergence criteria based on its input arguments to decide + whether the iteration has converged. Parameters ---------- @@ -46,11 +48,14 @@ def has_converged(self, iter, maxiter, resid=None, atol=None, rtol=None): maxiter : int Maximum number of iterations resid : array-like - Residual vector :math:`\\lVert r_i \\rVert = \\lVert Ax_i - b \\rVert` of the current iteration. + Residual vector :math:`\\lVert r_i \\rVert = \\lVert Ax_i - b \\rVert` of + the current iteration. atol : float - Absolute residual tolerance. Stops if :math:`\\lVert r_i \\rVert < \\text{atol}`. + Absolute residual tolerance. Stops if + :math:`\\lVert r_i \\rVert < \\text{atol}`. rtol : float - Relative residual tolerance. Stops if :math:`\\lVert r_i \\rVert < \\text{rtol} \\lVert b \\rVert`. + Relative residual tolerance. Stops if + :math:`\\lVert r_i \\rVert < \\text{rtol} \\lVert b \\rVert`. Returns ------- @@ -62,7 +67,7 @@ def has_converged(self, iter, maxiter, resid=None, atol=None, rtol=None): # maximum iterations if iter >= maxiter: warnings.warn( - message="Iteration terminated. Solver reached the maximum number of iterations." + "Iteration terminated. Solver reached the maximum number of iterations." ) return True, "maxiter" # residual below error tolerance diff --git a/src/probnum/linalg/linops/__init__.py b/src/probnum/linalg/linops/__init__.py index aff2256ea..25cf54ac4 100644 --- a/src/probnum/linalg/linops/__init__.py +++ b/src/probnum/linalg/linops/__init__.py @@ -1,8 +1,9 @@ """ (Finite-dimensional) Linear Operators. -This package implements a variety of finite dimensional linear operators. These have the advantage of only implementing -a matrix-vector product instead of representing the full linear operator as a matrix in memory. +This package implements a variety of finite dimensional linear operators. These have the +advantage of only implementing a matrix-vector product instead of representing the full +linear operator as a matrix in memory. 
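
A minimal usage sketch (assuming the :class:`LinearOperator` class exported by this
package; only a matrix-vector product and a shape need to be supplied):

.. code-block:: python

    import numpy as np
    from probnum.linalg import linops

    # Lazy representation of A = 2 * I without ever building the matrix.
    A = linops.LinearOperator(shape=(5, 5), matvec=lambda v: 2.0 * v)
    A @ np.ones(5)  # -> array([2., 2., 2., 2., 2.])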
""" from probnum.linalg.linops.linearoperators import * diff --git a/src/probnum/linalg/linops/kronecker.py b/src/probnum/linalg/linops/kronecker.py index a58adfde4..6ac1e201f 100644 --- a/src/probnum/linalg/linops/kronecker.py +++ b/src/probnum/linalg/linops/kronecker.py @@ -12,7 +12,8 @@ class Symmetrize(LinearOperator): """ Symmetrizes a vector in its matrix representation. - Given a vector x=vec(X) representing a square matrix X, this linear operator computes y=vec(1/2(X + X^T)). + Given a vector x=vec(X) representing a square matrix X, this linear operator + computes y=vec(1/2(X + X^T)). Parameters ---------- @@ -35,13 +36,14 @@ class Vec(LinearOperator): """ Vectorization operator. - The column- or row-wise vectorization operator stacking the columns or rows of a matrix representation of a - linear operator into a vector. + The column- or row-wise vectorization operator stacking the columns or rows of a + matrix representation of a linear operator into a vector. Parameters ---------- order : str - Stacking order to apply. One of ``row`` or ``col``. Defaults to column-wise stacking. + Stacking order to apply. One of ``row`` or ``col``. Defaults to column-wise + stacking. dim : int Either number of rows or columns, depending on the vectorization ``order``. """ @@ -70,9 +72,10 @@ class Svec(LinearOperator): """ Symmetric vectorization operator. - The column- or row-wise symmetric normalized vectorization operator :math:`\\operatorname{svec}` [1]_ stacking the - (normalized) lower/upper triangular components of a symmetric matrix of a linear operator into a - vector. It is defined by + The column- or row-wise symmetric normalized vectorization operator + :math:`\\operatorname{svec}` [1]_ stacking the (normalized) lower/upper triangular + components of a symmetric matrix of a linear operator into a vector. It is defined + by .. math:: \\operatorname{svec}(S) = \\begin{bmatrix} @@ -95,17 +98,18 @@ class Svec(LinearOperator): dim : int Dimension of the symmetric matrix to be reshaped. check_symmetric : bool, default=False - Check whether the given matrix or vector corresponds to a symmetric matrix argument. Note, this option can slow - down performance. + Check whether the given matrix or vector corresponds to a symmetric matrix + argument. Note, this option can slow down performance. Notes ----- - It holds that :math:`Q\\operatorname{svec}(S) = \\operatorname{vec}(S)`, where :math:`Q` is a unique matrix with - orthonormal rows. + It holds that :math:`Q\\operatorname{svec}(S) = \\operatorname{vec}(S)`, where + :math:`Q` is a unique matrix with orthonormal rows. References ---------- - .. [1] De Klerk, E., Aspects of Semidefinite Programming, *Kluwer Academic Publishers*, 2002 + .. [1] De Klerk, E., Aspects of Semidefinite Programming, *Kluwer Academic + Publishers*, 2002 """ def __init__(self, dim, check_symmetric=False): @@ -130,14 +134,17 @@ def _matvec(self, x): return X[ind] def _matmat(self, X): - """Vectorizes X if of dimension n^2, otherwise applies Svec to each column of X.""" + """ + Vectorizes X if of dimension n^2, otherwise applies Svec to each column of X. + """ if np.shape(X)[0] == np.shape(X)[1] == self._dim: return self._matvec(X.ravel()) elif np.shape(X)[0] == self._dim * self._dim: return np.hstack([self._matvec(col.reshape(-1, 1)) for col in X.T]) else: raise ValueError( - "Dimension mismatch. Argument must be either a (n x n) matrix or (n^2 x k)" + "Dimension mismatch. 
Argument must be either a (n x n) matrix or " + "(n^2 x k)" ) @@ -145,7 +152,8 @@ class Kronecker(LinearOperator): """ Kronecker product of two linear operators. - The Kronecker product [1]_ :math:`A \\otimes B` of two linear operators :math:`A` and :math:`B` is given by + The Kronecker product [1]_ :math:`A \\otimes B` of two linear operators :math:`A` + and :math:`B` is given by .. math:: A \\otimes B = \\begin{bmatrix} @@ -154,10 +162,12 @@ class Kronecker(LinearOperator): A_{n_11} B & \\dots & A_{n_1 m_1} B \\end{bmatrix} - where :math:`A_{ij}v=A(v_j e_i)`, where :math:`e_i` is the :math:`i^{\\text{th}}` unit vector. The result is a new linear - operator mapping from :math:`\\mathbb{R}^{n_1n_2}` to :math:`\\mathbb{R}^{m_1m_2}`. By recognizing that - :math:`(A \\otimes B)\\operatorname{vec}(X) = AXB^{\\top}`, the Kronecker product can be understood as "translation" - between matrix multiplication and (row-wise) vectorization. + where :math:`A_{ij}v=A(v_j e_i)`, where :math:`e_i` is the :math:`i^{\\text{th}}` + unit vector. The result is a new linear operator mapping from + :math:`\\mathbb{R}^{n_1n_2}` to :math:`\\mathbb{R}^{m_1m_2}`. By recognizing that + :math:`(A \\otimes B)\\operatorname{vec}(X) = AXB^{\\top}`, the Kronecker product + can be understood as "translation" between matrix multiplication and (row-wise) + vectorization. Parameters ---------- @@ -170,8 +180,8 @@ class Kronecker(LinearOperator): References ---------- - .. [1] Van Loan, C. F., The ubiquitous Kronecker product, *Journal of Computational and Applied Mathematics*, 2000, - 123, 85-100 + .. [1] Van Loan, C. F., The ubiquitous Kronecker product, *Journal of Computational + and Applied Mathematics*, 2000, 123, 85-100 See Also -------- @@ -193,7 +203,8 @@ def __init__(self, A, B, dtype=None): def _matvec(self, X): """ - Efficient multiplication via (A (x) B)vec(X) = vec(AXB^T) where vec is the row-wise vectorization operator. + Efficient multiplication via (A (x) B)vec(X) = vec(AXB^T) where vec is the + row-wise vectorization operator. """ X = X.reshape(self.A.shape[1], self.B.shape[1]) Y = self.B.matmat(X.T) @@ -255,33 +266,38 @@ class SymmetricKronecker(LinearOperator): """ Symmetric Kronecker product of two linear operators. - The symmetric Kronecker product [1]_ :math:`A \\otimes_{s} B` of two square linear operators :math:`A` and - :math:`B` maps a symmetric linear operator :math:`X` to :math:`\\mathbb{R}^{\\frac{1}{2}n (n+1)}`. It is given by + The symmetric Kronecker product [1]_ :math:`A \\otimes_{s} B` of two square linear + operators :math:`A` and :math:`B` maps a symmetric linear operator :math:`X` to + :math:`\\mathbb{R}^{\\frac{1}{2}n (n+1)}`. It is given by .. math:: (A \\otimes_{s} B)\\operatorname{svec}(X) = \\frac{1}{2} \\operatorname{svec}(AXB^{\\top} + BXA^{\\top}) - where :math:`\\operatorname{svec}(X) = (X_{11}, \\sqrt{2} X_{12}, \\dots, X_{1n}, X_{22}, \\sqrt{2} X_{23}, - \\dots, \\sqrt{2}X_{2n}, \\dots X_{nn})^{\\top}` is the (row-wise, normalized) symmetric stacking operator. The - implementation is based on the relationship :math:`Q^\\top \\operatorname{svec}(X) = \\operatorname{vec}(X)` with an - orthonormal matrix :math:`Q` [2]_. + where :math:`\\operatorname{svec}(X) = (X_{11}, \\sqrt{2} X_{12}, \\dots, X_{1n}, + X_{22}, \\sqrt{2} X_{23}, \\dots, \\sqrt{2}X_{2n}, \\dots X_{nn})^{\\top}` is the + (row-wise, normalized) symmetric stacking operator. 
The implementation is based on + the relationship :math:`Q^\\top \\operatorname{svec}(X) = \\operatorname{vec}(X)` + with an orthonormal matrix :math:`Q` [2]_. Note ---- - The symmetric Kronecker product has a symmetric matrix representation if both :math:`A` and :math:`B` are symmetric. + The symmetric Kronecker product has a symmetric matrix representation if both + :math:`A` and :math:`B` are symmetric. References ---------- - .. [1] Van Loan, C. F., The ubiquitous Kronecker product, *Journal of Computational and Applied Mathematics*, 2000, - 123, 85-100 - .. [2] De Klerk, E., Aspects of Semidefinite Programming, *Kluwer Academic Publishers*, 2002 + .. [1] Van Loan, C. F., The ubiquitous Kronecker product, *Journal of Computational + and Applied Mathematics*, 2000, 123, 85-100 + .. [2] De Klerk, E., Aspects of Semidefinite Programming, *Kluwer Academic + Publishers*, 2002 See Also -------- Kronecker : The Kronecker product of two linear operators. - """ + """ # pylint: disable=line-too-long - # TODO: update documentation to map from n2xn2 to matrices of rank 1/2n(n+1), representation symmetric n2xn2 + # TODO: update documentation to map from n2xn2 to matrices of rank 1/2n(n+1), + # representation symmetric n2xn2 def __init__(self, A, B=None, dtype=None): # Set parameters @@ -303,8 +319,8 @@ def __init__(self, A, B=None, dtype=None): def _matvec(self, x): """ - Efficient multiplication via (A (x)_s B)vec(X) = 1/2 vec(BXA^T + AXB^T) where vec is the column-wise normalized - symmetric stacking operator. + Efficient multiplication via (A (x)_s B)vec(X) = 1/2 vec(BXA^T + AXB^T) where + vec is the column-wise normalized symmetric stacking operator. """ # vec(x) X = x.reshape(self._n, self._n) @@ -326,7 +342,8 @@ def _rmatvec(self, x): Y = 0.5 * (Y1 + Y2) return Y.ravel() - # TODO: add efficient implementation of _matmat based on (Symmetric) Kronecker properties + # TODO: add efficient implementation of _matmat based on (Symmetric) Kronecker + # properties def todense(self): """Dense representation of the symmetric Kronecker product""" diff --git a/src/probnum/linalg/linops/linearoperators.py b/src/probnum/linalg/linops/linearoperators.py index 576524e6b..7e3ace187 100644 --- a/src/probnum/linalg/linops/linearoperators.py +++ b/src/probnum/linalg/linops/linearoperators.py @@ -1,12 +1,14 @@ """ Finite dimensional linear operators. -This module defines classes and methods that implement finite dimensional linear operators. It can be used to do linear -algebra with (structured) matrices without explicitly representing them in memory. This often allows for the definition -of a more efficient matrix-vector product. Linear operators can be applied, added, multiplied, transposed, and more as -one would expect from matrix algebra. - -Several algorithms in the :mod:`probnum.linalg` library are able to operate on :class:`LinearOperator` instances. +This module defines classes and methods that implement finite dimensional linear +operators. It can be used to do linear algebra with (structured) matrices without +explicitly representing them in memory. This often allows for the definition of a more +efficient matrix-vector product. Linear operators can be applied, added, multiplied, +transposed, and more as one would expect from matrix algebra. + +Several algorithms in the :mod:`probnum.linalg` library are able to operate on +:class:`LinearOperator` instances. """ import warnings @@ -19,25 +21,28 @@ class LinearOperator(scipy.sparse.linalg.LinearOperator): """ Finite dimensional linear operators. 
- This class provides a way to define finite dimensional linear operators without explicitly constructing a matrix - representation. Instead it suffices to define a matrix-vector product and a shape attribute. This avoids unnecessary - memory usage and can often be more convenient to derive. + This class provides a way to define finite dimensional linear operators without + explicitly constructing a matrix representation. Instead it suffices to define a + matrix-vector product and a shape attribute. This avoids unnecessary memory usage + and can often be more convenient to derive. - LinearOperator instances can be multiplied, added and exponentiated. This happens lazily: the result of these - operations is a new, composite LinearOperator, that defers linear operations to the original operators and combines - the results. + LinearOperator instances can be multiplied, added and exponentiated. This happens + lazily: the result of these operations is a new, composite LinearOperator, that + defers linear operations to the original operators and combines the results. - To construct a concrete LinearOperator, either pass appropriate callables to the constructor of this class, or - subclass it. + To construct a concrete LinearOperator, either pass appropriate callables to the + constructor of this class, or subclass it. - A subclass must implement either one of the methods ``_matvec`` and ``_matmat``, and the - attributes/properties ``shape`` (pair of integers) and ``dtype`` (may be ``None``). It may call the ``__init__`` on - this class to have these attributes validated. Implementing ``_matvec`` automatically implements ``_matmat`` (using - a naive algorithm) and vice-versa. + A subclass must implement either one of the methods ``_matvec`` and ``_matmat``, and + the attributes/properties ``shape`` (pair of integers) and ``dtype`` (may be + ``None``). It may call the ``__init__`` on this class to have these attributes + validated. Implementing ``_matvec`` automatically implements ``_matmat`` (using a + naive algorithm) and vice-versa. - Optionally, a subclass may implement ``_rmatvec`` or ``_adjoint`` to implement the Hermitian adjoint (conjugate - transpose). As with ``_matvec`` and ``_matmat``, implementing either ``_rmatvec`` or ``_adjoint`` implements the - other automatically. Implementing ``_adjoint`` is preferable; ``_rmatvec`` is mostly there for backwards + Optionally, a subclass may implement ``_rmatvec`` or ``_adjoint`` to implement the + Hermitian adjoint (conjugate transpose). As with ``_matvec`` and ``_matmat``, + implementing either ``_rmatvec`` or ``_adjoint`` implements the other automatically. + Implementing ``_adjoint`` is preferable; ``_rmatvec`` is mostly there for backwards compatibility. This class inherits from :class:`scipy.sparse.linalg.LinearOperator`. @@ -49,7 +54,8 @@ class LinearOperator(scipy.sparse.linalg.LinearOperator): matvec : callable f(v) Returns :math:`A v`. rmatvec : callable f(v) - Returns :math:`A^H v`, where :math:`A^H` is the conjugate transpose of :math:`A`. + Returns :math:`A^H v`, where :math:`A^H` is the conjugate transpose of + :math:`A`. matmat : callable f(V) Returns :math:`AV`, where :math:`V` is a dense matrix with dimensions (N, K). dtype : dtype @@ -97,7 +103,8 @@ def __new__(cls, *args, **kwargs): return obj - # Overload arithmetic operators to give access to newly implemented functions (e.g. todense()) + # Overload arithmetic operators to give access to newly implemented functions (e.g. 
+ # todense()) def __rmul__(self, x): if np.isscalar(x): return _ScaledLinearOperator(self, x) @@ -122,10 +129,12 @@ def __neg__(self): # The below methods are overloaded to allow dot products with random variables def dot(self, x): """Matrix-matrix or matrix-vector multiplication. + Parameters ---------- x : array_like 1-d or 2-d array, representing a vector or matrix. + Returns ------- Ax : array @@ -206,7 +215,8 @@ def todense(self): """ Dense matrix representation of the linear operator. - This method can be computationally very costly depending on the shape of the linear operator. Use with caution. + This method can be computationally very costly depending on the shape of the + linear operator. Use with caution. Returns ------- @@ -232,8 +242,9 @@ def cond(self, p=None): """ Compute the condition number of the linear operator. - The condition number of the linear operator with respect to the ``p`` norm. It measures how much the solution - :math:`x` of the linear system :math:`Ax=b` changes with respect to small changes in :math:`b`. + The condition number of the linear operator with respect to the ``p`` norm. It + measures how much the solution :math:`x` of the linear system :math:`Ax=b` + changes with respect to small changes in :math:`b`. Parameters ---------- @@ -269,7 +280,8 @@ def trace(self): """ Trace of the linear operator. - Computes the trace of a square linear operator :math:`\\text{tr}(A) = \\sum_{i-1}^n A_ii`. + Computes the trace of a square linear operator :math:`\\text{tr}(A) = + \\sum_{i-1}^n A_ii`. Returns ------- @@ -311,7 +323,8 @@ def __init__( ) -# TODO: inheritance from _TransposedLinearOperator causes dependency on scipy>=1.4, maybe implement our own instead? +# TODO: inheritance from _TransposedLinearOperator causes dependency on scipy>=1.4, +# maybe implement our own instead? class _TransposedLinearOperator( scipy.sparse.linalg.interface._TransposedLinearOperator, LinearOperator ): @@ -403,7 +416,8 @@ class Diagonal(LinearOperator): """ # TODO: should this be an operator itself or a function of a LinearOperator? - # - a function allows subclasses (e.g. MatrixMult) to implement more efficient versions than n products e_i A e_i + # - a function allows subclasses (e.g. MatrixMult) to implement more efficient + # versions than n products e_i A e_i def __init__(self, Op): # pylint: disable=super-init-not-called raise NotImplementedError @@ -570,8 +584,8 @@ def aslinop(A): Parameters ---------- A : array-like or LinearOperator or RandomVariable or object - Argument to be represented as a linear operator. When `A` is an object it needs to have the attributes `.shape` - and `.matvec`. + Argument to be represented as a linear operator. When `A` is an object it needs + to have the attributes `.shape` and `.matvec`. Notes ----- diff --git a/src/probnum/prob/__init__.py b/src/probnum/prob/__init__.py index b01cf4b20..314152b6f 100644 --- a/src/probnum/prob/__init__.py +++ b/src/probnum/prob/__init__.py @@ -1,9 +1,9 @@ """ Probability and statistics. -This package implements functionality related to probability theory and statistics such as random variables and -distributions. Random variables are the primary in- and outputs of probabilistic numerical methods. A generic signature -of such methods looks like this: +This package implements functionality related to probability theory and statistics such +as random variables and distributions. Random variables are the primary in- and outputs +of probabilistic numerical methods. 
A generic signature of such methods looks like this: .. highlight:: python .. code-block:: python diff --git a/src/probnum/prob/distributions/distribution.py b/src/probnum/prob/distributions/distribution.py index 228dfc45f..464fa2ffb 100644 --- a/src/probnum/prob/distributions/distribution.py +++ b/src/probnum/prob/distributions/distribution.py @@ -1,7 +1,8 @@ """ Probability Distribution. -This module provides a class implementing a probability distribution along with its properties. +This module provides a class implementing a probability distribution along with its +properties. """ import numpy as np @@ -45,17 +46,18 @@ class Distribution: shape : tuple Shape of samples from this distribution. dtype : numpy.dtype or object - Data type of realizations of a random variable with this distribution. If ``object`` will be converted to ``numpy.dtype``. + Data type of realizations of a random variable with this distribution. If + ``object`` will be converted to ``numpy.dtype``. random_state : None or int or :class:`~numpy.random.RandomState` instance, optional - This parameter defines the RandomState object to use for drawing - realizations from this distribution. - If None (or np.random), the global np.random state is used. - If integer, it is used to seed the local :class:`~numpy.random.RandomState` instance. - Default is None. + This parameter defines the RandomState object to use for drawing realizations + from this distribution. If None (or np.random), the global np.random state is + used. If integer, it is used to seed the local + :class:`~numpy.random.RandomState` instance. Default is None. See Also -------- - RandomVariable : Random variables are the main objects used by probabilistic numerical methods. + RandomVariable : + Random variables are the main objects used by probabilistic numerical methods. Examples -------- @@ -152,10 +154,10 @@ def dtype(self, newtype): def random_state(self): """Random state of the distribution. - This attribute defines the RandomState object to use for drawing - realizations from this distribution. - If None (or np.random), the global np.random state is used. - If integer, it is used to seed the local :class:`~numpy.random.RandomState` instance. + This attribute defines the RandomState object to use for drawing realizations + from this distribution. If None (or np.random), the global np.random state is + used. If integer, it is used to seed the local + :class:`~numpy.random.RandomState` instance. """ return self._random_state @@ -163,10 +165,10 @@ def random_state(self): def random_state(self, seed): """ Get or set the RandomState object of the underlying distribution. - This can be either None or an existing RandomState object. - If None (or np.random), use the RandomState singleton used by np.random. - If already a RandomState instance, use it. - If an int, use a new RandomState instance seeded with seed. + This can be either None or an existing RandomState object. If None (or + np.random), use the RandomState singleton used by np.random. If already a + RandomState instance, use it. If an int, use a new RandomState instance seeded + with seed. """ self._random_state = scipy._lib._util.check_random_state(seed) @@ -175,7 +177,8 @@ def parameters(self): """ Parameters of the probability distribution. - The parameters of the distribution such as mean, variance, et cetera stored in a ``dict``. + The parameters of the distribution such as mean, variance, et cetera stored in a + ``dict``. 
""" if self._parameters is not None: return self._parameters @@ -229,9 +232,8 @@ def logpdf(self, x): return np.log(self._pdf(x)) else: raise NotImplementedError( - "The function 'logpdf' is not implemented for object of class {}".format( - type(self).__name__ - ) + "The function 'logpdf' is not implemented for object of class " + "{}".format(type(self).__name__) ) def cdf(self, x): @@ -254,9 +256,8 @@ def cdf(self, x): return np.exp(self._logcdf(x)) else: raise NotImplementedError( - "The function 'cdf' is not implemented for object of class {}".format( - type(self).__name__ - ) + "The function 'cdf' is not implemented for object of class " + "{}".format(type(self).__name__) ) def logcdf(self, x): @@ -279,9 +280,8 @@ def logcdf(self, x): return np.log(self._cdf(x)) else: raise NotImplementedError( - "The function 'logcdf' is not implemented for object of class {}".format( - type(self).__name__ - ) + "The function 'logcdf' is not implemented for object of class " + "{}".format(type(self).__name__) ) def sample(self, size=()): @@ -302,9 +302,8 @@ def sample(self, size=()): return self._sample(size=size) else: raise NotImplementedError( - "The function 'sample' is not implemented for object of class {}.".format( - type(self).__name__ - ) + "The function 'sample' is not implemented for object of class " + "{}.".format(type(self).__name__) ) def median(self): @@ -331,9 +330,8 @@ def mode(self): return self._parameters["mode"] else: raise NotImplementedError( - "The function 'mode' is not implemented for object of class {}.".format( - type(self).__name__ - ) + "The function 'mode' is not implemented for object of class " + "{}.".format(type(self).__name__) ) def mean(self): @@ -358,28 +356,29 @@ def mean(self): def cov(self): """ - Covariance :math:`\\operatorname{Cov}(X) = \\mathbb{E}((X-\\mathbb{E}(X))(X-\\mathbb{E}(X))^\\top)` of the - distribution. + Covariance + :math:`\\operatorname{Cov}(X) = \\mathbb{E}((X-\\mathbb{E}(X))(X-\\mathbb{E}(X))^\\top)` + of the distribution. Returns ------- cov : array-like The kernels of the distribution. - """ + """ # pylint: disable=line-too-long if self._cov is not None: return self._cov() elif "cov" in self._parameters: return self._parameters["cov"] else: raise NotImplementedError( - "The function 'cov' is not implemented for object of class {}".format( - type(self).__name__ - ) + "The function 'cov' is not implemented for object of class " + "{}".format(type(self).__name__) ) def var(self): """ - Variance :math:`\\operatorname{Var}(X) = \\mathbb{E}((X-\\mathbb{E}(X))^2)` of the distribution. + Variance :math:`\\operatorname{Var}(X) = \\mathbb{E}((X-\\mathbb{E}(X))^2)` + of the distribution. Returns ------- @@ -413,8 +412,8 @@ def reshape(self, newshape): Parameters ---------- newshape : int or tuple of ints - New shape for the realizations and parameters of this distribution. It must be compatible with the original - shape. + New shape for the realizations and parameters of this distribution. It must + be compatible with the original shape. Returns ------- diff --git a/src/probnum/prob/distributions/normal.py b/src/probnum/prob/distributions/normal.py index 7a1f8e7f9..2bb3a4e43 100644 --- a/src/probnum/prob/distributions/normal.py +++ b/src/probnum/prob/distributions/normal.py @@ -201,7 +201,8 @@ def __add__(self, other): elif isinstance(other, type(self)): if self.random_state is not None and other.random_state is not None: warnings.warn( - "When adding random variables with set random states only the first is preserved." 
+ "When adding random variables with set random states " + "only the first is preserved." ) try: return Normal( @@ -223,7 +224,8 @@ def __sub__(self, other): elif isinstance(other, type(self)): if self.random_state is not None and other.random_state is not None: warnings.warn( - "When adding random variables with set random states only the first is preserved." + "When adding random variables with set random states " + "only the first is preserved." ) try: return Normal( @@ -345,7 +347,8 @@ def __pos__(self): def __abs__(self): try: - # todo: add absolute moments of normal (see: https://arxiv.org/pdf/1209.4340.pdf) + # todo: add absolute moments of normal + # (see: https://arxiv.org/pdf/1209.4340.pdf) return Distribution( parameters={}, sample=lambda size: operator.abs(self.sample(size=size)), @@ -671,8 +674,9 @@ def transpose(self, *axes): # Arithmetic Operations - # TODO: implement special rules for matrix-variate RVs and Kronecker structured covariances - # (see e.g. p.64 Thm. 2.3.10 of Gupta: Matrix-variate Distributions) + # TODO: implement special rules for matrix-variate RVs and + # Kronecker structured covariances + # (see e.g. p.64 Thm. 2.3.10 of Gupta: Matrix-variate Distributions) def __matmul__(self, other): if isinstance(other, Dirac): othermean = other.mean() diff --git a/src/probnum/prob/randomvariable.py b/src/probnum/prob/randomvariable.py index b89a30f81..15155fb42 100644 --- a/src/probnum/prob/randomvariable.py +++ b/src/probnum/prob/randomvariable.py @@ -1,8 +1,8 @@ """ Random Variables. -This module implements random variables. Random variables are the main in- and outputs of probabilistic numerical -methods. +This module implements random variables. Random variables are the main in- and outputs +of probabilistic numerical methods. """ import operator @@ -21,23 +21,27 @@ class RandomVariable: """ Random variables are the main objects used by probabilistic numerical methods. - Every probabilistic numerical method takes a random variable encoding the prior distribution as input and outputs a - random variable whose distribution encodes the uncertainty arising from finite computation. The generic signature - of a probabilistic numerical method is: + Every probabilistic numerical method takes a random variable encoding the prior + distribution as input and outputs a random variable whose distribution encodes the + uncertainty arising from finite computation. The generic signature of a + probabilistic numerical method is: ``output_rv = probnum_method(input_rv, method_params)`` - In practice, most random variables used by methods in ProbNum have Dirac or Gaussian measure. + In practice, most random variables used by methods in ProbNum have Dirac or Gaussian + measure. - Instances of :class:`RandomVariable` can be added, multiplied, etc. with arrays and linear operators. This may - change their ``distribution`` and not necessarily all previously available methods are retained. + Instances of :class:`RandomVariable` can be added, multiplied, etc. with arrays and + linear operators. This may change their ``distribution`` and not necessarily all + previously available methods are retained. Parameters ---------- shape : tuple Shape of realizations of this random variable. dtype : numpy.dtype or object - Data type of realizations of this random variable. If ``object`` will be converted to ``numpy.dtype``. + Data type of realizations of this random variable. If ``object`` will be + converted to ``numpy.dtype``. 
distribution : Distribution Probability distribution of the random variable. @@ -69,7 +73,8 @@ def _set_distribution(self, distribution): "The distribution parameter must be an " "instance of `Distribution`." ) - # TODO: add some type checking (e.g. for shape as a tuple of ints) and extract as function + # TODO: add some type checking (e.g. for shape as a tuple of ints) and extract as + # function def _set_shape(self, distribution, shape): """ Sets shape in accordance with distribution. @@ -170,10 +175,11 @@ def random_state(self): def random_state(self, seed): """ Get or set the RandomState object of the underlying distribution. - This can be either None or an existing :class:`~numpy.random.RandomState` object. - If None (or np.random), use the :class:`~numpy.random.RandomState` singleton used by np.random. - If already a :class:`~numpy.random.RandomState` instance, use it. - If an int, use a new :class:`~numpy.random.RandomState` instance seeded with seed. + This can be either None or an existing :class:`~numpy.random.RandomState` + object. If None (or np.random), use the :class:`~numpy.random.RandomState` + singleton used by np.random. If already a :class:`~numpy.random.RandomState` + instance, use it. If an int, use a new :class:`~numpy.random.RandomState` + instance seeded with seed. """ self.distribution._random_state = scipy._lib._util.check_random_state(seed) @@ -203,7 +209,8 @@ def reshape(self, newshape): Parameters ---------- newshape : int or tuple of ints - New shape for the random variable. It must be compatible with the original shape. + New shape for the random variable. It must be compatible with the original + shape. Returns ------- @@ -305,7 +312,8 @@ def __rpow__(self, power, modulo=None): other_rv = asrandvar(power) return other_rv._rv_from_binary_operation(other=self, op=operator.pow) - # Augmented arithmetic assignments (+=, -=, *=, ...) attempting to do the operation in place + # Augmented arithmetic assignments (+=, -=, *=, ...) attempting to do the operation + # in place # TODO: needs setter functions for properties `shape` and `dtype` to do in place def __iadd__(self, other): return NotImplemented @@ -352,7 +360,8 @@ def asrandvar(obj): """ Return ``obj`` as a :class:`RandomVariable`. - Converts scalars, (sparse) arrays or distribution classes to a :class:`RandomVariable`. + Converts scalars, (sparse) arrays or distribution classes to a + :class:`RandomVariable`. Parameters ---------- @@ -424,7 +433,7 @@ def _scipystats_to_rv(scipydist): dist : RandomVariable ProbNum random variable. - """ + """ # pylint: disable=line-too-long # Univariate distributions (implemented in this package) if isinstance(scipydist, scipy.stats._distn_infrastructure.rv_frozen): # Normal distribution diff --git a/src/probnum/quad/__init__.py b/src/probnum/quad/__init__.py index a67ac9d78..4926c8295 100644 --- a/src/probnum/quad/__init__.py +++ b/src/probnum/quad/__init__.py @@ -1,10 +1,11 @@ """ Quadrature, i.e. numerical integration. -This module collects both classic and Bayesian quadrature rules used for numerical integration of functions. +This module collects both classic and Bayesian quadrature rules used for numerical +integration of functions. -Bayesian quadrature methods integrate a function by iteratively building a probabilistic model and using its predictions -to adaptively choose points to evaluate the integrand. 
+Bayesian quadrature methods integrate a function by iteratively building a probabilistic +model and using its predictions to adaptively choose points to evaluate the integrand. """ from probnum.quad.quadrature import * diff --git a/src/probnum/quad/bayesian/bayesquadrature.py b/src/probnum/quad/bayesian/bayesquadrature.py index 5ee508f0f..3876a7948 100644 --- a/src/probnum/quad/bayesian/bayesquadrature.py +++ b/src/probnum/quad/bayesian/bayesquadrature.py @@ -1,10 +1,11 @@ """ Bayesian Quadrature. -This module provides routines to integrate functions through Bayesian quadrature, meaning a model over the integrand -is constructed in order to actively select evaluation points of the integrand to estimate the value of the integral. -Bayesian quadrature methods return a random variable with a distribution, specifying the belief about the true value of -the integral. +This module provides routines to integrate functions through Bayesian quadrature, +meaning a model over the integrand is constructed in order to actively select evaluation +points of the integrand to estimate the value of the integral. Bayesian quadrature +methods return a random variable with a distribution, specifying the belief about the +true value of the integral. """ from probnum.quad.quadrature import Quadrature @@ -40,7 +41,8 @@ def bayesquad(fun, fun0, bounds, nevals=None, type="vanilla", **kwargs): F : RandomVariable The integral of ``func`` from ``a`` to ``b``. fun0 : RandomProcess - Stochastic process modelling the function to be integrated after ``neval`` observations. + Stochastic process modelling the function to be integrated after ``neval`` + observations. info : dict Information on the performance of the method. @@ -97,7 +99,8 @@ def nbayesquad(fun, fun0, domain, nevals=None, type=None, **kwargs): F : RandomVariable The integral of ``func`` on the domain. fun0 : RandomProcess - Stochastic process modelling the function to be integrated after ``neval`` observations. + Stochastic process modelling the function to be integrated after ``neval`` + observations. info : dict Information on the performance of the method. @@ -115,7 +118,8 @@ class BayesianQuadrature(Quadrature): """ An abstract base class for Bayesian quadrature methods. - This class is designed to be subclassed by implementations of Bayesian quadrature with an :meth:`integrate` method. + This class is designed to be subclassed by implementations of Bayesian quadrature + with an :meth:`integrate` method. """ def integrate(self, fun, fun0, domain, nevals, **kwargs): diff --git a/src/probnum/quad/polynomial/clenshawcurtis.py b/src/probnum/quad/polynomial/clenshawcurtis.py index b9de72c30..aa0b01b17 100644 --- a/src/probnum/quad/polynomial/clenshawcurtis.py +++ b/src/probnum/quad/polynomial/clenshawcurtis.py @@ -4,8 +4,8 @@ This module implements the Clenshaw-Curtis quadrature rule and associated functions. Formula for nodes and weights: - [1] Sparse Grid Quadrature in High Dimensions with Applications in Finance and Insurance - Holtz, M., Springer, 2010(, Chapter 3, p. 42ff) + [1] Sparse Grid Quadrature in High Dimensions with Applications in Finance and + Insurance Holtz, M., Springer, 2010(, Chapter 3, p. 42ff) URL: https://books.google.de/books?id=XOfMm-4ZM9AC&pg=PA42&lpg=PA42&dq=filippi+formu la+clenshaw+curtis&source=bl&ots=gkhNu9F1fp&sig=ACfU3U3zdH-OHx0PqqB_KAXb1mM5iXI @@ -62,7 +62,8 @@ class ClenshawCurtis(PolynomialQuadrature): References ---------- - .. 
[1] Holtz, M., Sparse Grid Quadrature in High Dimensions with Applications in Finance and Insurance, Springer, 2010 + .. [1] Holtz, M., Sparse Grid Quadrature in High Dimensions with Applications in + Finance and Insurance, Springer, 2010 Examples @@ -99,7 +100,7 @@ class ClenshawCurtis(PolynomialQuadrature): >>> print(cc.integrate(lambda x: np.sin(x))) [0.45969769] - """ + """ # pylint: disable=line-too-long def __init__(self, npts_per_dim, ndim, bounds): utils.assert_is_2d_ndarray(bounds) @@ -136,7 +137,7 @@ def _compute_weights_1d(npts, ndim, ilbds1d): The :math:`i^\textrm{th}` weight is given by .. math:: w_i = \\frac{2}{n+1} \\sin\\left(\\frac{i \\pi}{n+1}\\right)\\sum_{j=1}^{(n+1)/2} \\frac{1}{2j-1}\\sin\\left(\\frac{(2j-1)i \\pi}{n+1}\\right). - """ + """ # pylint: disable=line-too-long if npts % 2 == 0: raise ValueError("Please enter odd npts") nhalfpts = int((npts + 1.0) / 2.0) @@ -193,7 +194,7 @@ def _compute_nodes_1d(npts, ilbds1d): ------- np.ndarray, shape (npts,) 1d CC nodes in ilbds1d - """ + """ # pylint: disable=line-too-long if npts % 2 == 0: raise ValueError("Please enter odd npts") ind = np.arange(1, npts + 1) diff --git a/src/probnum/quad/polynomial/polynomialquadrature.py b/src/probnum/quad/polynomial/polynomialquadrature.py index d78aeea60..94207996b 100644 --- a/src/probnum/quad/polynomial/polynomialquadrature.py +++ b/src/probnum/quad/polynomial/polynomialquadrature.py @@ -1,7 +1,8 @@ """ Quadrature rules based on polynomial functions. -Class of quadrature rules derived by constructing polynomial functions which are simple to integrate. +Class of quadrature rules derived by constructing polynomial functions which are simple +to integrate. """ from probnum.quad.quadrature import Quadrature @@ -60,7 +61,8 @@ def integrate(self, fun, isvectorized=False, **kwargs): vectorization is recommended wherever possible for improved speed of computation. isvectorized : bool - Whether integrand allows vectorised evaluation (i.e. evaluation of all nodes at once). + Whether integrand allows vectorised evaluation (i.e. evaluation of all nodes + at once). kwargs Key-word arguments being passed down to ``fun`` at each evaluation. For example (hyper)parameters. diff --git a/src/probnum/utils/arrayutils.py b/src/probnum/utils/arrayutils.py index c56c65dbc..1781c6f35 100644 --- a/src/probnum/utils/arrayutils.py +++ b/src/probnum/utils/arrayutils.py @@ -98,8 +98,8 @@ def as_colvec(vec): """ Transform the given vector or random variable to column format. - Given a vector (or random variable) of dimension (n,) return an array with dimensions - (n, 1) instead. Higher-dimensional arrays are not changed. + Given a vector (or random variable) of dimension (n,) return an array with + dimensions (n, 1) instead. Higher-dimensional arrays are not changed. 
Parameters ---------- From 193ad6abff02f5670120c45e7e6af0067996b61e Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Tue, 18 Aug 2020 08:33:41 +0200 Subject: [PATCH 24/44] Only lint `./src` for now, not `./test` --- tox.ini | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 8ef27f5f8..a1bde641e 100644 --- a/tox.ini +++ b/tox.ini @@ -50,4 +50,6 @@ commands = basepython = python3 description = Code linting with pylint deps = pylint -commands = pylint src test +commands = + pylint src + # pylint test From 46b4672eb0c80521b3dc08677a95b6da32f5544e Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Tue, 18 Aug 2020 08:59:48 +0200 Subject: [PATCH 25/44] Fixed "empty-docstring" in ./tests --- src/probnum/diffeq/ode/ivp.py | 2 - .../diffeq/odefiltsmooth/ivp2filter.py | 5 +- .../diffeq/odefiltsmooth/odefiltsmooth.py | 19 +++--- src/probnum/diffeq/odefiltsmooth/prior.py | 1 - .../gaussfiltsmooth/extendedkalman.py | 2 +- .../gaussfiltsmooth/unscentedkalman.py | 2 +- src/probnum/quad/quadrature.py | 3 +- tests/test_diffeq/test_ode/test_ivp.py | 24 ------- .../test_odefiltsmooth/test_odefiltsmooth.py | 34 ---------- .../test_odefiltsmooth/test_prior.py | 27 -------- tests/test_diffeq/test_steprule.py | 13 ---- .../filtsmooth_testcases.py | 7 --- .../test_extendedkalman.py | 22 ------- .../test_gaussfiltsmooth/test_kalman.py | 26 -------- .../test_unscentedkalman.py | 22 ------- .../test_unscentedtransform.py | 18 ------ .../test_continuous/test_continuousmodel.py | 15 ----- .../test_continuous/test_linearsdemodel.py | 19 ------ .../test_discretegaussianmodel.py | 63 ------------------- .../test_discrete/test_discretemodel.py | 19 ------ .../test_polynomial/test_clenshawcurtis.py | 3 +- tests/test_quad/test_quadrature.py | 7 --- tests/test_utils/test_arrayutils.py | 16 ----- tests/test_utils/test_fctutils.py | 15 +---- 24 files changed, 14 insertions(+), 370 deletions(-) diff --git a/src/probnum/diffeq/ode/ivp.py b/src/probnum/diffeq/ode/ivp.py index bc33444a4..cb28e9357 100644 --- a/src/probnum/diffeq/ode/ivp.py +++ b/src/probnum/diffeq/ode/ivp.py @@ -284,8 +284,6 @@ class IVP(ODE): """ def __init__(self, timespan, initrv, rhs, jac=None, hess=None, sol=None): - """ - """ self.initrv = initrv super().__init__(timespan=timespan, rhs=rhs, jac=jac, hess=hess, sol=sol) diff --git a/src/probnum/diffeq/odefiltsmooth/ivp2filter.py b/src/probnum/diffeq/odefiltsmooth/ivp2filter.py index 10ac65678..309a9b4e0 100644 --- a/src/probnum/diffeq/odefiltsmooth/ivp2filter.py +++ b/src/probnum/diffeq/odefiltsmooth/ivp2filter.py @@ -144,8 +144,7 @@ def ivp2ukf(ivp, prior, evlvar): def _measmod_ukf(ivp, prior, measvar): - """ - """ + spatialdim = prior.spatialdim h0 = prior.proj2coord(coord=0) h1 = prior.proj2coord(coord=1) @@ -204,7 +203,7 @@ def _initialdistribution(ivp, prior): def _initialdistribution_no_precond(ivp, prior): - """ """ + x0 = ivp.initialdistribution.mean() dx0 = ivp.rhs(ivp.t0, x0) ddx0 = _ddx(ivp.t0, x0, ivp) diff --git a/src/probnum/diffeq/odefiltsmooth/odefiltsmooth.py b/src/probnum/diffeq/odefiltsmooth/odefiltsmooth.py index 3b96fc982..3969ee54e 100644 --- a/src/probnum/diffeq/odefiltsmooth/odefiltsmooth.py +++ b/src/probnum/diffeq/odefiltsmooth/odefiltsmooth.py @@ -255,7 +255,7 @@ def _create_solver_object( def _check_step_tol(step, tol): - """ """ + both_none = tol is None and step is None both_not_none = tol is not None and step is not None if both_none or both_not_none: @@ -264,14 +264,13 @@ def _check_step_tol(step, tol): def _check_method(method): 
- """ """ + if method not in ["ekf0", "ekf1", "ukf", "eks0", "eks1", "uks"]: raise ValueError("Method not supported.") def _string2prior(ivp, which_prior, precond_step, **kwargs): - """ - """ + ibm_family = ["ibm1", "ibm2", "ibm3", "ibm4"] ioup_family = ["ioup1", "ioup2", "ioup3", "ioup4"] matern_family = ["matern32", "matern52", "matern72", "matern92"] @@ -286,8 +285,7 @@ def _string2prior(ivp, which_prior, precond_step, **kwargs): def _string2ibm(ivp, which_prior, precond_step, **kwargs): - """ - """ + if "diffconst" in kwargs.keys(): diffconst = kwargs["diffconst"] else: @@ -305,8 +303,7 @@ def _string2ibm(ivp, which_prior, precond_step, **kwargs): def _string2ioup(ivp, which_prior, precond_step, **kwargs): - """ - """ + if "diffconst" in kwargs.keys(): diffconst = kwargs["diffconst"] else: @@ -328,8 +325,7 @@ def _string2ioup(ivp, which_prior, precond_step, **kwargs): def _string2matern(ivp, which_prior, precond_step, **kwargs): - """ - """ + if "diffconst" in kwargs.keys(): diffconst = kwargs["diffconst"] else: @@ -351,8 +347,7 @@ def _string2matern(ivp, which_prior, precond_step, **kwargs): def _string2filter(_ivp, _prior, _method, **kwargs): - """ - """ + if "evlvar" in kwargs.keys(): evlvar = kwargs["evlvar"] else: diff --git a/src/probnum/diffeq/odefiltsmooth/prior.py b/src/probnum/diffeq/odefiltsmooth/prior.py index 993824eca..8aaafd9b3 100644 --- a/src/probnum/diffeq/odefiltsmooth/prior.py +++ b/src/probnum/diffeq/odefiltsmooth/prior.py @@ -78,7 +78,6 @@ class ODEPrior(LTISDEModel): """ def __init__(self, driftmat, dispmat, ordint, spatialdim, precond_step=1.0): - """ """ self.ordint = ordint self.spatialdim = spatialdim self.precond, self.invprecond = self.precond2nordsieck(precond_step) diff --git a/src/probnum/filtsmooth/gaussfiltsmooth/extendedkalman.py b/src/probnum/filtsmooth/gaussfiltsmooth/extendedkalman.py index 71ec35660..e552e8246 100644 --- a/src/probnum/filtsmooth/gaussfiltsmooth/extendedkalman.py +++ b/src/probnum/filtsmooth/gaussfiltsmooth/extendedkalman.py @@ -21,7 +21,7 @@ class ExtendedKalman(GaussFiltSmooth): """ def __new__(cls, dynamod, measmod, initrv, **kwargs): - """ """ + if cls is ExtendedKalman: if _cont_disc(dynamod, measmod): return _ContDiscExtendedKalman(dynamod, measmod, initrv, **kwargs) diff --git a/src/probnum/filtsmooth/gaussfiltsmooth/unscentedkalman.py b/src/probnum/filtsmooth/gaussfiltsmooth/unscentedkalman.py index eaf26c10e..13574f003 100644 --- a/src/probnum/filtsmooth/gaussfiltsmooth/unscentedkalman.py +++ b/src/probnum/filtsmooth/gaussfiltsmooth/unscentedkalman.py @@ -25,7 +25,7 @@ class UnscentedKalman(GaussFiltSmooth): """ def __new__(cls, dynamod, measmod, initrv, alpha, beta, kappa, **kwargs): - """ """ + if cls is UnscentedKalman: if _cont_disc(dynamod, measmod): return _ContDiscUnscentedKalman( diff --git a/src/probnum/quad/quadrature.py b/src/probnum/quad/quadrature.py index 5d2e959be..64f6b4375 100644 --- a/src/probnum/quad/quadrature.py +++ b/src/probnum/quad/quadrature.py @@ -67,8 +67,7 @@ class Quadrature(abc.ABC): """ def __init__(self): - """ - """ + pass def integrate(self, fun, **kwargs): """ diff --git a/tests/test_diffeq/test_ode/test_ivp.py b/tests/test_diffeq/test_ode/test_ivp.py index 2b2e16306..e824f53d4 100644 --- a/tests/test_diffeq/test_ode/test_ivp.py +++ b/tests/test_diffeq/test_ode/test_ivp.py @@ -1,7 +1,3 @@ -""" - -""" - import unittest import numpy as np from probnum.diffeq.ode import ivp @@ -18,7 +14,6 @@ class TestExamples(unittest.TestCase, NumpyAssertions): """ def setUp(self): - """ """ self.tspan = 
(0.0, 4.212) def test_logistic(self): @@ -49,8 +44,6 @@ def test_logistic_jacobian(self): ) def test_fitzhughnagumo(self): - """ - """ rv = RandomVariable(distribution=Dirac(np.ones(2))) lg1 = ivp.fitzhughnagumo(self.tspan, rv) self.assertEqual(issubclass(type(lg1), ivp.IVP), True) @@ -75,8 +68,6 @@ def test_fitzhughnagumo_jacobian(self): ) def test_lotkavolterra(self): - """ - """ rv = RandomVariable(distribution=Dirac(np.ones(2))) lg1 = ivp.lotkavolterra(self.tspan, rv) self.assertEqual(issubclass(type(lg1), ivp.IVP), True) @@ -102,12 +93,7 @@ def test_lotkavolterra_jacobian(self): class TestIVP(unittest.TestCase): - """ - """ - def setUp(self): - """ """ - def rhs_(t, x): return -x @@ -124,16 +110,12 @@ def sol_(t): ) def test_rhs(self): - """ - """ some_x = np.random.rand(TEST_NDIM) some_t = np.random.rand() out = self.mockivp.rhs(some_t, some_x) self.assertEqual(len(out), TEST_NDIM) def test_jacobian(self): - """ - """ some_x = np.random.rand(TEST_NDIM) some_t = np.random.rand() out = self.mockivp.jacobian(some_t, some_x) @@ -141,19 +123,13 @@ def test_jacobian(self): self.assertEqual(out.shape[1], TEST_NDIM) def test_solution(self): - """ - """ some_t = np.random.rand() out = self.mockivp.solution(some_t) self.assertEqual(out.ndim, 1) self.assertEqual(out.shape[0], TEST_NDIM) def test_initialdistribution(self): - """ - """ __ = self.mockivp.initialdistribution def test_timespan(self): - """ - """ __, __ = self.mockivp.timespan diff --git a/tests/test_diffeq/test_odefiltsmooth/test_odefiltsmooth.py b/tests/test_diffeq/test_odefiltsmooth/test_odefiltsmooth.py index 991949d52..fcdd15a61 100644 --- a/tests/test_diffeq/test_odefiltsmooth/test_odefiltsmooth.py +++ b/tests/test_diffeq/test_odefiltsmooth/test_odefiltsmooth.py @@ -130,7 +130,6 @@ class TestFirstIterations(unittest.TestCase, NumpyAssertions): """ def setUp(self): - """ """ initrv = RandomVariable(distribution=Dirac(0.1 * np.ones(1))) self.ivp = ode.logistic([0.0, 1.5], initrv) self.step = 0.5 @@ -141,7 +140,6 @@ def setUp(self): self.ms, self.cs = state_rvs.mean(), state_rvs.cov() def test_t0(self): - """ """ exp_mean = np.array( [self.ivp.initrv.mean(), self.ivp.rhs(0, self.ivp.initrv.mean())] ) @@ -207,13 +205,9 @@ def setUp(self): self.step = 0.1 def test_filter_ivp_ioup1_kf(self): - """ - """ probsolve_ivp(self.ivp, tol=self.tol, which_prior="ioup1", method="ekf0") def test_filter_ivp_ioup2_ekf(self): - """ - """ probsolve_ivp(self.ivp, tol=self.tol, which_prior="ioup2", method="ekf1") def test_filter_ivp_ioup3_ukf(self): @@ -226,8 +220,6 @@ def test_filter_ivp_ioup3_ukf(self): ) def test_filter_ivp_h_ioup1_ekf(self): - """ - """ probsolve_ivp(self.ivp, step=self.step, which_prior="ioup1", method="ekf1") def test_filter_ivp_h_ioup2_ukf(self): @@ -240,18 +232,12 @@ def test_filter_ivp_h_ioup2_ukf(self): ) def test_filter_ivp_h_ioup3_kf(self): - """ - """ probsolve_ivp(self.ivp, step=self.step, which_prior="ioup3", method="ekf0") def test_filter_ivp_mat32_kf(self): - """ - """ probsolve_ivp(self.ivp, tol=self.tol, which_prior="matern32", method="ekf0") def test_filter_ivp_mat52_ekf(self): - """ - """ probsolve_ivp(self.ivp, tol=self.tol, which_prior="matern52", method="ekf1") def test_filter_ivp_mat72_ukf(self): @@ -264,8 +250,6 @@ def test_filter_ivp_mat72_ukf(self): ) def test_filter_ivp_h_mat32_ekf(self): - """ - """ probsolve_ivp(self.ivp, step=self.step, which_prior="matern32", method="ekf1") def test_filter_ivp_h_mat52_ukf(self): @@ -278,8 +262,6 @@ def test_filter_ivp_h_mat52_ukf(self): ) def 
test_filter_ivp_h_mat72_kf(self): - """ - """ probsolve_ivp(self.ivp, step=self.step, which_prior="matern72", method="ekf0") @@ -429,13 +411,9 @@ def setUp(self): self.step = 0.1 def test_filter_ivp_ioup1_kf(self): - """ - """ probsolve_ivp(self.ivp, tol=self.tol, which_prior="ioup1", method="eks0") def test_filter_ivp_ioup2_ekf(self): - """ - """ probsolve_ivp(self.ivp, tol=self.tol, which_prior="ioup2", method="eks1") def test_filter_ivp_ioup3_ukf(self): @@ -448,8 +426,6 @@ def test_filter_ivp_ioup3_ukf(self): ) def test_filter_ivp_h_ioup1_ekf(self): - """ - """ probsolve_ivp(self.ivp, step=self.step, which_prior="ioup1", method="eks1") def test_filter_ivp_h_ioup2_ukf(self): @@ -462,18 +438,12 @@ def test_filter_ivp_h_ioup2_ukf(self): ) def test_filter_ivp_h_ioup3_kf(self): - """ - """ probsolve_ivp(self.ivp, step=self.step, which_prior="ioup3", method="eks0") def test_filter_ivp_mat32_kf(self): - """ - """ probsolve_ivp(self.ivp, tol=self.tol, which_prior="matern32", method="eks0") def test_filter_ivp_mat52_ekf(self): - """ - """ probsolve_ivp(self.ivp, tol=self.tol, which_prior="matern52", method="eks1") def test_filter_ivp_mat72_ukf(self): @@ -486,8 +456,6 @@ def test_filter_ivp_mat72_ukf(self): ) def test_filter_ivp_h_mat32_ekf(self): - """ - """ probsolve_ivp(self.ivp, step=self.step, which_prior="matern32", method="eks1") def test_filter_ivp_h_mat52_ukf(self): @@ -500,6 +468,4 @@ def test_filter_ivp_h_mat52_ukf(self): ) def test_filter_ivp_h_mat72_kf(self): - """ - """ probsolve_ivp(self.ivp, step=self.step, which_prior="matern72", method="eks0") diff --git a/tests/test_diffeq/test_odefiltsmooth/test_prior.py b/tests/test_diffeq/test_odefiltsmooth/test_prior.py index 9246e75f8..8d93550d8 100644 --- a/tests/test_diffeq/test_odefiltsmooth/test_prior.py +++ b/tests/test_diffeq/test_odefiltsmooth/test_prior.py @@ -52,17 +52,10 @@ class TestIBM(unittest.TestCase, NumpyAssertions): - """ - """ - def setUp(self): - """ - """ self.ibm = prior.IBM(2, 2, DIFFCONST) def test_chapmankolmogorov(self): - """ - """ mean, cov = np.ones(self.ibm.ndim), np.eye(self.ibm.ndim) initrv = RandomVariable(distribution=Normal(mean, cov)) cke, __ = self.ibm.chapmankolmogorov(0.0, STEP, STEP, initrv) @@ -73,19 +66,12 @@ def test_chapmankolmogorov(self): class TestIBMPrecond(unittest.TestCase, NumpyAssertions): - """ - """ - def setUp(self): - """ - """ self.ibm = prior.IBM( ordint=2, spatialdim=1, diffconst=DIFFCONST, precond_step=STEP ) def test_chapmankolmogorov(self): - """ - """ mean, cov = np.ones(self.ibm.ndim), np.eye(self.ibm.ndim) initrv = RandomVariable(distribution=Normal(mean, cov)) cke, __ = self.ibm.chapmankolmogorov(0.0, STEP, STEP, initrv) @@ -97,18 +83,11 @@ def test_chapmankolmogorov(self): class TestIOUP(unittest.TestCase, NumpyAssertions): - """ - """ - def setUp(self): - """ - """ driftspeed = np.random.rand() self.ibm = prior.IOUP(2, 2, driftspeed, DIFFCONST) def test_chapmankolmogorov(self): - """ - """ mean, cov = np.ones(self.ibm.ndim), np.eye(self.ibm.ndim) initrv = RandomVariable(distribution=Normal(mean, cov)) self.ibm.chapmankolmogorov(0.0, STEP, STEP, initrv) @@ -131,8 +110,6 @@ class TestMatern(unittest.TestCase, NumpyAssertions): """ def setUp(self): - """ - """ lenscale, diffconst = np.random.rand(), np.random.rand() self.mat0 = prior.Matern(0, 1, lenscale, diffconst) self.mat1 = prior.Matern(1, 1, lenscale, diffconst) @@ -163,14 +140,10 @@ def test_n2(self): self.assertAllClose(self.mat2.driftmatrix[-1, :], expected) def test_larger_shape(self): - """ - """ mat2d = 
prior.Matern(2, 2, 1.0, 1.0) self.assertEqual(mat2d.ndim, 2 * (2 + 1)) def test_chapmankolmogorov(self): - """ - """ mean, cov = np.ones(self.mat1.ndim), np.eye(self.mat1.ndim) initrv = RandomVariable(distribution=Normal(mean, cov)) self.mat1.chapmankolmogorov(0.0, STEP, STEP, initrv) diff --git a/tests/test_diffeq/test_steprule.py b/tests/test_diffeq/test_steprule.py index b6dfd207c..c97403681 100644 --- a/tests/test_diffeq/test_steprule.py +++ b/tests/test_diffeq/test_steprule.py @@ -1,6 +1,3 @@ -""" - -""" from probnum.diffeq import steprule import unittest import numpy as np @@ -15,20 +12,14 @@ class TestConstantStep(unittest.TestCase): """ def setUp(self): - """ - """ self.step = random_state.rand() self.sr = steprule.ConstantSteps(self.step) def test_suggest(self): - """ - """ stp = self.sr.suggest(self.step, np.nan) self.assertEqual(stp, self.step) def test_is_accepted(self): - """ - """ isacc = self.sr.is_accepted(np.inf, np.nan) self.assertEqual(isacc, True) @@ -47,15 +38,11 @@ def setUp(self): self.asr = steprule.AdaptiveSteps(self.tol, 3) def test_is_accepted(self): - """ - """ suggstep = random_state.rand() errorest = suggstep ** 3 / 3 self.assertEqual(self.asr.is_accepted(suggstep, errorest), False) def test_propose(self): - """ - """ step = 0.25 * random_state.rand() errorest = step sugg = self.asr.suggest(step, errorest) diff --git a/tests/test_filtsmooth/test_gaussfiltsmooth/filtsmooth_testcases.py b/tests/test_filtsmooth/test_gaussfiltsmooth/filtsmooth_testcases.py index 11b3ddb7b..2b55d203d 100644 --- a/tests/test_filtsmooth/test_gaussfiltsmooth/filtsmooth_testcases.py +++ b/tests/test_filtsmooth/test_gaussfiltsmooth/filtsmooth_testcases.py @@ -36,7 +36,6 @@ class CarTrackingDDTestCase(unittest.TestCase, NumpyAssertions): cov = 0.5 * var * np.eye(4) def setup_cartracking(self): - """ """ self.dynmod = DiscreteGaussianLTIModel( dynamat=self.dynamat, forcevec=np.zeros(4), diffmat=self.dynadiff ) @@ -63,8 +62,6 @@ class OrnsteinUhlenbeckCDTestCase(unittest.TestCase, NumpyAssertions): diff = q * np.eye(1) def setup_ornsteinuhlenbeck(self): - """ - """ self.dynmod = LTISDEModel( driftmatrix=self.drift, force=self.force, @@ -82,11 +79,7 @@ def setup_ornsteinuhlenbeck(self): class PendulumNonlinearDDTestCase(unittest.TestCase, NumpyAssertions): - """ """ - def setup_pendulum(self): - """ - """ delta_t = 0.0075 var = 0.32 ** 2 g = 9.81 diff --git a/tests/test_filtsmooth/test_gaussfiltsmooth/test_extendedkalman.py b/tests/test_filtsmooth/test_gaussfiltsmooth/test_extendedkalman.py index 249fede54..fbdeb60d0 100644 --- a/tests/test_filtsmooth/test_gaussfiltsmooth/test_extendedkalman.py +++ b/tests/test_filtsmooth/test_gaussfiltsmooth/test_extendedkalman.py @@ -1,6 +1,3 @@ -""" -""" - import scipy.linalg from probnum.filtsmooth.gaussfiltsmooth import * @@ -20,19 +17,13 @@ class TestExtendedKalmanDiscDisc(CarTrackingDDTestCase): """ def setUp(self): - """ - """ super().setup_cartracking() self.method = ExtendedKalman(self.dynmod, self.measmod, self.initrv) def test_dynamicmodel(self): - """ - """ self.assertEqual(self.dynmod, self.method.dynamicmodel) def test_measurementmodel(self): - """ - """ self.assertEqual(self.measmod, self.method.measurementmodel) def test_initialdistribution(self): @@ -105,35 +96,24 @@ class TestExtendedKalmanContDisc(OrnsteinUhlenbeckCDTestCase): """ def setUp(self): - """ """ super().setup_ornsteinuhlenbeck() self.method = ExtendedKalman(self.dynmod, self.measmod, self.initrv) def test_dynamicmodel(self): - """ - """ self.assertEqual(self.dynmod, 
self.method.dynamicmodel) def test_measurementmodel(self): - """ - """ self.assertEqual(self.measmod, self.method.measurementmodel) def test_initialdistribution(self): - """ - """ self.assertEqual(self.initrv, self.method.initialrandomvariable) def test_predict_shape(self): - """ - """ pred, __ = self.method.predict(0.0, self.delta_t, self.initrv) self.assertEqual(pred.mean().shape, (1,)) self.assertEqual(pred.cov().shape, (1, 1)) def test_predict_value(self): - """ - """ pred, __ = self.method.predict(0.0, self.delta_t, self.initrv) ah = scipy.linalg.expm(self.delta_t * self.drift) qh = ( @@ -147,8 +127,6 @@ def test_predict_value(self): self.assertApproxEqual(expectedcov, pred.cov()) def test_update(self): - """ - """ data = self.measmod.sample(0.0, self.initrv.mean() * np.ones(1)) upd, __, __, __ = self.method.update(0.0, self.initrv, data) self.assertEqual(upd.mean().shape, (1,)) diff --git a/tests/test_filtsmooth/test_gaussfiltsmooth/test_kalman.py b/tests/test_filtsmooth/test_gaussfiltsmooth/test_kalman.py index f7eb89b2e..082d2d358 100644 --- a/tests/test_filtsmooth/test_gaussfiltsmooth/test_kalman.py +++ b/tests/test_filtsmooth/test_gaussfiltsmooth/test_kalman.py @@ -1,4 +1,3 @@ -"""""" import scipy.linalg from probnum.filtsmooth.gaussfiltsmooth import * @@ -18,29 +17,19 @@ class TestKalmanDiscreteDiscrete(CarTrackingDDTestCase): """ def setUp(self): - """ - """ super().setup_cartracking() self.method = Kalman(self.dynmod, self.measmod, self.initrv) def test_dynamicmodel(self): - """ - """ self.assertEqual(self.dynmod, self.method.dynamicmodel) def test_measurementmodel(self): - """ - """ self.assertEqual(self.measmod, self.method.measurementmodel) def test_initialdistribution(self): - """ - """ self.assertEqual(self.initrv, self.method.initialrandomvariable) def test_predict(self): - """ - """ pred, __ = self.method.predict(0.0, self.delta_t, self.initrv) self.assertEqual(pred.mean().ndim, 1) self.assertEqual(pred.mean().shape[0], 4) @@ -49,8 +38,6 @@ def test_predict(self): self.assertEqual(pred.cov().shape[1], 4) def test_update(self): - """ - """ data = self.measmod.sample(0.0, self.initrv.mean()) upd, __, __, __ = self.method.update(0.0, self.initrv, data) self.assertEqual(upd.mean().ndim, 1) @@ -108,35 +95,24 @@ class TestKalmanContinuousDiscrete(OrnsteinUhlenbeckCDTestCase): """ def setUp(self): - """ """ super().setup_ornsteinuhlenbeck() self.method = Kalman(self.dynmod, self.measmod, self.initrv) def test_dynamicmodel(self): - """ - """ self.assertEqual(self.dynmod, self.method.dynamicmodel) def test_measurementmodel(self): - """ - """ self.assertEqual(self.measmod, self.method.measurementmodel) def test_initialdistribution(self): - """ - """ self.assertEqual(self.initrv, self.method.initialrandomvariable) def test_predict_shape(self): - """ - """ pred, __ = self.method.predict(0.0, self.delta_t, self.initrv) self.assertEqual(pred.mean().shape, (1,)) self.assertEqual(pred.cov().shape, (1, 1)) def test_predict_value(self): - """ - """ pred, __ = self.method.predict(0.0, self.delta_t, self.initrv) ah = scipy.linalg.expm(self.delta_t * self.drift) qh = ( @@ -150,8 +126,6 @@ def test_predict_value(self): self.assertApproxEqual(expectedcov, pred.cov()) def test_update(self): - """ - """ data = np.array([self.measmod.sample(0.0, self.initrv.mean() * np.ones(1))]) upd, __, __, __ = self.method.update(0.0, self.initrv, data) self.assertEqual(upd.mean().shape, (1,)) diff --git a/tests/test_filtsmooth/test_gaussfiltsmooth/test_unscentedkalman.py 
b/tests/test_filtsmooth/test_gaussfiltsmooth/test_unscentedkalman.py index 03bd3ce85..b69f116c2 100644 --- a/tests/test_filtsmooth/test_gaussfiltsmooth/test_unscentedkalman.py +++ b/tests/test_filtsmooth/test_gaussfiltsmooth/test_unscentedkalman.py @@ -1,6 +1,3 @@ -"""""" - - import scipy.linalg from probnum.filtsmooth.gaussfiltsmooth import * @@ -20,8 +17,6 @@ class TestUnscentedKalmanDiscDisc(CarTrackingDDTestCase): """ def setUp(self): - """ - """ super().setup_cartracking() alpha, beta, kappa = np.ones(3) self.method = UnscentedKalman( @@ -29,13 +24,9 @@ def setUp(self): ) def test_dynamicmodel(self): - """ - """ self.assertEqual(self.dynmod, self.method.dynamicmodel) def test_measurementmodel(self): - """ - """ self.assertEqual(self.measmod, self.method.measurementmodel) def test_initialdistribution(self): @@ -108,7 +99,6 @@ class TestUnscentedKalmanContDisc(OrnsteinUhlenbeckCDTestCase): """ def setUp(self): - """ """ super().setup_ornsteinuhlenbeck() alpha, beta, kappa = np.ones(3) self.method = UnscentedKalman( @@ -116,30 +106,20 @@ def setUp(self): ) def test_dynamicmodel(self): - """ - """ self.assertEqual(self.dynmod, self.method.dynamicmodel) def test_measurementmodel(self): - """ - """ self.assertEqual(self.measmod, self.method.measurementmodel) def test_initialdistribution(self): - """ - """ self.assertEqual(self.initrv, self.method.initialrandomvariable) def test_predict_shape(self): - """ - """ pred, __ = self.method.predict(0.0, self.delta_t, self.initrv) self.assertEqual(pred.mean().shape, (1,)) self.assertEqual(pred.cov().shape, (1, 1)) def test_predict_value(self): - """ - """ pred, __ = self.method.predict(0.0, self.delta_t, self.initrv) ah = scipy.linalg.expm(self.delta_t * self.drift) qh = ( @@ -153,8 +133,6 @@ def test_predict_value(self): self.assertApproxEqual(expectedcov, pred.cov()) def test_update(self): - """ - """ data = self.measmod.sample(0.0, self.initrv.mean() * np.ones(1)) upd, __, __, __ = self.method.update(0.0, self.initrv, data) self.assertEqual(upd.mean().shape, (1,)) diff --git a/tests/test_filtsmooth/test_gaussfiltsmooth/test_unscentedtransform.py b/tests/test_filtsmooth/test_gaussfiltsmooth/test_unscentedtransform.py index d17a6751e..aaf7eaf7f 100644 --- a/tests/test_filtsmooth/test_gaussfiltsmooth/test_unscentedtransform.py +++ b/tests/test_filtsmooth/test_gaussfiltsmooth/test_unscentedtransform.py @@ -1,6 +1,3 @@ -""" -""" - import unittest import numpy as np @@ -9,12 +6,7 @@ class TestUnscentedTransform(unittest.TestCase): - """ - """ - def setUp(self): - """ - """ self.ndim = np.random.randint(1, 33) # 1 < random int < 33 alpha, beta, kappa = np.random.rand(3) self.ut = unscentedtransform.UnscentedTransform(self.ndim, alpha, beta, kappa) @@ -23,24 +15,18 @@ def setUp(self): self.covar = cvr @ cvr.T def test_weights_shape(self): - """ - """ self.assertEqual(self.ut.mweights.ndim, 1) self.assertEqual(self.ut.mweights.shape[0], 2 * self.ndim + 1) self.assertEqual(self.ut.cweights.ndim, 1) self.assertEqual(self.ut.cweights.shape[0], 2 * self.ndim + 1) def test_sigpts_shape(self): - """ - """ sigpts = self.ut.sigma_points(self.mean, self.covar) self.assertEqual(sigpts.ndim, 2) self.assertEqual(sigpts.shape[0], 2 * self.ndim + 1) self.assertEqual(sigpts.shape[1], self.ndim) def test_propagate_shape(self): - """ - """ sigpts = self.ut.sigma_points(self.mean, self.covar) propagated = self.ut.propagate(None, sigpts, lambda t, x: np.sin(x)) self.assertEqual(propagated.ndim, 2) @@ -48,8 +34,6 @@ def test_propagate_shape(self): 
self.assertEqual(propagated.shape[1], self.ndim) def test_estimate_statistics_shape(self): - """ - """ sigpts = self.ut.sigma_points(self.mean, self.covar) proppts = self.ut.propagate(None, sigpts, lambda t, x: np.sin(x)) mest, cest, ccest = self.ut.estimate_statistics( @@ -65,8 +49,6 @@ def test_estimate_statistics_shape(self): self.assertEqual(ccest.shape[1], self.ndim) def test_transform_of_gaussian_exact(self): - """ - """ sigpts = self.ut.sigma_points(self.mean, self.covar) ndim_meas = self.ndim + 1 # != self.ndim is important transmtrx = np.random.rand(ndim_meas, self.ndim) diff --git a/tests/test_filtsmooth/test_statespace/test_continuous/test_continuousmodel.py b/tests/test_filtsmooth/test_statespace/test_continuous/test_continuousmodel.py index 789e0e3c4..c1ac830ac 100644 --- a/tests/test_filtsmooth/test_statespace/test_continuous/test_continuousmodel.py +++ b/tests/test_filtsmooth/test_statespace/test_continuous/test_continuousmodel.py @@ -1,6 +1,3 @@ -""" -""" - import unittest import numpy as np @@ -60,13 +57,9 @@ class TestContinuousModel(unittest.TestCase): """ def setUp(self): - """ - """ self.mcm = MockContinuousModel() def test_sample(self): - """ - """ mean, cov = np.zeros(TEST_NDIM), np.eye(TEST_NDIM) randvar = RandomVariable(distribution=Normal(mean, cov)) samp = self.mcm.sample(0.0, 1.0, 0.01, randvar.mean()) @@ -79,8 +72,6 @@ def test_sample(self): plt.show() def test_ndim(self): - """ - """ self.assertEqual(self.mcm.ndim, TEST_NDIM) @@ -131,20 +122,14 @@ class TestDeterministicModel(unittest.TestCase): """ def setUp(self): - """ - """ dm = DeterministicModel() randvar = RandomVariable(distribution=Dirac(np.ones(TEST_NDIM))) self.samp = dm.sample(0.0, 1.0, 0.01, randvar.mean()) def test_sample_shape(self): - """ - """ self.assertEqual(self.samp.ndim, 1) self.assertEqual(self.samp.shape[0], TEST_NDIM) def test_sample_vals(self): - """ - """ diff = np.abs(np.exp(1) - self.samp[0]) self.assertLess(diff, 1e-1) diff --git a/tests/test_filtsmooth/test_statespace/test_continuous/test_linearsdemodel.py b/tests/test_filtsmooth/test_statespace/test_continuous/test_linearsdemodel.py index dceb0a4b4..da1e3bb22 100644 --- a/tests/test_filtsmooth/test_statespace/test_continuous/test_linearsdemodel.py +++ b/tests/test_filtsmooth/test_statespace/test_continuous/test_linearsdemodel.py @@ -1,5 +1,3 @@ -""" -""" import unittest import numpy as np @@ -18,7 +16,6 @@ class TestLinearSDEModel(unittest.TestCase): """ def setUp(self): - """ """ self.driftmat = np.random.rand(TEST_NDIM, TEST_NDIM) self.dispmat = np.random.rand(TEST_NDIM) self.diffmat = self.driftmat @ self.driftmat.T + np.eye(TEST_NDIM) @@ -31,7 +28,6 @@ def setUp(self): ) def test_drift(self): - """ """ some_state = np.random.rand(TEST_NDIM) diff = self.lm.drift(0.0, some_state) - ( self.driftmat @ some_state + self.force @@ -39,28 +35,23 @@ def test_drift(self): self.assertLess(np.linalg.norm(diff), 1e-14) def test_disp(self): - """ """ some_state = np.random.rand(TEST_NDIM) diff = self.lm.dispersion(0.0, some_state) - self.dispmat self.assertLess(np.linalg.norm(diff), 1e-14) def test_jac(self): - """ """ some_state = np.random.rand(TEST_NDIM) diff = self.lm.jacobian(0.0, some_state) - self.driftmat self.assertLess(np.linalg.norm(diff), 1e-14) def test_diff(self): - """ """ diffusion = self.lm.diffusionmatrix self.assertLess(np.linalg.norm(diffusion - self.diffmat), 1e-14) def test_ndim(self): - """ """ self.assertEqual(self.lm.ndim, TEST_NDIM) def test_sample(self): - """ """ samp = self.lm.sample(0.0, 1.0, 0.1, 
(np.ones(TEST_NDIM))) self.assertEqual(samp.ndim, 1) self.assertEqual(samp.shape[0], TEST_NDIM) @@ -116,7 +107,6 @@ class TestLTISDEModel(unittest.TestCase): """ def setUp(self): - """ """ self.driftmat = np.diag(np.ones(TEST_NDIM - 1), 1) self.dispmat = 1.5 * np.eye(TEST_NDIM)[:, -1].reshape((TEST_NDIM, 1)) self.diffmat = np.eye(1) @@ -126,7 +116,6 @@ def setUp(self): ) def test_drift(self): - """ """ some_state = np.random.rand(TEST_NDIM) diff = self.lti.drift(0.0, some_state) - ( self.driftmat @ some_state + self.force @@ -134,42 +123,34 @@ def test_drift(self): self.assertLess(np.linalg.norm(diff), 1e-14) def test_disp(self): - """ """ some_state = np.random.rand(TEST_NDIM) diff = self.lti.dispersion(0.0, some_state) - self.dispmat self.assertLess(np.linalg.norm(diff), 1e-14) def test_jac(self): - """ """ some_state = np.random.rand(TEST_NDIM) diff = self.lti.jacobian(0.0, some_state) - self.driftmat self.assertLess(np.linalg.norm(diff), 1e-14) def test_diff(self): - """ """ diffusion = self.lti.diffusionmatrix self.assertLess(np.linalg.norm(diffusion - self.diffmat), 1e-14) def test_ndim(self): - """ """ self.assertEqual(self.lti.ndim, TEST_NDIM) def test_sample(self): - """ """ samp = self.lti.sample(0.0, 1.0, 0.1, (np.ones(TEST_NDIM))) self.assertEqual(samp.ndim, 1) self.assertEqual(samp.shape[0], TEST_NDIM) def test_driftmatrix(self): - """ """ self.assertLess(np.linalg.norm(self.lti.driftmatrix - self.driftmat), 1e-14) def test_force(self): - """ """ self.assertLess(np.linalg.norm(self.lti.force - self.force), 1e-14) def test_dispmatrix(self): - """ """ self.assertLess(np.linalg.norm(self.lti.dispersionmatrix - self.dispmat), 1e-14) def test_chapmankolmogorov(self): diff --git a/tests/test_filtsmooth/test_statespace/test_discrete/test_discretegaussianmodel.py b/tests/test_filtsmooth/test_statespace/test_discrete/test_discretegaussianmodel.py index c9cd462ea..ddc0cf7b9 100644 --- a/tests/test_filtsmooth/test_statespace/test_discrete/test_discretegaussianmodel.py +++ b/tests/test_filtsmooth/test_statespace/test_discrete/test_discretegaussianmodel.py @@ -1,7 +1,3 @@ -""" - -""" - import unittest import numpy as np @@ -12,12 +8,7 @@ class TestDiscreteGaussianModel(unittest.TestCase): - """ - """ - def setUp(self): - """ - """ dynamat = np.random.rand(TEST_NDIM, TEST_NDIM) diffmat = dynamat @ dynamat.T + np.eye(TEST_NDIM) self.nl = discretegaussianmodel.DiscreteGaussianModel( @@ -25,56 +16,39 @@ def setUp(self): ) def test_dynamics(self): - """ - """ some_input = np.random.rand(TEST_NDIM) val = self.nl.dynamics(0.0, some_input) self.assertEqual(val.ndim, 1) self.assertEqual(val.shape[0], TEST_NDIM) def test_diffusionmatrix(self): - """ - """ val = self.nl.diffusionmatrix(0.0) self.assertEqual(val.ndim, 2) self.assertEqual(val.shape[0], TEST_NDIM) self.assertEqual(val.shape[1], TEST_NDIM) def test_jacobian(self): - """ - """ some_input = np.random.rand(TEST_NDIM) with self.assertRaises(NotImplementedError): self.nl.jacobian(0.0, some_input) def test_sample(self): - """ - """ some_input = np.random.rand(TEST_NDIM) samp = self.nl.sample(0.0, some_input) self.assertEqual(samp.ndim, 1) self.assertEqual(samp.shape[0], TEST_NDIM) def test_ndim(self): - """ - """ self.assertEqual(self.nl.ndim, TEST_NDIM) def test_pdf(self): - """ - """ some_state = np.random.rand(TEST_NDIM) evl = self.nl.pdf(some_state, 0.0, some_state) self.assertEqual(np.isscalar(evl), True) class TestLinear(unittest.TestCase): - """ - """ - def setUp(self): - """ - """ dynamat = np.random.rand(TEST_NDIM, TEST_NDIM) diffmat 
= dynamat @ dynamat.T + np.eye(TEST_NDIM) self.lin = discretegaussianmodel.DiscreteGaussianLinearModel( @@ -82,24 +56,18 @@ def setUp(self): ) def test_dynamics(self): - """ - """ some_input = np.random.rand(TEST_NDIM) val = self.lin.dynamics(0.0, some_input) self.assertEqual(val.ndim, 1) self.assertEqual(val.shape[0], TEST_NDIM) def test_diffusionmatrix(self): - """ - """ val = self.lin.diffusionmatrix(0.0) self.assertEqual(val.ndim, 2) self.assertEqual(val.shape[0], TEST_NDIM) self.assertEqual(val.shape[1], TEST_NDIM) def test_jacobian(self): - """ - """ some_input = np.random.rand(TEST_NDIM) jac = self.lin.jacobian(0.0, some_input) self.assertEqual(jac.ndim, 2) @@ -107,48 +75,33 @@ def test_jacobian(self): self.assertEqual(jac.shape[1], TEST_NDIM) def test_dynamicsmatrix(self): - """ - """ dyna = self.lin.dynamicsmatrix(0.0) self.assertEqual(dyna.ndim, 2) self.assertEqual(dyna.shape[0], TEST_NDIM) self.assertEqual(dyna.shape[1], TEST_NDIM) def test_force(self): - """ - """ force = self.lin.force(0.0) self.assertEqual(force.ndim, 1) self.assertEqual(force.shape[0], TEST_NDIM) def test_sample(self): - """ - """ some_input = np.random.rand(TEST_NDIM) samp = self.lin.sample(0.0, some_input) self.assertEqual(samp.ndim, 1) self.assertEqual(samp.shape[0], TEST_NDIM) def test_ndim(self): - """ - """ self.assertEqual(self.lin.ndim, TEST_NDIM) def test_pdf(self): - """ - """ some_state = np.random.rand(TEST_NDIM) evl = self.lin.pdf(some_state, 0.0, some_state) self.assertEqual(np.isscalar(evl), True) class TestLTI(unittest.TestCase): - """ - """ - def setUp(self): - """ - """ dynamat = np.random.rand(TEST_NDIM, TEST_NDIM) diffmat = dynamat @ dynamat.T + np.eye(TEST_NDIM) self.lti = discretegaussianmodel.DiscreteGaussianLTIModel( @@ -156,16 +109,12 @@ def setUp(self): ) def test_dynamics(self): - """ - """ some_input = np.random.rand(TEST_NDIM) val = self.lti.dynamics(0.0, some_input) self.assertEqual(val.ndim, 1) self.assertEqual(val.shape[0], TEST_NDIM) def test_dynamicsmatrix(self): - """ - """ some_input = np.random.rand(TEST_NDIM) dyna = self.lti.dynamicsmatrix(0.0) self.assertEqual(dyna.ndim, 2) @@ -173,16 +122,12 @@ def test_dynamicsmatrix(self): self.assertEqual(dyna.shape[1], TEST_NDIM) def test_diffusionmatrix(self): - """ - """ val = self.lti.diffusionmatrix(0.0) self.assertEqual(val.ndim, 2) self.assertEqual(val.shape[0], TEST_NDIM) self.assertEqual(val.shape[1], TEST_NDIM) def test_jacobian(self): - """ - """ some_input = np.random.rand(TEST_NDIM) jac = self.lti.jacobian(0.0, some_input) self.assertEqual(jac.ndim, 2) @@ -190,28 +135,20 @@ def test_jacobian(self): self.assertEqual(jac.shape[1], TEST_NDIM) def test_force(self): - """ - """ force = self.lti.force(0.0) self.assertEqual(force.ndim, 1) self.assertEqual(force.shape[0], TEST_NDIM) def test_sample(self): - """ - """ some_input = np.random.rand(TEST_NDIM) samp = self.lti.sample(0.0, some_input) self.assertEqual(samp.ndim, 1) self.assertEqual(samp.shape[0], TEST_NDIM) def test_ndim(self): - """ - """ self.assertEqual(self.lti.ndim, TEST_NDIM) def test_pdf(self): - """ - """ some_state = np.random.rand(TEST_NDIM) evl = self.lti.pdf(some_state, 0.0, some_state) self.assertEqual(np.isscalar(evl), True) diff --git a/tests/test_filtsmooth/test_statespace/test_discrete/test_discretemodel.py b/tests/test_filtsmooth/test_statespace/test_discrete/test_discretemodel.py index 3649ea1f0..79335284e 100644 --- a/tests/test_filtsmooth/test_statespace/test_discrete/test_discretemodel.py +++ 
b/tests/test_filtsmooth/test_statespace/test_discrete/test_discretemodel.py @@ -1,6 +1,3 @@ -""" -""" - import unittest from probnum.filtsmooth.statespace.discrete import discretemodel @@ -9,40 +6,24 @@ class MockDiscreteModel(discretemodel.DiscreteModel): - """ - """ - def sample(self, time, state, **kwargs): - """ - """ return state @property def ndim(self): - """ """ return TEST_NDIM class TestDiscreteModel(unittest.TestCase): - """ - """ - def setUp(self): - """ """ self.mdm = MockDiscreteModel() def test_sample(self): - """ - """ self.mdm.sample(0.0, 0.0) def test_ndim(self): - """ - """ self.assertEqual(self.mdm.ndim, TEST_NDIM) def test_pdf(self): - """ - """ with self.assertRaises(NotImplementedError): self.mdm.pdf(0.0, 0.0, 0.0) diff --git a/tests/test_quad/test_polynomial/test_clenshawcurtis.py b/tests/test_quad/test_polynomial/test_clenshawcurtis.py index 011fa4b60..ba0f00d3e 100644 --- a/tests/test_quad/test_polynomial/test_clenshawcurtis.py +++ b/tests/test_quad/test_polynomial/test_clenshawcurtis.py @@ -123,8 +123,7 @@ def setUp(self): var = 0.01 def gaussian(x): - """ - """ + return np.exp(-((x - mean) ** 2) / (2 * var)) / np.sqrt(2 * np.pi * var) ilbds = np.array([[mean - 3 * np.sqrt(var), mean]]) diff --git a/tests/test_quad/test_quadrature.py b/tests/test_quad/test_quadrature.py index 827f60281..585d91063 100644 --- a/tests/test_quad/test_quadrature.py +++ b/tests/test_quad/test_quadrature.py @@ -14,8 +14,6 @@ class TestQuadrature(unittest.TestCase): """ def setUp(self): - """ - """ npts = 10000 ndim = 1 nodes = np.random.rand(npts, ndim) @@ -24,9 +22,6 @@ def setUp(self): self.quad = polynomialquadrature.PolynomialQuadrature(nodes, weights, bounds) def test_compute(self): - """ - """ - def testfct(x): return 10 * x ** 3 - x # true integral: 2.5*x**4 - 0.5*x**2 + const @@ -37,8 +32,6 @@ def testfct(x): self.assertLess(np.abs(res_vec - res_seq), 1e-10) def test_wrong_inputs(self): - """ - """ npts = 10 ndim = 1 good_nodes = np.random.rand(npts, ndim) diff --git a/tests/test_utils/test_arrayutils.py b/tests/test_utils/test_arrayutils.py index 7b0ac8aab..9dc574d62 100644 --- a/tests/test_utils/test_arrayutils.py +++ b/tests/test_utils/test_arrayutils.py @@ -1,6 +1,3 @@ -""" -""" - import unittest import numpy as np @@ -9,19 +6,12 @@ class TestAssertIsArray(unittest.TestCase): - """ - """ - def test_assert_is_1d_ndarray_pass(self): - """ - """ arr = np.random.rand(4) arrayutils.assert_is_1d_ndarray(arr) self.assertEqual(1, 1) def test_assert_is_1d_ndarray_fail(self): - """ - """ arr_wrong = np.random.rand(4, 1) with self.assertRaises(ValueError): arrayutils.assert_is_1d_ndarray(arr_wrong) @@ -31,15 +21,11 @@ def test_assert_is_1d_ndarray_fail(self): arrayutils.assert_is_1d_ndarray(float_wrong) def test_assert_is_2d_ndarray_pass(self): - """ - """ arr = np.random.rand(4, 1) arrayutils.assert_is_2d_ndarray(arr) self.assertEqual(1, 1) def test_assert_is_2d_ndarray_fail(self): - """ - """ arr_wrong1 = np.random.rand(4,) with self.assertRaises(ValueError): arrayutils.assert_is_2d_ndarray(arr_wrong1) @@ -54,8 +40,6 @@ def test_assert_is_2d_ndarray_fail(self): def _set_in_bounds(ptset, ilbds): - """ - """ for (idx, col) in enumerate(ptset.T): if np.amin(col) != ilbds[idx, 0]: return False diff --git a/tests/test_utils/test_fctutils.py b/tests/test_utils/test_fctutils.py index e45a57d2d..a2278f5cf 100644 --- a/tests/test_utils/test_fctutils.py +++ b/tests/test_utils/test_fctutils.py @@ -1,6 +1,3 @@ -""" -""" - import unittest import numpy as np @@ -9,13 +6,7 @@ class 
TestAssertsEvaluatesToScalar(unittest.TestCase): - """ - """ - def test_assert_evaluates_to_scalar_pass(self): - """ - """ - def fct(x): return np.linalg.norm(x) @@ -24,9 +15,6 @@ def fct(x): self.assertEqual(1, 1) def test_assert_evaluates_to_scalar_fail(self): - """ - """ - def fct(x): return np.array(x) @@ -36,8 +24,7 @@ def fct(x): def _set_in_bounds(ptset, ilbds): - """ - """ + for (idx, col) in enumerate(ptset.T): if np.amin(col) != ilbds[idx, 0]: return False From 69596de7ca2a28417cc29fb1a333e2e4b8283f67 Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Tue, 18 Aug 2020 09:05:55 +0200 Subject: [PATCH 26/44] Fixed "wildcard-import" in ./tests --- .../test_diffeq/test_odefiltsmooth/test_ivp2filter.py | 3 ++- .../test_odefiltsmooth/test_odefiltsmooth.py | 2 +- .../test_gaussfiltsmooth/filtsmooth_testcases.py | 11 ++++++++--- .../test_gaussfiltsmooth/test_extendedkalman.py | 9 +++++++-- .../test_gaussfiltsmooth/test_kalman.py | 9 +++++++-- .../test_gaussfiltsmooth/test_unscentedkalman.py | 9 +++++++-- 6 files changed, 32 insertions(+), 11 deletions(-) diff --git a/tests/test_diffeq/test_odefiltsmooth/test_ivp2filter.py b/tests/test_diffeq/test_odefiltsmooth/test_ivp2filter.py index 055ac5dd8..492c1f4af 100644 --- a/tests/test_diffeq/test_odefiltsmooth/test_ivp2filter.py +++ b/tests/test_diffeq/test_odefiltsmooth/test_ivp2filter.py @@ -9,10 +9,11 @@ import numpy as np import unittest + from probnum.diffeq import ivp2filter, lotkavolterra, IBM from probnum.filtsmooth import ExtendedKalman, UnscentedKalman from probnum.prob import RandomVariable, Normal, Dirac -from tests.testing import * +from tests.testing import NumpyAssertions class Ivp2FilterTestCase(unittest.TestCase, NumpyAssertions): diff --git a/tests/test_diffeq/test_odefiltsmooth/test_odefiltsmooth.py b/tests/test_diffeq/test_odefiltsmooth/test_odefiltsmooth.py index fcdd15a61..f288985e4 100644 --- a/tests/test_diffeq/test_odefiltsmooth/test_odefiltsmooth.py +++ b/tests/test_diffeq/test_odefiltsmooth/test_odefiltsmooth.py @@ -14,7 +14,7 @@ import numpy as np -from probnum.diffeq.odefiltsmooth import * +from probnum.diffeq.odefiltsmooth import probsolve_ivp from probnum.diffeq import ode from probnum.prob import RandomVariable, Dirac diff --git a/tests/test_filtsmooth/test_gaussfiltsmooth/filtsmooth_testcases.py b/tests/test_filtsmooth/test_gaussfiltsmooth/filtsmooth_testcases.py index 2b55d203d..4a5528e5c 100644 --- a/tests/test_filtsmooth/test_gaussfiltsmooth/filtsmooth_testcases.py +++ b/tests/test_filtsmooth/test_gaussfiltsmooth/filtsmooth_testcases.py @@ -5,9 +5,14 @@ import numpy as np -from probnum.filtsmooth import * -from probnum.prob import * - +from probnum.filtsmooth import ( + DiscreteGaussianLTIModel, + generate_dd, + LTISDEModel, + generate_cd, + DiscreteGaussianModel, +) +from probnum.prob import RandomVariable, Normal from tests.testing import NumpyAssertions __all__ = [ diff --git a/tests/test_filtsmooth/test_gaussfiltsmooth/test_extendedkalman.py b/tests/test_filtsmooth/test_gaussfiltsmooth/test_extendedkalman.py index fbdeb60d0..8e16da08e 100644 --- a/tests/test_filtsmooth/test_gaussfiltsmooth/test_extendedkalman.py +++ b/tests/test_filtsmooth/test_gaussfiltsmooth/test_extendedkalman.py @@ -1,8 +1,13 @@ +import numpy as np import scipy.linalg -from probnum.filtsmooth.gaussfiltsmooth import * +from probnum.filtsmooth.gaussfiltsmooth import ExtendedKalman -from .filtsmooth_testcases import * +from .filtsmooth_testcases import ( + CarTrackingDDTestCase, + OrnsteinUhlenbeckCDTestCase, + 
PendulumNonlinearDDTestCase, +) np.random.seed(5472) diff --git a/tests/test_filtsmooth/test_gaussfiltsmooth/test_kalman.py b/tests/test_filtsmooth/test_gaussfiltsmooth/test_kalman.py index 082d2d358..e05ffde40 100644 --- a/tests/test_filtsmooth/test_gaussfiltsmooth/test_kalman.py +++ b/tests/test_filtsmooth/test_gaussfiltsmooth/test_kalman.py @@ -1,8 +1,13 @@ +import numpy as np import scipy.linalg -from probnum.filtsmooth.gaussfiltsmooth import * +from probnum.filtsmooth.gaussfiltsmooth import Kalman -from .filtsmooth_testcases import * +from .filtsmooth_testcases import ( + CarTrackingDDTestCase, + OrnsteinUhlenbeckCDTestCase, + PendulumNonlinearDDTestCase, +) np.random.seed(5472) VISUALISE = False # show plots or not? diff --git a/tests/test_filtsmooth/test_gaussfiltsmooth/test_unscentedkalman.py b/tests/test_filtsmooth/test_gaussfiltsmooth/test_unscentedkalman.py index b69f116c2..4fc3d359e 100644 --- a/tests/test_filtsmooth/test_gaussfiltsmooth/test_unscentedkalman.py +++ b/tests/test_filtsmooth/test_gaussfiltsmooth/test_unscentedkalman.py @@ -1,8 +1,13 @@ +import numpy as np import scipy.linalg -from probnum.filtsmooth.gaussfiltsmooth import * +from probnum.filtsmooth.gaussfiltsmooth import UnscentedKalman -from .filtsmooth_testcases import * +from .filtsmooth_testcases import ( + CarTrackingDDTestCase, + OrnsteinUhlenbeckCDTestCase, + PendulumNonlinearDDTestCase, +) np.random.seed(5472) From 0b9aa1fb369e94f7539e096f12d3087169d3976a Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Tue, 18 Aug 2020 09:07:54 +0200 Subject: [PATCH 27/44] Fixed "unused-import" in ./tests --- tests/test_diffeq/test_odefiltsmooth/test_ivp2filter.py | 2 +- tests/test_filtsmooth/test_gaussfiltsmooth/test_kalman.py | 6 +----- tests/test_prob/test_distributions/test_dirac.py | 4 +--- 3 files changed, 3 insertions(+), 9 deletions(-) diff --git a/tests/test_diffeq/test_odefiltsmooth/test_ivp2filter.py b/tests/test_diffeq/test_odefiltsmooth/test_ivp2filter.py index 492c1f4af..4a523d930 100644 --- a/tests/test_diffeq/test_odefiltsmooth/test_ivp2filter.py +++ b/tests/test_diffeq/test_odefiltsmooth/test_ivp2filter.py @@ -12,7 +12,7 @@ from probnum.diffeq import ivp2filter, lotkavolterra, IBM from probnum.filtsmooth import ExtendedKalman, UnscentedKalman -from probnum.prob import RandomVariable, Normal, Dirac +from probnum.prob import RandomVariable, Dirac from tests.testing import NumpyAssertions diff --git a/tests/test_filtsmooth/test_gaussfiltsmooth/test_kalman.py b/tests/test_filtsmooth/test_gaussfiltsmooth/test_kalman.py index e05ffde40..1be02cad3 100644 --- a/tests/test_filtsmooth/test_gaussfiltsmooth/test_kalman.py +++ b/tests/test_filtsmooth/test_gaussfiltsmooth/test_kalman.py @@ -3,11 +3,7 @@ from probnum.filtsmooth.gaussfiltsmooth import Kalman -from .filtsmooth_testcases import ( - CarTrackingDDTestCase, - OrnsteinUhlenbeckCDTestCase, - PendulumNonlinearDDTestCase, -) +from .filtsmooth_testcases import CarTrackingDDTestCase, OrnsteinUhlenbeckCDTestCase np.random.seed(5472) VISUALISE = False # show plots or not? 
diff --git a/tests/test_prob/test_distributions/test_dirac.py b/tests/test_prob/test_distributions/test_dirac.py index 7762e49ea..ca3cfd61a 100644 --- a/tests/test_prob/test_distributions/test_dirac.py +++ b/tests/test_prob/test_distributions/test_dirac.py @@ -1,9 +1,7 @@ """Tests for the Dirac distributions.""" -import unittest -from tests.testing import NumpyAssertions - import numpy as np +import unittest from probnum import prob From c7b4655cbbb1842da2d50087cba901d35e97345b Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Tue, 18 Aug 2020 09:20:13 +0200 Subject: [PATCH 28/44] Added exceptions for "cell-var-from-loop" in some tests --- tests/test_linalg/test_linearsolvers/test_linearsolvers.py | 2 ++ tests/test_quad/test_polynomial/test_clenshawcurtis.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/tests/test_linalg/test_linearsolvers/test_linearsolvers.py b/tests/test_linalg/test_linearsolvers/test_linearsolvers.py index eda244f47..29e93da45 100644 --- a/tests/test_linalg/test_linearsolvers/test_linearsolvers.py +++ b/tests/test_linalg/test_linearsolvers/test_linearsolvers.py @@ -243,6 +243,7 @@ def test_posterior_uncertainty_zero_in_explored_space(self): S = [] # search directions Y = [] # observations + # pylint: disable=cell-var-from-loop def callback_postparams(xk, Ak, Ainvk, sk, yk, alphak, resid): S.append(sk) Y.append(yk) @@ -394,6 +395,7 @@ def callback_iterates_CG(xk): # Define callback function to obtain search directions pls_iterates = [] + # pylint: disable=cell-var-from-loop def callback_iterates_PLS( xk, Ak, Ainvk, sk, yk, alphak, resid, **kwargs ): diff --git a/tests/test_quad/test_polynomial/test_clenshawcurtis.py b/tests/test_quad/test_polynomial/test_clenshawcurtis.py index ba0f00d3e..2ca88c317 100644 --- a/tests/test_quad/test_polynomial/test_clenshawcurtis.py +++ b/tests/test_quad/test_polynomial/test_clenshawcurtis.py @@ -45,6 +45,7 @@ def test_integrate_polyn_good_degree(self): random_poly = np.random.randint(1, 11, (number + 1, number + 1)) integrated_poly = npoly.polyint(npoly.polyint(random_poly).T).T + # pylint: disable=cell-var-from-loop def testpoly(val): return npoly.polyval2d(val[:, 0], val[:, 1], c=random_poly) @@ -77,6 +78,7 @@ def test_integrate_polyn_bad_degree(self): random_poly = np.random.randint(1, 11, config) integrated_poly = npoly.polyint(npoly.polyint(random_poly).T).T + # pylint: disable=cell-var-from-loop def testpoly(val): return npoly.polyval2d(val[:, 0], val[:, 1], c=random_poly) From 2502c79ebfdd73c853ccdabd3458d5b691d086ce Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Tue, 18 Aug 2020 09:24:17 +0200 Subject: [PATCH 29/44] Fixed "no-member" and "c-extension-no-member" --- pyproject.toml | 3 +++ tests/test_diffeq/test_steprule.py | 8 +++++--- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index e83c80bad..94a706d91 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -62,3 +62,6 @@ attribute-defined-outside-init, [tool.pylint.format] max-line-length = "88" + +[tool.pylint.master] +extension-pkg-whitelist = "numpy" diff --git a/tests/test_diffeq/test_steprule.py b/tests/test_diffeq/test_steprule.py index c97403681..fe5fc3cf4 100644 --- a/tests/test_diffeq/test_steprule.py +++ b/tests/test_diffeq/test_steprule.py @@ -1,8 +1,10 @@ -from probnum.diffeq import steprule -import unittest import numpy as np +import unittest + +from probnum.diffeq import steprule + -random_state = np.random.RandomState(seed=1234) +random_state = np.random.mtrand.RandomState(seed=1234) class 
TestConstantStep(unittest.TestCase): From 45dab4c63ce20add4e5c80346bd5bb9edd122e25 Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Tue, 18 Aug 2020 09:25:42 +0200 Subject: [PATCH 30/44] Added exception for a "consider-using-in" in a test --- tests/test_prob/test_distributions/test_dirac.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_prob/test_distributions/test_dirac.py b/tests/test_prob/test_distributions/test_dirac.py index ca3cfd61a..623359411 100644 --- a/tests/test_prob/test_distributions/test_dirac.py +++ b/tests/test_prob/test_distributions/test_dirac.py @@ -21,6 +21,7 @@ def test_sample_shapes(self): for sample_size in [1, (), 10, (4,), (3, 2)]: with self.subTest(): s = prob.Dirac(support=supp).sample(size=sample_size) + # pylint: disable=consider-using-in if sample_size == 1 or sample_size == (): self.assertEqual(np.shape(supp), np.shape(s)) elif isinstance(sample_size, tuple): From 4b41e9e03695c38989950aad98eff1a82bf7a16c Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Tue, 18 Aug 2020 09:26:33 +0200 Subject: [PATCH 31/44] Add tests back into pylint, but with even more exceptions --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index a1bde641e..1e6ce6941 100644 --- a/tox.ini +++ b/tox.ini @@ -52,4 +52,4 @@ description = Code linting with pylint deps = pylint commands = pylint src - # pylint test + pylint tests --disable="line-too-long,duplicate-code,missing-class-docstring,unnecessary-pass" \ No newline at end of file From b46e5a2d97e1e066ddc2c44b1dd24aeeb9e5c294 Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Tue, 18 Aug 2020 09:30:36 +0200 Subject: [PATCH 32/44] Final exception to pass the tests --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 94a706d91..b4e6092d5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,6 +54,7 @@ unused-variable, broad-except, abstract-class-instantiated, protected-access, +duplicate-code, too-many-instance-attributes, too-many-statements, From e14285937135108310c21027f16b765bb84816e1 Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Tue, 18 Aug 2020 09:56:00 +0200 Subject: [PATCH 33/44] Fixed "wrong-import-order" by adding an in-file exception --- pyproject.toml | 1 - src/probnum/__init__.py | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index b4e6092d5..563cd4504 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -47,7 +47,6 @@ redefined-builtin, too-few-public-methods, too-many-locals, missing-function-docstring, -wrong-import-order, no-self-use, too-many-lines, unused-variable, diff --git a/src/probnum/__init__.py b/src/probnum/__init__.py index 54f6bdacc..7e91cbb73 100644 --- a/src/probnum/__init__.py +++ b/src/probnum/__init__.py @@ -22,6 +22,7 @@ diffeq Probabilistic solvers for ordinary differential equations. """ +# pylint: disable=wrong-import-order from . import diffeq from . 
import filtsmooth From a7b82364d1594971382fa5a9d4dd91ee022ec62b Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Tue, 18 Aug 2020 10:10:16 +0200 Subject: [PATCH 34/44] Grouped the docstring exceptions --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 563cd4504..6fd1bb011 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,6 +39,7 @@ no-else-raise, fixme, too-many-arguments, missing-module-docstring, +missing-function-docstring, abstract-method, unused-argument, too-many-branches, @@ -46,7 +47,6 @@ arguments-differ, redefined-builtin, too-few-public-methods, too-many-locals, -missing-function-docstring, no-self-use, too-many-lines, unused-variable, From e75199dcb102e758875ae2f0f309bdeaa5275ef7 Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Tue, 18 Aug 2020 10:16:40 +0200 Subject: [PATCH 35/44] Fixed "wrong-import-order" in tests --- tests/test_diffeq/test_odefiltsmooth/test_ivp2filter.py | 6 +++--- tests/test_diffeq/test_steprule.py | 4 ++-- .../test_gaussfiltsmooth/test_kalmanposterior.py | 4 ++-- .../test_linalg/test_linearsolvers/test_linearsolvers.py | 9 ++++----- tests/test_linalg/test_linops/test_linearoperators.py | 5 ++--- tests/test_prob/test_distributions/test_dirac.py | 2 +- tests/test_prob/test_distributions/test_normal.py | 5 ++--- 7 files changed, 16 insertions(+), 19 deletions(-) diff --git a/tests/test_diffeq/test_odefiltsmooth/test_ivp2filter.py b/tests/test_diffeq/test_odefiltsmooth/test_ivp2filter.py index 4a523d930..911146427 100644 --- a/tests/test_diffeq/test_odefiltsmooth/test_ivp2filter.py +++ b/tests/test_diffeq/test_odefiltsmooth/test_ivp2filter.py @@ -6,13 +6,13 @@ * Does the measurement model do what we think it does * Are the initial values initialised truthfully (y0, f(y0), Jf(y0)f(y0), ...) 
""" +import unittest import numpy as np -import unittest -from probnum.diffeq import ivp2filter, lotkavolterra, IBM +from probnum.diffeq import IBM, ivp2filter, lotkavolterra from probnum.filtsmooth import ExtendedKalman, UnscentedKalman -from probnum.prob import RandomVariable, Dirac +from probnum.prob import Dirac, RandomVariable from tests.testing import NumpyAssertions diff --git a/tests/test_diffeq/test_steprule.py b/tests/test_diffeq/test_steprule.py index fe5fc3cf4..bacb7f45e 100644 --- a/tests/test_diffeq/test_steprule.py +++ b/tests/test_diffeq/test_steprule.py @@ -1,8 +1,8 @@ -import numpy as np import unittest -from probnum.diffeq import steprule +import numpy as np +from probnum.diffeq import steprule random_state = np.random.mtrand.RandomState(seed=1234) diff --git a/tests/test_filtsmooth/test_gaussfiltsmooth/test_kalmanposterior.py b/tests/test_filtsmooth/test_gaussfiltsmooth/test_kalmanposterior.py index 51935b0f6..b9a87848c 100644 --- a/tests/test_filtsmooth/test_gaussfiltsmooth/test_kalmanposterior.py +++ b/tests/test_filtsmooth/test_gaussfiltsmooth/test_kalmanposterior.py @@ -1,10 +1,10 @@ import numpy as np -from probnum.prob._randomvariablelist import _RandomVariableList from probnum.filtsmooth.gaussfiltsmooth import Kalman +from probnum.prob._randomvariablelist import _RandomVariableList +from tests.testing import NumpyAssertions from .filtsmooth_testcases import CarTrackingDDTestCase -from tests.testing import NumpyAssertions class TestKalmanPosterior(CarTrackingDDTestCase, NumpyAssertions): diff --git a/tests/test_linalg/test_linearsolvers/test_linearsolvers.py b/tests/test_linalg/test_linearsolvers/test_linearsolvers.py index 29e93da45..080fd4afb 100644 --- a/tests/test_linalg/test_linearsolvers/test_linearsolvers.py +++ b/tests/test_linalg/test_linearsolvers/test_linearsolvers.py @@ -1,17 +1,16 @@ """Tests for linear solvers.""" - -import unittest -from tests.testing import NumpyAssertions import os +import unittest import numpy as np import scipy.sparse import scipy.sparse.linalg -from probnum import prob -from probnum import linalg +from probnum import linalg, prob from probnum.linalg import linops +from tests.testing import NumpyAssertions + class LinearSolverTestCase(unittest.TestCase, NumpyAssertions): """General test case for linear solvers.""" diff --git a/tests/test_linalg/test_linops/test_linearoperators.py b/tests/test_linalg/test_linops/test_linearoperators.py index d3dc9624f..acf8ea32c 100644 --- a/tests/test_linalg/test_linops/test_linearoperators.py +++ b/tests/test_linalg/test_linops/test_linearoperators.py @@ -1,13 +1,12 @@ """Tests for linear operators.""" - import itertools - import unittest -from tests.testing import NumpyAssertions + import numpy as np import scipy.sparse from probnum.linalg import linops +from tests.testing import NumpyAssertions class LinearOperatorTestCase(unittest.TestCase, NumpyAssertions): diff --git a/tests/test_prob/test_distributions/test_dirac.py b/tests/test_prob/test_distributions/test_dirac.py index 623359411..a32eb04f2 100644 --- a/tests/test_prob/test_distributions/test_dirac.py +++ b/tests/test_prob/test_distributions/test_dirac.py @@ -1,7 +1,7 @@ """Tests for the Dirac distributions.""" +import unittest import numpy as np -import unittest from probnum import prob diff --git a/tests/test_prob/test_distributions/test_normal.py b/tests/test_prob/test_distributions/test_normal.py index 9ec61d593..9c773b1ee 100644 --- a/tests/test_prob/test_distributions/test_normal.py +++ 
b/tests/test_prob/test_distributions/test_normal.py @@ -1,8 +1,6 @@ """Tests for the normal distribution.""" - -import unittest import itertools -from tests.testing import NumpyAssertions +import unittest import numpy as np import scipy.sparse @@ -10,6 +8,7 @@ from probnum import prob from probnum.linalg import linops +from tests.testing import NumpyAssertions def _random_spd_matrix(D=10): From eb412b6b572d04ca60e4e29e51ab54daa786a809 Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Tue, 18 Aug 2020 10:30:01 +0200 Subject: [PATCH 36/44] Fixed "unused-variable" --- pyproject.toml | 1 - src/probnum/diffeq/ode/ivp.py | 2 ++ src/probnum/linalg/linops/linearoperators.py | 2 +- src/probnum/prob/distributions/normal.py | 2 +- src/probnum/quad/bayesian/bayesquadrature.py | 4 ++-- src/probnum/quad/polynomial/clenshawcurtis.py | 2 +- tox.ini | 2 +- 7 files changed, 8 insertions(+), 7 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 6fd1bb011..d2d8a2a4b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -49,7 +49,6 @@ too-few-public-methods, too-many-locals, no-self-use, too-many-lines, -unused-variable, broad-except, abstract-class-instantiated, protected-access, diff --git a/src/probnum/diffeq/ode/ivp.py b/src/probnum/diffeq/ode/ivp.py index cb28e9357..756798e37 100644 --- a/src/probnum/diffeq/ode/ivp.py +++ b/src/probnum/diffeq/ode/ivp.py @@ -4,6 +4,7 @@ there might be more ode-based problems, such as bvp. """ +# pylint: disable=unused-variable import numpy as np @@ -284,6 +285,7 @@ class IVP(ODE): """ def __init__(self, timespan, initrv, rhs, jac=None, hess=None, sol=None): + self.initrv = initrv super().__init__(timespan=timespan, rhs=rhs, jac=jac, hess=hess, sol=sol) diff --git a/src/probnum/linalg/linops/linearoperators.py b/src/probnum/linalg/linops/linearoperators.py index 7e3ace187..1e971629e 100644 --- a/src/probnum/linalg/linops/linearoperators.py +++ b/src/probnum/linalg/linops/linearoperators.py @@ -567,7 +567,7 @@ def det(self): return np.linalg.det(self.A) def logabsdet(self): - sign, logdet = np.linalg.slogdet(self.A) + _sign, logdet = np.linalg.slogdet(self.A) return logdet def trace(self): diff --git a/src/probnum/prob/distributions/normal.py b/src/probnum/prob/distributions/normal.py index 2bb3a4e43..21b1b370b 100644 --- a/src/probnum/prob/distributions/normal.py +++ b/src/probnum/prob/distributions/normal.py @@ -605,7 +605,7 @@ def sample(self, size=()): def __matmul__(self, other): if isinstance(other, Dirac): - delta = other.mean() + # delta = other.mean() raise NotImplementedError return NotImplemented diff --git a/src/probnum/quad/bayesian/bayesquadrature.py b/src/probnum/quad/bayesian/bayesquadrature.py index 3876a7948..91b39b71f 100644 --- a/src/probnum/quad/bayesian/bayesquadrature.py +++ b/src/probnum/quad/bayesian/bayesquadrature.py @@ -174,9 +174,9 @@ def integrate(self, fun, fun0, domain, nevals, **kwargs): F = None # Iteration - for i in range(nevals): + for _ in range(nevals): # Predictive Distribution - fun_pred = None + # fun_pred = None # Observation diff --git a/src/probnum/quad/polynomial/clenshawcurtis.py b/src/probnum/quad/polynomial/clenshawcurtis.py index aa0b01b17..9cbcdfe79 100644 --- a/src/probnum/quad/polynomial/clenshawcurtis.py +++ b/src/probnum/quad/polynomial/clenshawcurtis.py @@ -117,7 +117,7 @@ def _compute_weights(npts, ndim, ilbds): """ if npts ** ndim * ndim >= 1e9: raise MemoryError("Weights for tensor-mesh too large for memory.") - num_tiles = np.arange(ndim) + # num_tiles = np.arange(ndim) num_reps = ndim - np.arange(ndim) 
- 1 weights = _compute_weights_1d(npts, ndim, ilbds[0]) prodweights = np.repeat(weights, npts ** (num_reps[0])) diff --git a/tox.ini b/tox.ini index 1e6ce6941..bc5a47273 100644 --- a/tox.ini +++ b/tox.ini @@ -52,4 +52,4 @@ description = Code linting with pylint deps = pylint commands = pylint src - pylint tests --disable="line-too-long,duplicate-code,missing-class-docstring,unnecessary-pass" \ No newline at end of file + pylint tests --disable="line-too-long,duplicate-code,missing-class-docstring,unnecessary-pass,unused-variable" \ No newline at end of file From adad439dcd42be558ccb326f0f6bfce426f2eb6f Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Tue, 18 Aug 2020 10:33:53 +0200 Subject: [PATCH 37/44] Fixed "wrong-impor-order" in tests for real --- tests/test_prob/test_randomvariable.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_prob/test_randomvariable.py b/tests/test_prob/test_randomvariable.py index 5e8a732f3..33ee9c2d9 100644 --- a/tests/test_prob/test_randomvariable.py +++ b/tests/test_prob/test_randomvariable.py @@ -6,9 +6,9 @@ import numpy as np import scipy.stats -from tests.testing import NumpyAssertions from probnum import prob from probnum.linalg import linops +from tests.testing import NumpyAssertions class RandomVariableTestCase(unittest.TestCase, NumpyAssertions): From 22311d8eec62e9f11f12bc65e91fdceb4ec02348 Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Tue, 18 Aug 2020 11:21:41 +0200 Subject: [PATCH 38/44] Added mccabe complexity checker --- pyproject.toml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index d2d8a2a4b..12ab6ef61 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -53,6 +53,7 @@ broad-except, abstract-class-instantiated, protected-access, duplicate-code, +too-complex, too-many-instance-attributes, too-many-statements, @@ -64,3 +65,10 @@ max-line-length = "88" [tool.pylint.master] extension-pkg-whitelist = "numpy" +# load-plugins="pylint.extensions.docparams,pylint.extensions.docstyle" +load-plugins = """ +pylint.extensions.mccabe +""" + +[tool.pylint.design] +max-complexity = "14" From 970f953550b03c9db2adb0965d2306848bfb8504 Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Tue, 18 Aug 2020 11:28:49 +0200 Subject: [PATCH 39/44] Added more pylint extensions --- pyproject.toml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 12ab6ef61..d422125dd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -65,9 +65,13 @@ max-line-length = "88" [tool.pylint.master] extension-pkg-whitelist = "numpy" -# load-plugins="pylint.extensions.docparams,pylint.extensions.docstyle" +# pylint.extensions.docstyle, +# pylint.extensions.check_elif, +# pylint.extensions.docparams, load-plugins = """ -pylint.extensions.mccabe +pylint.extensions.mccabe, +pylint.extensions.redefined_variable_type, +pylint.extensions.overlapping_exceptions, """ [tool.pylint.design] From 6e847a5ca0916bab97348f04657d22c303ae6131 Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Tue, 18 Aug 2020 18:10:18 +0200 Subject: [PATCH 40/44] Fixed sphinx warnings --- .../filtsmooth/gaussfiltsmooth/unscentedtransform.py | 2 +- .../filtsmooth/statespace/continuous/linearsdemodel.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/probnum/filtsmooth/gaussfiltsmooth/unscentedtransform.py b/src/probnum/filtsmooth/gaussfiltsmooth/unscentedtransform.py index a1b9b60bb..baa85e741 100644 --- 
a/src/probnum/filtsmooth/gaussfiltsmooth/unscentedtransform.py +++ b/src/probnum/filtsmooth/gaussfiltsmooth/unscentedtransform.py @@ -95,7 +95,7 @@ def propagate(self, time, sigmapts, modelfct): Time :math:`t` which is passed on to the modelfunction. sigmapts : np.ndarray, shape=(2 N+1, N) Sigma points (N is the spatial dimension of the dynamic model) - modelfct : callable, signature=(t, x, **kwargs) + modelfct : callable, signature=(t, x, \\**kwargs) Function through which to propagate Returns diff --git a/src/probnum/filtsmooth/statespace/continuous/linearsdemodel.py b/src/probnum/filtsmooth/statespace/continuous/linearsdemodel.py index dee581c99..71c89016d 100644 --- a/src/probnum/filtsmooth/statespace/continuous/linearsdemodel.py +++ b/src/probnum/filtsmooth/statespace/continuous/linearsdemodel.py @@ -25,15 +25,15 @@ class LinearSDEModel(continuousmodel.ContinuousModel): Parameters ---------- - driftmatrixfct : callable, signature=(t, **kwargs) + driftmatrixfct : callable, signature=(t, \\**kwargs) This is F = F(t). The evaluations of this function are called the drift(matrix) of the SDE. Returns np.ndarray with shape=(n, n) - forcfct : callable, signature=(t, **kwargs) + forcfct : callable, signature=(t, \\**kwargs) This is u = u(t). Evaluations of this function are called the force(vector) of the SDE. Returns np.ndarray with shape=(n,) - dispmatrixfct : callable, signature=(t, **kwargs) + dispmatrixfct : callable, signature=(t, \\**kwargs) This is L = L(t). Evaluations of this function are called the dispersion(matrix) of the SDE. Returns np.ndarray with shape=(n, s) From e93210225a0a0088c73832c967ba4b057e820b86 Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Wed, 19 Aug 2020 08:36:58 +0200 Subject: [PATCH 41/44] Changed pylint to run on a per-module basis, more srictly --- pyproject.toml | 20 -------------------- tox.ini | 13 +++++++++++-- 2 files changed, 11 insertions(+), 22 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index d422125dd..5f8435a0f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,28 +36,8 @@ invalid-name, no-else-return, no-else-raise, -fixme, -too-many-arguments, missing-module-docstring, missing-function-docstring, -abstract-method, -unused-argument, -too-many-branches, -arguments-differ, -redefined-builtin, -too-few-public-methods, -too-many-locals, -no-self-use, -too-many-lines, -broad-except, -abstract-class-instantiated, -protected-access, -duplicate-code, -too-complex, - -too-many-instance-attributes, -too-many-statements, -attribute-defined-outside-init, """ [tool.pylint.format] diff --git a/tox.ini b/tox.ini index bc5a47273..9d3b923bd 100644 --- a/tox.ini +++ b/tox.ini @@ -46,10 +46,19 @@ commands = asv machine --yes asv dev + + + [testenv:pylint] basepython = python3 description = Code linting with pylint deps = pylint commands = - pylint src - pylint tests --disable="line-too-long,duplicate-code,missing-class-docstring,unnecessary-pass,unused-variable" \ No newline at end of file + # pylint src + pylint src/probnum/diffeq --disable="protected-access,abstract-class-instantiated,too-many-locals,too-few-public-methods,too-many-arguments,unused-argument" + pylint src/probnum/filtsmooth --disable="duplicate-code,protected-access,no-self-use,too-many-locals,arguments-differ,too-many-arguments,unused-argument" + pylint src/probnum/linalg 
--disable="attribute-defined-outside-init,too-many-statements,too-many-instance-attributes,too-complex,protected-access,too-many-lines,no-self-use,too-many-locals,redefined-builtin,arguments-differ,abstract-method,too-many-arguments,too-many-branches,duplicate-code,unused-argument,fixme" + pylint src/probnum/prob --disable="too-many-instance-attributes,broad-except,arguments-differ,abstract-method,too-many-arguments,protected-access,duplicate-code,unused-argument,fixme" + pylint src/probnum/quad --disable="attribute-defined-outside-init,too-few-public-methods,redefined-builtin,arguments-differ,unused-argument" + pylint src/probnum/utils --disable="duplicate-code" + pylint tests --disable="line-too-long,duplicate-code,missing-class-docstring,unnecessary-pass,unused-variable,protected-access,attribute-defined-outside-init,no-self-use,abstract-class-instantiated,too-many-arguments,too-many-instance-attributes,too-many-locals,unused-argument,fixme" From f21a7422c192836b490ff288e39d976bfec100b4 Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Wed, 19 Aug 2020 10:09:43 +0200 Subject: [PATCH 42/44] Added the benchmarks to pylint --- benchmarks/__init__.py | 1 - tox.ini | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/__init__.py b/benchmarks/__init__.py index 8b1378917..e69de29bb 100644 --- a/benchmarks/__init__.py +++ b/benchmarks/__init__.py @@ -1 +0,0 @@ - diff --git a/tox.ini b/tox.ini index 9d3b923bd..44b7267d3 100644 --- a/tox.ini +++ b/tox.ini @@ -62,3 +62,4 @@ commands = pylint src/probnum/quad --disable="attribute-defined-outside-init,too-few-public-methods,redefined-builtin,arguments-differ,unused-argument" pylint src/probnum/utils --disable="duplicate-code" pylint tests --disable="line-too-long,duplicate-code,missing-class-docstring,unnecessary-pass,unused-variable,protected-access,attribute-defined-outside-init,no-self-use,abstract-class-instantiated,too-many-arguments,too-many-instance-attributes,too-many-locals,unused-argument,fixme" + pylint benchmarks --disable="attribute-defined-outside-init,unused-argument,redefined-builtin,missing-class-docstring,duplicate-code" From 4470023242b846d4a82eaa4e7e3b193f4c842d4a Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Wed, 19 Aug 2020 10:11:08 +0200 Subject: [PATCH 43/44] More comments for the pylint config in pyproject.toml --- pyproject.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 5f8435a0f..161d6e61e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -24,9 +24,7 @@ exclude = ''' [tool.pylint.messages_control] -# From black: bad-continuation, bad-whitespace -# Exceptions we want to have: invalid-name -# Unclear to me: no-else-return, no-else-raise +# Exceptions suggested from Black: bad-continuation, bad-whitespace disable = """ bad-continuation, bad-whitespace, @@ -39,12 +37,14 @@ no-else-raise, missing-module-docstring, missing-function-docstring, """ +# Many more `disable`s are defined in `./tox.ini` on a per-module basis! 
[tool.pylint.format] max-line-length = "88" [tool.pylint.master] extension-pkg-whitelist = "numpy" +# Extensions that might be of interest in the future: # pylint.extensions.docstyle, # pylint.extensions.check_elif, # pylint.extensions.docparams, From 7c282b3e756592ed31c37beaaab0d508feeea100 Mon Sep 17 00:00:00 2001 From: Nathanael Bosch Date: Wed, 19 Aug 2020 11:03:07 +0200 Subject: [PATCH 44/44] Moved the "missing docu" exceptions to a per-module basis --- pyproject.toml | 3 --- tox.ini | 16 ++++++++-------- 2 files changed, 8 insertions(+), 11 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 161d6e61e..f44e6bfb2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,9 +33,6 @@ invalid-name, no-else-return, no-else-raise, - -missing-module-docstring, -missing-function-docstring, """ # Many more `disable`s are defined in `./tox.ini` on a per-module basis! diff --git a/tox.ini b/tox.ini index 44b7267d3..79b48f26a 100644 --- a/tox.ini +++ b/tox.ini @@ -55,11 +55,11 @@ description = Code linting with pylint deps = pylint commands = # pylint src - pylint src/probnum/diffeq --disable="protected-access,abstract-class-instantiated,too-many-locals,too-few-public-methods,too-many-arguments,unused-argument" - pylint src/probnum/filtsmooth --disable="duplicate-code,protected-access,no-self-use,too-many-locals,arguments-differ,too-many-arguments,unused-argument" - pylint src/probnum/linalg --disable="attribute-defined-outside-init,too-many-statements,too-many-instance-attributes,too-complex,protected-access,too-many-lines,no-self-use,too-many-locals,redefined-builtin,arguments-differ,abstract-method,too-many-arguments,too-many-branches,duplicate-code,unused-argument,fixme" - pylint src/probnum/prob --disable="too-many-instance-attributes,broad-except,arguments-differ,abstract-method,too-many-arguments,protected-access,duplicate-code,unused-argument,fixme" - pylint src/probnum/quad --disable="attribute-defined-outside-init,too-few-public-methods,redefined-builtin,arguments-differ,unused-argument" - pylint src/probnum/utils --disable="duplicate-code" - pylint tests --disable="line-too-long,duplicate-code,missing-class-docstring,unnecessary-pass,unused-variable,protected-access,attribute-defined-outside-init,no-self-use,abstract-class-instantiated,too-many-arguments,too-many-instance-attributes,too-many-locals,unused-argument,fixme" - pylint benchmarks --disable="attribute-defined-outside-init,unused-argument,redefined-builtin,missing-class-docstring,duplicate-code" + pylint src/probnum/diffeq --disable="protected-access,abstract-class-instantiated,too-many-locals,too-few-public-methods,too-many-arguments,unused-argument,missing-module-docstring,missing-function-docstring" + pylint src/probnum/filtsmooth --disable="duplicate-code,protected-access,no-self-use,too-many-locals,arguments-differ,too-many-arguments,unused-argument,missing-module-docstring,missing-function-docstring" + pylint src/probnum/linalg --disable="attribute-defined-outside-init,too-many-statements,too-many-instance-attributes,too-complex,protected-access,too-many-lines,no-self-use,too-many-locals,redefined-builtin,arguments-differ,abstract-method,too-many-arguments,too-many-branches,duplicate-code,unused-argument,fixme,missing-module-docstring" + pylint src/probnum/prob --disable="too-many-instance-attributes,broad-except,arguments-differ,abstract-method,too-many-arguments,protected-access,duplicate-code,unused-argument,fixme,missing-module-docstring,missing-function-docstring" + pylint src/probnum/quad 
--disable="attribute-defined-outside-init,too-few-public-methods,redefined-builtin,arguments-differ,unused-argument,missing-module-docstring" + pylint src/probnum/utils --disable="duplicate-code,missing-module-docstring,missing-function-docstring" + pylint tests --disable="line-too-long,duplicate-code,missing-class-docstring,unnecessary-pass,unused-variable,protected-access,attribute-defined-outside-init,no-self-use,abstract-class-instantiated,too-many-arguments,too-many-instance-attributes,too-many-locals,unused-argument,fixme,missing-module-docstring,missing-function-docstring" + pylint benchmarks --disable="attribute-defined-outside-init,unused-argument,redefined-builtin,missing-class-docstring,duplicate-code,missing-function-docstring"