
Bugfixes for Jupyter notebooks
marvinpfoertner committed Aug 28, 2020
1 parent fe0a411 commit d34bb78
Showing 10 changed files with 13,271 additions and 13,333 deletions.
108 changes: 50 additions & 58 deletions docs/source/introduction/quickstart.ipynb
19,100 changes: 9,520 additions & 9,580 deletions docs/source/tutorials/adaptive_steps_odefilter.ipynb
2,911 changes: 1,423 additions & 1,488 deletions docs/source/tutorials/galerkin_method.ipynb
311 changes: 135 additions & 176 deletions docs/source/tutorials/linear_operators.ipynb
312 changes: 137 additions & 175 deletions docs/source/tutorials/linear_systems.ipynb
3,303 changes: 1,736 additions & 1,567 deletions docs/source/tutorials/random_variables.ipynb
442 changes: 209 additions & 233 deletions docs/source/tutorials/uncertainties_odefilter.ipynb

(The notebook diffs are large and are not rendered here.)

12 changes: 8 additions & 4 deletions src/probnum/random_variables/_dirac.py
@@ -61,6 +61,10 @@ def __init__(
 
         self._support = support
 
+        support_floating = self._support.astype(
+            np.promote_types(self._support.dtype, np.float_)
+        )
+
         super().__init__(
             shape=self._support.shape,
             dtype=self._support.dtype,
@@ -71,17 +75,17 @@ def __init__(
             pmf=lambda x: np.float_(1.0 if np.all(x == self._support) else 0.0),
             cdf=lambda x: np.float_(0.0 if np.any(x < self._support) else 0.0),
             mode=lambda: self._support,
-            median=lambda: self._support,
-            mean=lambda: self._support,
+            median=lambda: support_floating,
+            mean=lambda: support_floating,
             cov=lambda: np.zeros_like(  # pylint: disable=unexpected-keyword-arg
-                self._support,
+                support_floating,
                 shape=(
                     (self._support.size, self._support.size)
                     if self._support.ndim > 0
                     else ()
                 ),
             ),
-            var=lambda: np.zeros_like(self._support),
+            var=lambda: np.zeros_like(support_floating),
         )
 
     @property
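
The effect of the _dirac.py change is that conceptually real-valued statistics (mean, median, cov, var) are now reported in a floating-point dtype even when the support is an integer array, while samples and the mode keep the support's dtype. A rough usage sketch, assuming the probnum API of this era exposes the class as probnum.random_variables.Dirac with a `support` argument (check the installed version for the exact import path):

    import numpy as np
    from probnum import random_variables as rvs

    # Hypothetical example; constructor/property names per the assumed API.
    rv = rvs.Dirac(support=np.array([1, 2, 3]))  # integer support

    # Samples and the mode keep the integer dtype of the support ...
    print(rv.sample().dtype)            # e.g. int64
    # ... while mean/median/cov/var are promoted to floating point.
    print(rv.mean.dtype)                # float64
    print(rv.cov.shape, rv.cov.dtype)   # (3, 3) float64
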
66 changes: 23 additions & 43 deletions src/probnum/random_variables/_normal.py
@@ -440,31 +440,19 @@ def _univariate_in_support(x: _ValueType) -> bool:
         return np.isfinite(x)
 
     def _univariate_pdf(self, x: _ValueType) -> np.float_:
-        return _utils.as_numpy_scalar(
-            scipy.stats.norm.pdf(x, loc=self._mean, scale=self._cov),
-            dtype=np.float_,
-        )
+        return scipy.stats.norm.pdf(x, loc=self._mean, scale=self._cov)
 
     def _univariate_logpdf(self, x: _ValueType) -> np.float_:
-        return _utils.as_numpy_scalar(
-            scipy.stats.norm.logpdf(x, loc=self._mean, scale=self._cov),
-            dtype=np.float_,
-        )
+        return scipy.stats.norm.logpdf(x, loc=self._mean, scale=self._cov)
 
     def _univariate_cdf(self, x: _ValueType) -> np.float_:
-        return _utils.as_numpy_scalar(
-            scipy.stats.norm.cdf(x, loc=self._mean, scale=self._cov),
-            dtype=np.float_,
-        )
+        return scipy.stats.norm.cdf(x, loc=self._mean, scale=self._cov)
 
     def _univariate_logcdf(self, x: _ValueType) -> np.float_:
-        return _utils.as_numpy_scalar(
-            scipy.stats.norm.logcdf(x, loc=self._mean, scale=self._cov),
-            dtype=np.float_,
-        )
+        return scipy.stats.norm.logcdf(x, loc=self._mean, scale=self._cov)
 
     def _univariate_quantile(self, p: FloatArgType) -> np.floating:
-        return _utils.as_numpy_scalar(scipy.stats.norm.ppf(p), dtype=self.dtype)
+        return scipy.stats.norm.ppf(p)
 
     def _univariate_entropy(self: _ValueType) -> np.float_:
         return _utils.as_numpy_scalar(
@@ -504,48 +492,40 @@ def _dense_in_support(x: _ValueType) -> bool:
         return np.all(np.isfinite(Normal._arg_todense(x)))
 
     def _dense_pdf(self, x: _ValueType) -> np.float_:
-        return _utils.as_numpy_scalar(
-            scipy.stats.multivariate_normal.pdf(
-                Normal._arg_todense(x).ravel(),
-                mean=self._dense_mean.ravel(),
-                cov=self._dense_cov,
-            ),
-            dtype=np.float_,
+        return scipy.stats.multivariate_normal.pdf(
+            Normal._arg_todense(x).reshape(x.shape[: -self.ndim] + (-1,)),
+            mean=self._dense_mean.ravel(),
+            cov=self._dense_cov,
         )
 
     def _dense_logpdf(self, x: _ValueType) -> np.float_:
-        return _utils.as_numpy_scalar(
-            scipy.stats.multivariate_normal.logpdf(
-                Normal._arg_todense(x).ravel(),
-                mean=self._dense_mean.ravel(),
-                cov=self._dense_cov,
-            )
+        return scipy.stats.multivariate_normal.logpdf(
+            Normal._arg_todense(x).reshape(x.shape[: -self.ndim] + (-1,)),
+            mean=self._dense_mean.ravel(),
+            cov=self._dense_cov,
         )
 
     def _dense_cdf(self, x: _ValueType) -> np.float_:
-        return _utils.as_numpy_scalar(
-            scipy.stats.multivariate_normal.cdf(
-                Normal._arg_todense(x).ravel(),
-                mean=self._dense_mean.ravel(),
-                cov=self._dense_cov,
-            )
+        return scipy.stats.multivariate_normal.cdf(
+            Normal._arg_todense(x).reshape(x.shape[: -self.ndim] + (-1,)),
+            mean=self._dense_mean.ravel(),
+            cov=self._dense_cov,
         )
 
     def _dense_logcdf(self, x: _ValueType) -> np.float_:
-        return _utils.as_numpy_scalar(
-            scipy.stats.multivariate_normal.logcdf(
-                Normal._arg_todense(x).ravel(),
-                mean=self._dense_mean.ravel(),
-                cov=self._dense_cov,
-            )
+        return scipy.stats.multivariate_normal.logcdf(
+            Normal._arg_todense(x).reshape(x.shape[: -self.ndim] + (-1,)),
+            mean=self._dense_mean.ravel(),
+            cov=self._dense_cov,
         )
 
     def _dense_entropy(self) -> np.float_:
         return _utils.as_numpy_scalar(
             scipy.stats.multivariate_normal.entropy(
                 mean=self._dense_mean.ravel(),
                 cov=self._dense_cov,
-            )
+            ),
+            dtype=np.float_,
         )
 
     # Matrixvariate Gaussian with Kronecker covariance
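
The _normal.py changes drop the scalar coercion around the scipy.stats calls and, for dense (multi- and matrixvariate) Gaussians, reshape the input so that a leading batch of points yields one density per point instead of being flattened into a single long vector. A minimal sketch of that reshape idea, using scipy directly rather than probnum internals:

    import numpy as np
    import scipy.stats

    mean = np.zeros(4)
    cov = np.eye(4)

    # Three 2x2 matrix-valued inputs; a single event has ndim == 2.
    x = np.random.default_rng(0).normal(size=(3, 2, 2))

    event_ndim = 2
    flat = x.reshape(x.shape[:-event_ndim] + (-1,))  # shape (3, 4)

    # One density per batch element; x.ravel() would instead produce a
    # single length-12 vector and fail against the 4-dimensional Gaussian.
    densities = scipy.stats.multivariate_normal.pdf(flat, mean=mean, cov=cov)
    print(densities.shape)  # (3,)
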
39 changes: 30 additions & 9 deletions src/probnum/random_variables/_random_variable.py
@@ -5,7 +5,7 @@
 of probabilistic numerical methods.
 """
 
-from typing import Any, Callable, Dict, Generic, Optional, Tuple, TypeVar
+from typing import Any, Callable, Dict, Generic, Optional, Tuple, TypeVar, Union
 
 import numpy as np
 
@@ -361,7 +361,9 @@ def entropy(self) -> np.float_:
 
         entropy = self.__entropy()
 
-        entropy = RandomVariable._ensure_numpy_float("entropy", entropy)
+        entropy = RandomVariable._ensure_numpy_float(
+            "entropy", entropy, force_scalar=True
+        )
 
         return entropy
 
@@ -694,19 +696,38 @@ def _check_property_value(
         )
 
     @classmethod
-    def _ensure_numpy_float(cls, name: str, value: Any) -> np.float_:
-        if not isinstance(value, np.float_):
+    def _ensure_numpy_float(
+        cls, name: str, value: Any, force_scalar: bool = False
+    ) -> Union[np.float_, np.ndarray]:
+        if np.isscalar(value):
+            if not isinstance(value, np.float_):
+                try:
+                    value = _utils.as_numpy_scalar(value, dtype=np.float_)
+                except TypeError as err:
+                    raise TypeError(
+                        f"The function `{name}` specified via the constructor of "
+                        f"`{cls.__name__}` must return a scalar value that can be "
+                        f"converted to a `np.float_`, which is not possible for "
+                        f"{value} of type {type(value)}."
+                    ) from err
+        elif not force_scalar:
             try:
-                value = _utils.as_numpy_scalar(value, dtype=np.float_)
+                value = np.asarray(value, dtype=np.float_)
             except TypeError as err:
                 raise TypeError(
                     f"The function `{name}` specified via the constructor of "
-                    f"`{cls.__name__}` must return a scalar value that can be "
-                    f"converted to a `np.float_`, which is possible for {value} "
-                    f"of type {type(value)}."
+                    f"`{cls.__name__}` must return a value that can be converted "
+                    f"to a `np.ndarray` of type `np.float_`, which is not possible "
+                    f"for {value} of type {type(value)}."
                 ) from err
+        else:
+            raise TypeError(
+                f"The function `{name}` specified via the constructor of "
+                f"`{cls.__name__}` must return a scalar value, but {value} of type "
+                f"{type(value)} is not scalar."
+            )
 
-        assert isinstance(value, np.float_)
+        assert isinstance(value, (np.float_, np.ndarray))
 
         return value
 
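
After this change, _ensure_numpy_float distinguishes three cases: scalar values are coerced to np.float_, array-like values are converted to float ndarrays, and force_scalar=True (used for entropy) rejects non-scalar return values. A simplified standalone sketch of that dispatch, not the probnum implementation itself:

    import numpy as np

    def ensure_numpy_float(name, value, force_scalar=False):
        # Scalars -> np.float_, array-likes -> float ndarray,
        # unless a scalar is explicitly required.
        if np.isscalar(value):
            return np.float_(value)
        if force_scalar:
            raise TypeError(f"`{name}` must return a scalar, got {type(value)}.")
        return np.asarray(value, dtype=np.float_)

    print(type(ensure_numpy_float("entropy", 1, force_scalar=True)))  # numpy.float64
    print(ensure_numpy_float("mean", [1.0, 2.0]).dtype)               # float64
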
