From 2ab3175834ee125a429d17df393f8d18291911dd Mon Sep 17 00:00:00 2001
From: Jérôme Hénin
Date: Tue, 18 Jun 2024 10:57:24 +0200
Subject: [PATCH] Update to new numpy

NumPy 2.0 removes the long-deprecated np.infty alias; np.inf is the
supported spelling on both NumPy 1.x and 2.x, so switch to it
everywhere.
---
 folie/domains/__init__.py     | 4 ++--
 folie/estimation/mle.py       | 6 +++---
 folie/simulations/__init__.py | 4 ++--
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/folie/domains/__init__.py b/folie/domains/__init__.py
index 38cd216..aa970e5 100644
--- a/folie/domains/__init__.py
+++ b/folie/domains/__init__.py
@@ -53,8 +53,8 @@ def Rd(cls, dim):
         if dim < 1:
             dim = 1
         range = np.empty((2, dim))
-        range[0, :] = -np.infty
-        range[1, :] = np.infty
+        range[0, :] = -np.inf
+        range[1, :] = np.inf
         return cls(range)
 
     @classmethod
diff --git a/folie/estimation/mle.py b/folie/estimation/mle.py
index b2286c7..4ac8635 100644
--- a/folie/estimation/mle.py
+++ b/folie/estimation/mle.py
@@ -176,7 +176,7 @@ def fit(self, data, minimizer=None, coefficients0=None, use_jac=True, callback=N
 
         do_init = not (self.warm_start and hasattr(self, "converged_"))
         n_init = self.n_init if do_init else 1
-        max_lower_bound = -np.infty
+        max_lower_bound = -np.inf
         self.converged_ = False
 
         # That becomes duplicate of callback
@@ -200,8 +200,8 @@ def fit(self, data, minimizer=None, coefficients0=None, use_jac=True, callback=N
             mu0 = np.zeros(self.model.dim_h)
             sig0 = np.identity(self.model.dim_h)
             self._print_verbose_msg_init_beg(init)
-            lower_bound = -np.infty if do_init else self.lower_bound_
-            lower_bound_m_step = -np.infty
+            lower_bound = -np.inf if do_init else self.lower_bound_
+            lower_bound_m_step = -np.inf
             # Algorithm loop
             for n_iter in range(1, self.max_iter + 1):
                 prev_lower_bound = lower_bound
diff --git a/folie/simulations/__init__.py b/folie/simulations/__init__.py
index 491e19b..5037c4b 100644
--- a/folie/simulations/__init__.py
+++ b/folie/simulations/__init__.py
@@ -88,7 +88,7 @@ def _bias(self, xt):
 
 
 class ABMD_Simulator(BiasedSimulator):
-    def __init__(self, stepper, dt, k=1, xstop=np.infty, **kwargs):
+    def __init__(self, stepper, dt, k=1, xstop=np.inf, **kwargs):
         super().__init__(stepper, dt, **kwargs)
         self.xmax = None
         self.k = k
@@ -106,7 +106,7 @@ def _bias(self, xt):
 
 class ABMD_2D_to_1DColvar_Simulator(BiasedSimulator):
     # user must provide both colvar function and its gradient in colvar element
-    def __init__(self, stepper, dt, colvar, k=1, qstop=np.infty, **kwargs):
+    def __init__(self, stepper, dt, colvar, k=1, qstop=np.inf, **kwargs):
         super().__init__(stepper, dt, **kwargs)
         self.qmax = None
         self.k = k
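
Note (commentary, not part of the patch): NumPy 2.0 removed the np.infty
alias, so any remaining use raises AttributeError on import of the module
that contains it; np.inf has existed throughout the 1.x series, which makes
this rename backward compatible. A minimal standalone sketch of the failure
mode and the fix, mirroring the Rd() hunk above (the dim=3 value is just for
illustration, not folie's default):

    import numpy as np

    # On NumPy >= 2.0 the old alias is gone:
    #   np.infty  ->  AttributeError
    # np.inf is valid on both old and new NumPy releases.
    dim = 3
    bounds = np.empty((2, dim))   # row 0: lower bounds, row 1: upper bounds
    bounds[0, :] = -np.inf        # unbounded below
    bounds[1, :] = np.inf         # unbounded above
    print(bounds)                 # [[-inf -inf -inf]
                                  #  [ inf  inf  inf]]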