diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1f9957e..e19ef93 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,11 @@ All notable changes to nautilus will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [1.0.1] - 2023-02-12
+
+### Fixed
+- Fixed a crash when multiple blobs per likelihood call are returned as a single array.
+
 ## [1.0.0] - 2023-02-12
 
 ### Changed
diff --git a/nautilus/__init__.py b/nautilus/__init__.py
index c3244bc..64f8a81 100644
--- a/nautilus/__init__.py
+++ b/nautilus/__init__.py
@@ -5,4 +5,4 @@
 
 __author__ = 'Johannes U. Lange'
 __email__ = 'julange.astro@pm.me'
-__version__ = '1.0.0'
+__version__ = '1.0.1'
diff --git a/nautilus/bounds/neural.py b/nautilus/bounds/neural.py
index 37fb3a8..9508e94 100644
--- a/nautilus/bounds/neural.py
+++ b/nautilus/bounds/neural.py
@@ -84,7 +84,7 @@ def compute(cls, points, log_l, log_l_min, enlarge_per_dim=1.1,
         points_t = bound.outer_bound.transform(points)
 
         score = np.zeros(len(points))
-        select = log_l > log_l_min
+        select = log_l >= log_l_min
         score[select] = 0.5 * (
             1 + (rankdata(log_l[select]) - 0.5) / np.sum(select))
         score[~select] = 0.5 * (
diff --git a/nautilus/sampler.py b/nautilus/sampler.py
index f6e3faf..0c02559 100644
--- a/nautilus/sampler.py
+++ b/nautilus/sampler.py
@@ -980,7 +980,8 @@ def add_bound(self, verbose=False):
             self.points.append(np.zeros((0, self.n_dim)))
             self.log_l.append(np.zeros(0))
             if self.blobs is not None:
-                self.blobs.append(np.zeros(0, dtype=self.blobs_dtype))
+                self.blobs.append(
+                    np.zeros(self.blobs[-1][:0].shape, dtype=self.blobs_dtype))
         else:
             self.shell_log_l_min[-1] = log_l_min
             return False
diff --git a/tests/test_blobs.py b/tests/test_blobs.py
index 25a1de6..dcf35a5 100644
--- a/tests/test_blobs.py
+++ b/tests/test_blobs.py
@@ -19,13 +19,15 @@ def test_blobs_single(prior, dtype, vectorized, discard_exploration):
 
     def likelihood(x):
         if vectorized:
-            return np.ones(len(x)), (10 * x[:, 0]).astype(dtype)
+            return (-np.linalg.norm(x - 0.5, axis=-1) * 0.001,
+                    (10 * x[:, 0]).astype(dtype))
         else:
-            return 1, dtype(10 * x[0])
+            return -np.linalg.norm(x - 0.5, axis=-1) * 0.001, dtype(10 * x[0])
 
-    sampler = Sampler(prior, likelihood, n_dim=2, n_live=10,
-                      vectorized=vectorized)
-    sampler.run(f_live=1.0, n_eff=200, discard_exploration=discard_exploration)
+    sampler = Sampler(prior, likelihood, n_dim=2, n_live=200,
+                      vectorized=vectorized, n_networks=0)
+    sampler.run(f_live=0.2, n_like_max=2000,
+                discard_exploration=discard_exploration)
     points, log_w, log_l, blobs = sampler.posterior(return_blobs=True)
     points, log_w, log_l, blobs = sampler.posterior(return_blobs=True,
                                                     equal_weight=True)
@@ -42,14 +44,16 @@ def test_blobs_multi(prior, vectorized, discard_exploration):
 
     def likelihood(x):
         if vectorized:
-            return (np.ones(len(x)), x[:, 0].astype(np.float32),
-                    x[:, 1].astype(np.float32))
+            return (-np.linalg.norm(x - 0.5, axis=-1) * 0.001,
+                    x[:, 0].astype(np.float32), x[:, 1].astype(np.float32))
         else:
-            return 1, np.float32(x[0]), np.float32(x[1])
+            return (-np.linalg.norm(x - 0.5, axis=-1) * 0.001,
+                    np.float32(x[0]), np.float32(x[1]))
 
-    sampler = Sampler(prior, likelihood, n_dim=2, n_live=10,
-                      vectorized=vectorized)
-    sampler.run(f_live=1.0, n_eff=200, discard_exploration=discard_exploration)
+    sampler = Sampler(prior, likelihood, n_dim=2, n_live=200,
+                      vectorized=vectorized, n_networks=0)
+    sampler.run(f_live=0.2, n_like_max=2000,
+                discard_exploration=discard_exploration)
     points, log_w, log_l, blobs = sampler.posterior(return_blobs=True)
 
     assert len(points) == len(blobs)
@@ -66,14 +70,16 @@ def test_blobs_dtype(prior, vectorized, discard_exploration):
 
     def likelihood(x):
         if vectorized:
-            return np.ones(len(x)), x[:, 0], x[:, 1]
+            return -np.linalg.norm(x - 0.5, axis=-1) * 0.001, x[:, 0], x[:, 1]
         else:
-            return 1, x[0], x[1]
+            return -np.linalg.norm(x - 0.5, axis=-1) * 0.001, x[0], x[1]
 
     blobs_dtype = [('a', '|S10'), ('b', np.int16)]
-    sampler = Sampler(prior, likelihood, n_dim=2, n_live=10,
-                      vectorized=vectorized, blobs_dtype=blobs_dtype)
-    sampler.run(f_live=1.0, n_eff=200, discard_exploration=discard_exploration)
+    sampler = Sampler(prior, likelihood, n_dim=2, n_live=200,
+                      vectorized=vectorized, n_networks=0,
+                      blobs_dtype=blobs_dtype)
+    sampler.run(f_live=0.2, n_like_max=2000,
+                discard_exploration=discard_exploration)
     points, log_w, log_l, blobs = sampler.posterior(return_blobs=True)
 
     assert len(points) == len(blobs)
@@ -91,14 +97,40 @@ def test_blobs_single_dtype(prior, vectorized, discard_exploration):
 
     def likelihood(x):
         if vectorized:
-            return np.ones(len(x)), x[:, 0], x[:, 1]
+            return -np.linalg.norm(x - 0.5, axis=-1) * 0.001, x[:, 0], x[:, 1]
         else:
-            return 1, x[0], x[1]
+            return -np.linalg.norm(x - 0.5, axis=-1) * 0.001, x[0], x[1]
+
+    blobs_dtype = np.float32
+    sampler = Sampler(prior, likelihood, n_dim=2, n_live=200,
+                      vectorized=vectorized, n_networks=0,
+                      blobs_dtype=blobs_dtype)
+    sampler.run(f_live=0.2, n_like_max=2000,
+                discard_exploration=discard_exploration)
+    points, log_w, log_l, blobs = sampler.posterior(return_blobs=True)
+    assert len(points) == len(blobs)
+    assert np.all(points[:, 0].astype(blobs_dtype) == blobs[:, 0])
+    assert np.all(points[:, 1].astype(blobs_dtype) == blobs[:, 1])
+
-    sampler = Sampler(prior, likelihood, n_dim=2, n_live=10,
-                      vectorized=vectorized, blobs_dtype=float)
-    sampler.run(f_live=1.0, n_eff=200, discard_exploration=discard_exploration)
+@pytest.mark.parametrize("vectorized", [True, False])
+@pytest.mark.parametrize("discard_exploration", [True, False])
+def test_blobs_array(prior, vectorized, discard_exploration):
+    # Test that blobs work when returning multiple blobs via one array.
+
+    def likelihood(x):
+        if vectorized:
+            return -np.linalg.norm(x - 0.5, axis=-1) * 0.001, x[:, :2]
+        else:
+            return -np.linalg.norm(x - 0.5, axis=-1) * 0.001, x[:2]
+
+    blobs_dtype = np.float32
+    sampler = Sampler(prior, likelihood, n_dim=2, n_live=200,
+                      vectorized=vectorized, n_networks=0,
+                      blobs_dtype=blobs_dtype)
+    sampler.run(f_live=0.2, n_like_max=2000,
+                discard_exploration=discard_exploration)
     points, log_w, log_l, blobs = sampler.posterior(return_blobs=True)
 
     assert len(points) == len(blobs)
-    assert np.all(points[:, 0] == blobs[:, 0])
-    assert np.all(points[:, 1] == blobs[:, 1])
+    assert np.all(points[:, 0].astype(blobs_dtype) == blobs[:, 0])
+    assert np.all(points[:, 1].astype(blobs_dtype) == blobs[:, 1])
diff --git a/tests/test_sampler.py b/tests/test_sampler.py
index a23655f..d3fadfc 100644
--- a/tests/test_sampler.py
+++ b/tests/test_sampler.py
@@ -29,11 +29,11 @@ def likelihood(x):
     sampler = Sampler(
         prior, likelihood, n_dim=2, n_networks=n_networks,
         vectorized=vectorized, pass_dict=pass_dict, n_live=200)
-    sampler.run(n_like_max=500, verbose=True)
+    sampler.run(n_like_max=600, verbose=True)
     sampler = Sampler(
         prior, likelihood, n_dim=2, n_networks=n_networks,
         vectorized=vectorized, pass_dict=None, n_live=200)
-    sampler.run(n_like_max=500, verbose=True)
+    sampler.run(n_like_max=600, verbose=True)
     points, log_w, log_l = sampler.posterior()
     points, log_w, log_l = sampler.posterior(return_as_dict=pass_dict)
     points, log_w, log_l = sampler.posterior(
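For context, a minimal sketch (not part of the patch) of the usage pattern the 1.0.1 fix addresses: a likelihood that packs several blobs into a single array instead of returning them as separate tuple elements. The flat unit-cube prior below is a hypothetical stand-in; the Sampler, run, and posterior calls mirror the new test_blobs_array test above.

    import numpy as np
    from nautilus import Sampler

    def prior(u):
        # Hypothetical flat prior: unit-cube samples map to themselves.
        return u

    def likelihood(x):
        # One log-likelihood value plus two blobs returned as a single
        # array of shape (2,); before 1.0.1, appending an empty blob
        # shell for this pattern could crash the sampler.
        return -np.linalg.norm(x - 0.5) * 0.001, x[:2]

    sampler = Sampler(prior, likelihood, n_dim=2, n_live=200,
                      blobs_dtype=np.float32)
    sampler.run(f_live=0.2, n_like_max=2000)
    points, log_w, log_l, blobs = sampler.posterior(return_blobs=True)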