Skip to content

Commit

Permalink
fixed crash when handling multiple blobs as array, updated tests
Browse files Browse the repository at this point in the history
  • Loading branch information
johannesulf committed Feb 12, 2024
1 parent 93ec6ee commit 41af447
Show file tree
Hide file tree
Showing 6 changed files with 66 additions and 28 deletions.
5 changes: 5 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,11 @@ All notable changes to nautilus will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [1.0.1] - 2024-02-12

### Fixed
- Fixed a crash when multiple blobs per likelihood call are returned as a single array.

## [1.0.0] - 2024-02-12

### Changed
Expand Down
2 changes: 1 addition & 1 deletion nautilus/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,4 +5,4 @@

__author__ = 'Johannes U. Lange'
__email__ = '[email protected]'
__version__ = '1.0.0'
__version__ = '1.0.1'
2 changes: 1 addition & 1 deletion nautilus/bounds/neural.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,7 @@ def compute(cls, points, log_l, log_l_min, enlarge_per_dim=1.1,

points_t = bound.outer_bound.transform(points)
score = np.zeros(len(points))
select = log_l > log_l_min
select = log_l >= log_l_min
score[select] = 0.5 * (
1 + (rankdata(log_l[select]) - 0.5) / np.sum(select))
score[~select] = 0.5 * (
Expand Down
3 changes: 2 additions & 1 deletion nautilus/sampler.py
Original file line number Diff line number Diff line change
Expand Up @@ -980,7 +980,8 @@ def add_bound(self, verbose=False):
self.points.append(np.zeros((0, self.n_dim)))
self.log_l.append(np.zeros(0))
if self.blobs is not None:
self.blobs.append(np.zeros(0, dtype=self.blobs_dtype))
self.blobs.append(
np.zeros(self.blobs[-1][:0].shape, dtype=self.blobs_dtype))
else:
self.shell_log_l_min[-1] = log_l_min
return False
Expand Down
78 changes: 55 additions & 23 deletions tests/test_blobs.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,13 +19,15 @@ def test_blobs_single(prior, dtype, vectorized, discard_exploration):

def likelihood(x):
if vectorized:
return np.ones(len(x)), (10 * x[:, 0]).astype(dtype)
return (-np.linalg.norm(x - 0.5, axis=-1) * 0.001,
(10 * x[:, 0]).astype(dtype))
else:
return 1, dtype(10 * x[0])
return -np.linalg.norm(x - 0.5, axis=-1) * 0.001, dtype(10 * x[0])

sampler = Sampler(prior, likelihood, n_dim=2, n_live=10,
vectorized=vectorized)
sampler.run(f_live=1.0, n_eff=200, discard_exploration=discard_exploration)
sampler = Sampler(prior, likelihood, n_dim=2, n_live=200,
vectorized=vectorized, n_networks=0)
sampler.run(f_live=0.2, n_like_max=2000,
discard_exploration=discard_exploration)
points, log_w, log_l, blobs = sampler.posterior(return_blobs=True)
points, log_w, log_l, blobs = sampler.posterior(return_blobs=True,
equal_weight=True)
Expand All @@ -42,14 +44,16 @@ def test_blobs_multi(prior, vectorized, discard_exploration):

def likelihood(x):
if vectorized:
return (np.ones(len(x)), x[:, 0].astype(np.float32),
x[:, 1].astype(np.float32))
return (-np.linalg.norm(x - 0.5, axis=-1) * 0.001,
x[:, 0].astype(np.float32), x[:, 1].astype(np.float32))
else:
return 1, np.float32(x[0]), np.float32(x[1])
return (-np.linalg.norm(x - 0.5, axis=-1) * 0.001,
np.float32(x[0]), np.float32(x[1]))

sampler = Sampler(prior, likelihood, n_dim=2, n_live=10,
vectorized=vectorized)
sampler.run(f_live=1.0, n_eff=200, discard_exploration=discard_exploration)
sampler = Sampler(prior, likelihood, n_dim=2, n_live=200,
vectorized=vectorized, n_networks=0)
sampler.run(f_live=0.2, n_like_max=2000,
discard_exploration=discard_exploration)
points, log_w, log_l, blobs = sampler.posterior(return_blobs=True)

assert len(points) == len(blobs)
Expand All @@ -66,14 +70,16 @@ def test_blobs_dtype(prior, vectorized, discard_exploration):

def likelihood(x):
if vectorized:
return np.ones(len(x)), x[:, 0], x[:, 1]
return -np.linalg.norm(x - 0.5, axis=-1) * 0.001, x[:, 0], x[:, 1]
else:
return 1, x[0], x[1]
return -np.linalg.norm(x - 0.5, axis=-1) * 0.001, x[0], x[1]

blobs_dtype = [('a', '|S10'), ('b', np.int16)]
sampler = Sampler(prior, likelihood, n_dim=2, n_live=10,
vectorized=vectorized, blobs_dtype=blobs_dtype)
sampler.run(f_live=1.0, n_eff=200, discard_exploration=discard_exploration)
sampler = Sampler(prior, likelihood, n_dim=2, n_live=200,
vectorized=vectorized, n_networks=0,
blobs_dtype=blobs_dtype)
sampler.run(f_live=0.2, n_like_max=2000,
discard_exploration=discard_exploration)
points, log_w, log_l, blobs = sampler.posterior(return_blobs=True)

assert len(points) == len(blobs)
Expand All @@ -91,14 +97,40 @@ def test_blobs_single_dtype(prior, vectorized, discard_exploration):

def likelihood(x):
if vectorized:
return np.ones(len(x)), x[:, 0], x[:, 1]
return -np.linalg.norm(x - 0.5, axis=-1) * 0.001, x[:, 0], x[:, 1]
else:
return 1, x[0], x[1]
return -np.linalg.norm(x - 0.5, axis=-1) * 0.001, x[0], x[1]

blobs_dtype = np.float32
sampler = Sampler(prior, likelihood, n_dim=2, n_live=200,
vectorized=vectorized, n_networks=0,
blobs_dtype=blobs_dtype)
sampler.run(f_live=0.2, n_like_max=2000,
discard_exploration=discard_exploration)
points, log_w, log_l, blobs = sampler.posterior(return_blobs=True)
assert len(points) == len(blobs)
assert np.all(points[:, 0].astype(blobs_dtype) == blobs[:, 0])
assert np.all(points[:, 1].astype(blobs_dtype) == blobs[:, 1])


sampler = Sampler(prior, likelihood, n_dim=2, n_live=10,
vectorized=vectorized, blobs_dtype=float)
sampler.run(f_live=1.0, n_eff=200, discard_exploration=discard_exploration)
@pytest.mark.parametrize("vectorized", [True, False])
@pytest.mark.parametrize("discard_exploration", [True, False])
def test_blobs_array(prior, vectorized, discard_exploration):
    # Test that blobs work when multiple blobs per likelihood call are
    # returned as a single array (the crash this commit fixes), instead of
    # as a tuple of separate scalars/arrays.

    def likelihood(x):
        # Smooth, nearly-flat log-likelihood so the run converges quickly;
        # the blob is the first two coordinates returned as ONE array.
        if vectorized:
            return -np.linalg.norm(x - 0.5, axis=-1) * 0.001, x[:, :2]
        else:
            return -np.linalg.norm(x - 0.5, axis=-1) * 0.001, x[:2]

    blobs_dtype = np.float32
    # n_networks=0 keeps the test fast and dependency-free; n_like_max caps
    # the total number of likelihood evaluations.
    sampler = Sampler(prior, likelihood, n_dim=2, n_live=200,
                      vectorized=vectorized, n_networks=0,
                      blobs_dtype=blobs_dtype)
    sampler.run(f_live=0.2, n_like_max=2000,
                discard_exploration=discard_exploration)
    points, log_w, log_l, blobs = sampler.posterior(return_blobs=True)

    # One blob row per posterior point, and each blob column must match the
    # corresponding coordinate after the dtype cast applied by the sampler.
    assert len(points) == len(blobs)
    assert np.all(points[:, 0].astype(blobs_dtype) == blobs[:, 0])
    assert np.all(points[:, 1].astype(blobs_dtype) == blobs[:, 1])
4 changes: 2 additions & 2 deletions tests/test_sampler.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,11 +29,11 @@ def likelihood(x):
sampler = Sampler(
prior, likelihood, n_dim=2, n_networks=n_networks,
vectorized=vectorized, pass_dict=pass_dict, n_live=200)
sampler.run(n_like_max=500, verbose=True)
sampler.run(n_like_max=600, verbose=True)
sampler = Sampler(
prior, likelihood, n_dim=2, n_networks=n_networks,
vectorized=vectorized, pass_dict=None, n_live=200)
sampler.run(n_like_max=500, verbose=True)
sampler.run(n_like_max=600, verbose=True)
points, log_w, log_l = sampler.posterior()
points, log_w, log_l = sampler.posterior(return_as_dict=pass_dict)
points, log_w, log_l = sampler.posterior(
Expand Down

0 comments on commit 41af447

Please sign in to comment.