diff --git a/one/api.py b/one/api.py
index 1067a60..a336616 100644
--- a/one/api.py
+++ b/one/api.py
@@ -1979,8 +1979,8 @@ def list_datasets(
         del filters['default_revisions_only']
         if not keep_eid_index and 'eid' in datasets.index.names:
             datasets = datasets.droplevel('eid')
-        datasets = util.filter_datasets(
-            datasets, assert_unique=False, wildcards=self.wildcards, **filters)
+        kwargs = dict(assert_unique=False, wildcards=self.wildcards, revision_last_before=False)
+        datasets = util.filter_datasets(datasets, **kwargs, **filters)
         # Return only the relative path
         return datasets if details else datasets['rel_path'].sort_values().values.tolist()
 
diff --git a/one/tests/test_one.py b/one/tests/test_one.py
index b2580b5..d64c69e 100644
--- a/one/tests/test_one.py
+++ b/one/tests/test_one.py
@@ -1511,7 +1511,7 @@ def tearDownClass(cls) -> None:
 
 @unittest.skipIf(OFFLINE_ONLY, 'online only test')
 class TestOneRemote(unittest.TestCase):
-    """Test remote queries using OpenAlyx"""
+    """Test remote queries using OpenAlyx."""
     def setUp(self) -> None:
         self.one = OneAlyx(**TEST_DB_2, mode='auto')
         self.eid = '4ecb5d24-f5cc-402c-be28-9d0f7cb14b3a'
@@ -1522,19 +1522,20 @@ def setUp(self) -> None:
         self.one.alyx._par = self.one.alyx._par.set('CACHE_DIR', Path(self.tempdir.name))
 
     def test_online_repr(self):
-        """Tests OneAlyx.__repr__"""
+        """Tests OneAlyx.__repr__."""
         self.assertTrue('online' in str(self.one))
         self.assertTrue(TEST_DB_2['base_url'] in str(self.one))
 
     def test_list_datasets(self):
-        """Test OneAlyx.list_datasets"""
+        """Test OneAlyx.list_datasets."""
         # Test list for eid
         # Ensure remote by making local datasets table empty
         self.addCleanup(self.one.load_cache)
         self.one._cache['datasets'] = self.one._cache['datasets'].iloc[0:0].copy()
 
         dsets = self.one.list_datasets(self.eid, details=True, query_type='remote')
-        self.assertEqual(183, len(dsets))  # this may change after a BWM release or patch
+        expected_n_datasets = 253  # this may change after a BWM release or patch
+        self.assertEqual(expected_n_datasets, len(dsets))
         self.assertEqual(1, dsets.index.nlevels, 'details data frame should be without eid index')
 
         # Test keep_eid_index
@@ -1556,12 +1557,12 @@ def test_list_datasets(self):
         # Test details=False, with eid
         dsets = self.one.list_datasets(self.eid, details=False, query_type='remote')
         self.assertIsInstance(dsets, list)
-        self.assertEqual(183, len(dsets))  # this may change after a BWM release or patch
+        self.assertEqual(expected_n_datasets, len(dsets))
 
         # Test with other filters
         dsets = self.one.list_datasets(self.eid, collection='*probe*', filename='*channels*',
                                        details=False, query_type='remote')
-        self.assertEqual(24, len(dsets))
+        self.assertEqual(36, len(dsets))
         self.assertTrue(all(x in y for x in ('probe', 'channels') for y in dsets))
 
         with self.assertWarns(Warning):
diff --git a/one/util.py b/one/util.py
index 87f7103..cefe4bd 100644
--- a/one/util.py
+++ b/one/util.py
@@ -274,6 +274,7 @@ def filter_datasets(
    - It is not possible to match datasets that are in a given collection OR NOT in ANY
      collection. e.g. filter_datasets(dsets, collection=['alf', '']) will not match the latter.
      For this you must use two separate queries.
+    - It is not possible to match datasets with no revision when wildcards=True.
     """
     # Create a regular expression string to match relative path against
     filename = filename or {}
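
Note (not part of the patch): a minimal sketch of the behaviour this change targets. Passing
revision_last_before=False to util.filter_datasets means OneAlyx.list_datasets in remote mode
should list every revision of each dataset rather than only the latest revision, which is why the
expected dataset counts in the tests increase. The eid below is the test session id from the diff;
the base_url is assumed to be the public OpenAlyx instance (the tests use TEST_DB_2), and
credentials are assumed to be already configured.

    from one.api import ONE

    # Assumed public OpenAlyx URL; substitute your own Alyx instance as needed
    one = ONE(base_url='https://openalyx.internationalbrainlab.org', silent=True)
    eid = '4ecb5d24-f5cc-402c-be28-9d0f7cb14b3a'

    # All dataset paths for the session, now including every revision
    dsets = one.list_datasets(eid, details=False, query_type='remote')

    # Collection/filename wildcard filters behave as in the tests above
    channel_dsets = one.list_datasets(eid, collection='*probe*', filename='*channels*',
                                      details=False, query_type='remote')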