diff --git a/vamb/encode.py b/vamb/encode.py
index 10f6fa76..0a252e94 100644
--- a/vamb/encode.py
+++ b/vamb/encode.py
@@ -514,7 +514,9 @@ def load(
         """

         # Forcably load to CPU even if model was saves as GPU model
-        dictionary = _torch.load(path, map_location=lambda storage, loc: storage, weights_only=True)
+        dictionary = _torch.load(
+            path, map_location=lambda storage, loc: storage, weights_only=True
+        )

         nsamples = dictionary["nsamples"]
         alpha = dictionary["alpha"]
diff --git a/vamb/semisupervised_encode.py b/vamb/semisupervised_encode.py
index d98bfdc9..f106ca79 100644
--- a/vamb/semisupervised_encode.py
+++ b/vamb/semisupervised_encode.py
@@ -1125,7 +1125,9 @@ def load(cls, path, cuda=False, evaluate=True):
         """

         # Forcably load to CPU even if model was saves as GPU model
-        dictionary = _torch.load(path, map_location=lambda storage, loc: storage, weights_only=False)
+        dictionary = _torch.load(
+            path, map_location=lambda storage, loc: storage, weights_only=False
+        )

         nsamples = dictionary["nsamples"]
         nlabels = dictionary["nlabels"]
diff --git a/vamb/taxvamb_encode.py b/vamb/taxvamb_encode.py
index 80970be6..45087f2c 100644
--- a/vamb/taxvamb_encode.py
+++ b/vamb/taxvamb_encode.py
@@ -630,7 +630,9 @@ def load(cls, path, nodes, table_parent, cuda=False, evaluate=True):
         """

         # Forcably load to CPU even if model was saves as GPU model
-        dictionary = _torch.load(path, map_location=lambda storage, loc: storage, weights_only=False)
+        dictionary = _torch.load(
+            path, map_location=lambda storage, loc: storage, weights_only=False
+        )

         nsamples = dictionary["nsamples"]
         nlabels = dictionary["nlabels"]
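
Note: all three hunks make the same mechanical change: the long _torch.load(...) call is wrapped across lines, with no behavioral difference. As a minimal standalone sketch of the shared loading pattern (the "model.pt" path is a placeholder, not from the diff; the weights_only semantics are standard PyTorch):

    import torch

    # Map every storage back onto the CPU, even if the checkpoint was
    # written from a model that lived on a GPU.
    # weights_only=True restricts unpickling to tensors and primitive
    # containers, which is the safer setting; the semisupervised and
    # taxonomy hunks above keep weights_only=False, presumably because
    # those checkpoints pickle extra Python objects alongside the tensors.
    dictionary = torch.load(
        "model.pt",  # placeholder path
        map_location=lambda storage, loc: storage,
        weights_only=True,
    )
    nsamples = dictionary["nsamples"]  # key taken from the hunks above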