From 1b0ce535d4462e19ff83054466410c8ccefcff37 Mon Sep 17 00:00:00 2001
From: Olivier
Date: Fri, 7 Mar 2025 13:52:36 +0100
Subject: [PATCH] :zap: Update ruff and lint

---
 pyproject.toml                                 |  2 +-
 .../datasets/classification/cifar/cifar_c.py   |  2 +-
 .../datasets/classification/cifar/cifar_h.py   |  6 +-
 .../datasets/classification/cifar/cifar_n.py   |  4 +-
 .../datasets/classification/cub.py             |  2 +-
 .../datasets/classification/imagenet/base.py   |  2 +-
 .../datasets/classification/not_mnist.py       |  2 +-
 torch_uncertainty/datasets/fractals.py         |  2 +-
 torch_uncertainty/datasets/frost.py            |  2 +-
 torch_uncertainty/datasets/muad.py             |  2 +-
 .../datasets/regression/uci_regression.py      |  2 +-
 .../datasets/segmentation/camvid.py            |  4 +-
 torch_uncertainty/layers/batch_ensemble.py     |  2 +-
 .../layers/bayesian/bayes_conv.py              |  4 +-
 .../layers/filter_response_norm.py             |  4 +-
 torch_uncertainty/layers/functional/packed.py  | 66 +++++++++----------
 torch_uncertainty/layers/packed.py             | 12 ++--
 torch_uncertainty/losses/bayesian.py           |  6 +-
 torch_uncertainty/losses/classification.py     | 12 ++--
 torch_uncertainty/losses/regression.py         |  4 +-
 .../metrics/classification/fpr.py              |  2 +-
 .../post_processing/calibration/scaler.py      |  2 +-
 .../post_processing/mc_batch_norm.py           |  2 +-
 torch_uncertainty/routines/classification.py   |  6 +-
 .../routines/pixel_regression.py               |  4 +-
 torch_uncertainty/routines/regression.py       |  4 +-
 torch_uncertainty/routines/segmentation.py     |  2 +-
 torch_uncertainty/transforms/corruption.py     | 26 ++++----
 torch_uncertainty/utils/distributions.py       |  4 +-
 29 files changed, 96 insertions(+), 98 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index f5cbd2ab..661ad447 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -51,7 +51,7 @@ dev = [
     "torch_uncertainty[image]",
     "huggingface-hub",
     "safetensors",
-    "ruff==0.7.4",
+    "ruff==0.9.9",
     "pytest-cov",
     "pre-commit",
     "pre-commit-hooks",
diff --git a/torch_uncertainty/datasets/classification/cifar/cifar_c.py b/torch_uncertainty/datasets/classification/cifar/cifar_c.py
index ebafe7f8..b9020b8c 100644
--- a/torch_uncertainty/datasets/classification/cifar/cifar_c.py
+++ b/torch_uncertainty/datasets/classification/cifar/cifar_c.py
@@ -111,7 +111,7 @@ def __init__(
 
         if shift_severity not in list(range(1, 6)):
             raise ValueError(
-                "Corruptions shift_severity should be chosen between 1 and 5 " "included."
+                "Corruptions shift_severity should be chosen between 1 and 5 included."
             )
 
         samples, labels = self.make_dataset(self.root, self.subset, self.shift_severity)
diff --git a/torch_uncertainty/datasets/classification/cifar/cifar_h.py b/torch_uncertainty/datasets/classification/cifar/cifar_h.py
index f8354a0b..4cbbecb1 100644
--- a/torch_uncertainty/datasets/classification/cifar/cifar_h.py
+++ b/torch_uncertainty/datasets/classification/cifar/cifar_h.py
@@ -27,7 +27,7 @@ class CIFAR10H(CIFAR10):
     """
 
     h_test_list = ["cifar-10h-probs.npy", "7b41f73eee90fdefc73bfc820ab29ba8"]
-    h_url = "https://github.com/jcpeterson/cifar-10h/raw/master/data/" "cifar10h-probs.npy"
+    h_url = "https://github.com/jcpeterson/cifar-10h/raw/master/data/cifar10h-probs.npy"
 
     def __init__(
         self,
@@ -39,7 +39,7 @@ def __init__(
     ) -> None:
         if train:
             raise ValueError("CIFAR10H does not support training data.")
-        print("WARNING: CIFAR10H cannot be used with Classification routines " "for now.")
+        print("WARNING: CIFAR10H cannot be used with Classification routines for now.")
         super().__init__(
             Path(root),
             train=False,
@@ -53,7 +53,7 @@ def __init__(
 
         if not self._check_specific_integrity():
             raise RuntimeError(
-                "Dataset not found or corrupted. You can use download=True to " "download it."
+                "Dataset not found or corrupted. You can use download=True to download it."
             )
 
         self.targets = list(torch.as_tensor(np.load(self.root / self.h_test_list[0])))
diff --git a/torch_uncertainty/datasets/classification/cifar/cifar_n.py b/torch_uncertainty/datasets/classification/cifar/cifar_n.py
index 6f6f8c95..e8193d68 100644
--- a/torch_uncertainty/datasets/classification/cifar/cifar_n.py
+++ b/torch_uncertainty/datasets/classification/cifar/cifar_n.py
@@ -61,7 +61,7 @@ def __init__(
 
         if not self._check_specific_integrity():
             raise RuntimeError(
-                "Dataset not found or corrupted. You can use download=True to " "download it."
+                "Dataset not found or corrupted. You can use download=True to download it."
             )
 
         self.targets = list(torch.load(self.root / self.filename)[file_arg])
@@ -112,7 +112,7 @@ def __init__(
 
         if not self._check_specific_integrity():
             raise RuntimeError(
-                "Dataset not found or corrupted. You can use download=True to " "download it."
+                "Dataset not found or corrupted. You can use download=True to download it."
             )
 
         self.targets = list(torch.load(self.root / self.filename)[file_arg])
diff --git a/torch_uncertainty/datasets/classification/cub.py b/torch_uncertainty/datasets/classification/cub.py
index 079d20ec..38bacf70 100644
--- a/torch_uncertainty/datasets/classification/cub.py
+++ b/torch_uncertainty/datasets/classification/cub.py
@@ -51,7 +51,7 @@ def __init__(
 
         if not self._check_integrity():
             raise RuntimeError(
-                "Dataset not found or corrupted. You can use download=True to " "download it."
+                "Dataset not found or corrupted. You can use download=True to download it."
             )
 
         super().__init__(Path(root) / "CUB_200_2011" / "images", transform, target_transform)
diff --git a/torch_uncertainty/datasets/classification/imagenet/base.py b/torch_uncertainty/datasets/classification/imagenet/base.py
index 722955cf..4e5ed41a 100644
--- a/torch_uncertainty/datasets/classification/imagenet/base.py
+++ b/torch_uncertainty/datasets/classification/imagenet/base.py
@@ -52,7 +52,7 @@ def __init__(
 
         if not self._check_integrity():
             raise RuntimeError(
-                "Dataset not found or corrupted. " "You can use download=True to download it."
+                "Dataset not found or corrupted. You can use download=True to download it."
             )
 
         super().__init__(
diff --git a/torch_uncertainty/datasets/classification/not_mnist.py b/torch_uncertainty/datasets/classification/not_mnist.py
index e0b28ae0..ff3e364b 100644
--- a/torch_uncertainty/datasets/classification/not_mnist.py
+++ b/torch_uncertainty/datasets/classification/not_mnist.py
@@ -60,7 +60,7 @@ def __init__(
 
         if not self._check_integrity():
             raise RuntimeError(
-                "Dataset not found or corrupted. You can use download=True to " "download it."
+                "Dataset not found or corrupted. You can use download=True to download it."
             )
 
         super().__init__(
diff --git a/torch_uncertainty/datasets/fractals.py b/torch_uncertainty/datasets/fractals.py
index 329a2086..8153f2a0 100644
--- a/torch_uncertainty/datasets/fractals.py
+++ b/torch_uncertainty/datasets/fractals.py
@@ -40,7 +40,7 @@ def __init__(
 
         if not self._check_integrity():
             raise RuntimeError(
-                "Dataset not found or corrupted. You can use download=True to " "download it."
+                "Dataset not found or corrupted. You can use download=True to download it."
             )
 
         super().__init__(self.root, transform=transform, target_transform=target_transform)
diff --git a/torch_uncertainty/datasets/frost.py b/torch_uncertainty/datasets/frost.py
index dbde6d89..6aa069bb 100644
--- a/torch_uncertainty/datasets/frost.py
+++ b/torch_uncertainty/datasets/frost.py
@@ -44,7 +44,7 @@ def __init__(
 
         if not self._check_integrity():
             raise RuntimeError(
-                "Dataset not found or corrupted. You can use download=True to " "download it."
+                "Dataset not found or corrupted. You can use download=True to download it."
             )
 
         super().__init__(
diff --git a/torch_uncertainty/datasets/muad.py b/torch_uncertainty/datasets/muad.py
index 73ea2ba7..cc93c3d9 100644
--- a/torch_uncertainty/datasets/muad.py
+++ b/torch_uncertainty/datasets/muad.py
@@ -232,7 +232,7 @@ def _make_dataset(self, path: Path) -> None:
         """
         if "depth" in path.name:
             raise NotImplementedError(
-                "Depth mode is not implemented yet. Raise an issue " "if you need it."
+                "Depth mode is not implemented yet. Raise an issue if you need it."
             )
         self.samples = sorted((path / "leftImg8bit/").glob("**/*"))
         if self.target_type == "semantic":
diff --git a/torch_uncertainty/datasets/regression/uci_regression.py b/torch_uncertainty/datasets/regression/uci_regression.py
index a2c50bc3..502b9a1e 100644
--- a/torch_uncertainty/datasets/regression/uci_regression.py
+++ b/torch_uncertainty/datasets/regression/uci_regression.py
@@ -191,7 +191,7 @@ def download(self) -> None:
             logging.info("Files already downloaded and verified")
             return
         if self.url is None:
-            raise ValueError(f"The dataset {self.dataset_name} is not available for " "download.")
+            raise ValueError(f"The dataset {self.dataset_name} is not available for download.")
         download_root = self.root / self.root_appendix / self.dataset_name
         if self.dataset_name == "boston":
             download_url(
diff --git a/torch_uncertainty/datasets/segmentation/camvid.py b/torch_uncertainty/datasets/segmentation/camvid.py
index 937b83f7..ec9b8e4c 100644
--- a/torch_uncertainty/datasets/segmentation/camvid.py
+++ b/torch_uncertainty/datasets/segmentation/camvid.py
@@ -133,7 +133,7 @@ def __init__(
         """
        if split not in ["train", "val", "test", None]:
             raise ValueError(
-                f"Unknown split '{split}'. " "Supported splits are ['train', 'val', 'test', None]"
+                f"Unknown split '{split}'. Supported splits are ['train', 'val', 'test', None]"
             )
 
         super().__init__(root, transforms, None, None)
@@ -153,7 +153,7 @@ def __init__(
 
         if not self._check_integrity():
             raise RuntimeError(
-                "Dataset not found or corrupted. " "You can use download=True to download it"
+                "Dataset not found or corrupted. You can use download=True to download it"
             )
 
         # get filenames for split
diff --git a/torch_uncertainty/layers/batch_ensemble.py b/torch_uncertainty/layers/batch_ensemble.py
index dde72139..a1ce068a 100644
--- a/torch_uncertainty/layers/batch_ensemble.py
+++ b/torch_uncertainty/layers/batch_ensemble.py
@@ -143,7 +143,7 @@ def forward(self, inputs: Tensor) -> Tensor:
 
     def extra_repr(self) -> str:
         return (
-            f"in_features={ self.in_features},"
+            f"in_features={self.in_features},"
             f" out_features={self.out_features},"
             f" num_estimators={self.num_estimators},"
             f" bias={self.bias is not None}"
diff --git a/torch_uncertainty/layers/bayesian/bayes_conv.py b/torch_uncertainty/layers/bayesian/bayes_conv.py
index e328aa39..f1277ed4 100644
--- a/torch_uncertainty/layers/bayesian/bayes_conv.py
+++ b/torch_uncertainty/layers/bayesian/bayes_conv.py
@@ -89,7 +89,7 @@ def __init__(
 
         if transposed:
             raise NotImplementedError(
-                "Bayesian transposed convolution not implemented yet. Raise an" " issue if needed."
+                "Bayesian transposed convolution not implemented yet. Raise an issue if needed."
             )
 
         self.in_channels = in_channels
@@ -164,7 +164,7 @@ def sample(self) -> tuple[Tensor, Tensor | None]:
         return weight, bias
 
     def extra_repr(self) -> str:  # coverage: ignore
-        s = "{in_channels}, {out_channels}, kernel_size={kernel_size}" ", stride={stride}"
+        s = "{in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}"
         if self.padding != (0,) * len(self.padding):
             s += ", padding={padding}"
         if self.dilation != (1,) * len(self.dilation):
diff --git a/torch_uncertainty/layers/filter_response_norm.py b/torch_uncertainty/layers/filter_response_norm.py
index ef306a8f..0f22717d 100644
--- a/torch_uncertainty/layers/filter_response_norm.py
+++ b/torch_uncertainty/layers/filter_response_norm.py
@@ -23,13 +23,13 @@ def __init__(
         super().__init__()
         if dimension < 1 or not isinstance(dimension, int):
             raise ValueError(
-                "dimension should be an integer greater or equal than 1. " f"got {dimension}."
+                f"dimension should be an integer greater than or equal to 1. Got {dimension}."
             )
         self.dimension = dimension
 
         if num_channels < 1 or not isinstance(num_channels, int):
             raise ValueError(
-                "num_channels should be an integer greater or equal than 1. " f"got {num_channels}."
+                f"num_channels should be an integer greater than or equal to 1. Got {num_channels}."
             )
         shape = (1, num_channels) + (1,) * dimension
         self.eps = eps
diff --git a/torch_uncertainty/layers/functional/packed.py b/torch_uncertainty/layers/functional/packed.py
index c962531e..a5ab40f9 100644
--- a/torch_uncertainty/layers/functional/packed.py
+++ b/torch_uncertainty/layers/functional/packed.py
@@ -79,15 +79,15 @@ def packed_in_projection(
         emb_q // num_groups,
         emb_v // num_groups,
     ), f"expecting value weights shape of {(emb_q, emb_v)}, but got {w_v.shape}"
-    assert b_q is None or b_q.shape == (
-        emb_q,
-    ), f"expecting query bias shape of {(emb_q,)}, but got {b_q.shape}"
-    assert b_k is None or b_k.shape == (
-        emb_q,
-    ), f"expecting key bias shape of {(emb_k,)}, but got {b_k.shape}"
-    assert b_v is None or b_v.shape == (
-        emb_q,
-    ), f"expecting value bias shape of {(emb_v,)}, but got {b_v.shape}"
+    assert b_q is None or b_q.shape == (emb_q,), (
+        f"expecting query bias shape of {(emb_q,)}, but got {b_q.shape}"
+    )
+    assert b_k is None or b_k.shape == (emb_q,), (
+        f"expecting key bias shape of {(emb_k,)}, but got {b_k.shape}"
+    )
+    assert b_v is None or b_v.shape == (emb_q,), (
+        f"expecting value bias shape of {(emb_v,)}, but got {b_v.shape}"
+    )
 
     return (
         packed_linear(q, w_q, num_groups, implementation, b_q),
@@ -324,47 +324,47 @@ def packed_multi_head_attention_forward(  # noqa: D417
     # longer causal.
     is_causal = False
 
-    assert (
-        embed_dim == embed_dim_to_check
-    ), f"was expecting embedding dimension of {embed_dim_to_check}, but got {embed_dim}"
+    assert embed_dim == embed_dim_to_check, (
+        f"was expecting embedding dimension of {embed_dim_to_check}, but got {embed_dim}"
+    )
     if isinstance(embed_dim, Tensor):
         # embed_dim can be a tensor when JIT tracing
         head_dim = embed_dim.div(num_heads, rounding_mode="trunc")
     else:
         head_dim = embed_dim // num_heads
-    assert (
-        head_dim * num_heads == embed_dim
-    ), f"embed_dim {embed_dim} not divisible by num_heads {num_heads}"
+    assert head_dim * num_heads == embed_dim, (
+        f"embed_dim {embed_dim} not divisible by num_heads {num_heads}"
+    )
     if use_separate_proj_weight:
         # allow MHA to have different embedding dimensions when separate projection weights are used
-        assert (
-            key.shape[:2] == value.shape[:2]
-        ), f"key's sequence and batch dims {key.shape[:2]} do not match value's {value.shape[:2]}"
+        assert key.shape[:2] == value.shape[:2], (
+            f"key's sequence and batch dims {key.shape[:2]} do not match value's {value.shape[:2]}"
+        )
     else:
-        assert (
-            key.shape == value.shape
-        ), f"key shape {key.shape} does not match value shape {value.shape}"
+        assert key.shape == value.shape, (
+            f"key shape {key.shape} does not match value shape {value.shape}"
+        )
 
     #
     # compute in-projection
     #
     if not use_separate_proj_weight:
-        assert (
-            in_proj_weight is not None
-        ), "use_separate_proj_weight is False but in_proj_weight is None"
+        assert in_proj_weight is not None, (
+            "use_separate_proj_weight is False but in_proj_weight is None"
+        )
         q, k, v = packed_in_projection_packed(
             q=query, k=key, v=value, w=in_proj_weight, num_groups=num_groups, b=in_proj_bias
         )
     else:
-        assert (
-            q_proj_weight is not None
-        ), "use_separate_proj_weight is True but q_proj_weight is None"
-        assert (
-            k_proj_weight is not None
-        ), "use_separate_proj_weight is True but k_proj_weight is None"
-        assert (
-            v_proj_weight is not None
-        ), "use_separate_proj_weight is True but v_proj_weight is None"
+        assert q_proj_weight is not None, (
+            "use_separate_proj_weight is True but q_proj_weight is None"
+        )
+        assert k_proj_weight is not None, (
+            "use_separate_proj_weight is True but k_proj_weight is None"
+        )
+        assert v_proj_weight is not None, (
+            "use_separate_proj_weight is True but v_proj_weight is None"
+        )
     if in_proj_bias is None:
         b_q = b_k = b_v = None
     else:
diff --git a/torch_uncertainty/layers/packed.py b/torch_uncertainty/layers/packed.py
index 71537b82..647390a7 100644
--- a/torch_uncertainty/layers/packed.py
+++ b/torch_uncertainty/layers/packed.py
@@ -33,11 +33,9 @@ def check_packed_parameters_consistency(alpha: float, gamma: int, num_estimators
     if num_estimators is None:
         raise ValueError("You must specify the value of the arg. `num_estimators`")
     if not isinstance(num_estimators, int):
-        raise TypeError(
-            "Attribute `num_estimators` should be an int, not " f"{type(num_estimators)}"
-        )
+        raise TypeError(f"Attribute `num_estimators` should be an int, not {type(num_estimators)}")
     if num_estimators <= 0:
-        raise ValueError("Attribute `num_estimators` should be >= 1, not " f"{num_estimators}")
+        raise ValueError(f"Attribute `num_estimators` should be >= 1, not {num_estimators}")
 
 
 class PackedLinear(nn.Module):
@@ -697,9 +695,9 @@ def __init__(
         self.dropout = dropout
         self.batch_first = batch_first
         self.head_dim = self.embed_dim // self.num_heads
-        assert (
-            self.head_dim * self.num_heads == self.embed_dim
-        ), "embed_dim must be divisible by num_heads"
+        assert self.head_dim * self.num_heads == self.embed_dim, (
+            "embed_dim must be divisible by num_heads"
+        )
 
         self.num_estimators = num_estimators
         self.alpha = alpha
diff --git a/torch_uncertainty/losses/bayesian.py b/torch_uncertainty/losses/bayesian.py
index 3e0e632e..4af6d77b 100644
--- a/torch_uncertainty/losses/bayesian.py
+++ b/torch_uncertainty/losses/bayesian.py
@@ -98,12 +98,12 @@ def set_model(self, model: nn.Module | None) -> None:
 
 def _elbo_loss_checks(inner_loss: nn.Module, kl_weight: float, num_samples: int) -> None:
     if isinstance(inner_loss, type):
-        raise TypeError("The inner_loss should be an instance of a class." f"Got {inner_loss}.")
+        raise TypeError(f"The inner_loss should be an instance of a class. Got {inner_loss}.")
 
     if kl_weight < 0:
         raise ValueError(f"The KL weight should be non-negative. Got {kl_weight}.")
 
     if num_samples < 1:
-        raise ValueError("The number of samples should not be lower than 1." f"Got {num_samples}.")
+        raise ValueError(f"The number of samples should not be lower than 1. Got {num_samples}.")
     if not isinstance(num_samples, int):
-        raise TypeError("The number of samples should be an integer. " f"Got {type(num_samples)}.")
+        raise TypeError(f"The number of samples should be an integer. Got {type(num_samples)}.")
diff --git a/torch_uncertainty/losses/classification.py b/torch_uncertainty/losses/classification.py
index 82c48280..96769338 100644
--- a/torch_uncertainty/losses/classification.py
+++ b/torch_uncertainty/losses/classification.py
@@ -31,12 +31,12 @@ def __init__(
 
         if reg_weight is not None and (reg_weight < 0):
             raise ValueError(
-                "The regularization weight should be non-negative, but got " f"{reg_weight}."
+                f"The regularization weight should be non-negative, but got {reg_weight}."
             )
         self.reg_weight = reg_weight
 
         if annealing_step is not None and (annealing_step <= 0):
-            raise ValueError("The annealing step should be positive, but got " f"{annealing_step}.")
+            raise ValueError(f"The annealing step should be positive, but got {annealing_step}.")
         self.annealing_step = annealing_step
 
         if reduction not in ("none", "mean", "sum") and reduction is not None:
@@ -178,11 +178,11 @@ def __init__(
         self.reduction = reduction
 
         if eps < 0:
-            raise ValueError("The epsilon value should be non-negative, but got " f"{eps}.")
+            raise ValueError(f"The epsilon value should be non-negative, but got {eps}.")
         self.eps = eps
 
         if reg_weight < 0:
             raise ValueError(
-                "The regularization weight should be non-negative, but got " f"{reg_weight}."
+                f"The regularization weight should be non-negative, but got {reg_weight}."
             )
         self.reg_weight = reg_weight
@@ -233,7 +233,7 @@ def __init__(
         self.reduction = reduction
         if reg_weight < 0:
             raise ValueError(
-                "The regularization weight should be non-negative, but got " f"{reg_weight}."
+                f"The regularization weight should be non-negative, but got {reg_weight}."
             )
         self.reg_weight = reg_weight
 
@@ -287,7 +287,7 @@ def __init__(
 
         if gamma < 0:
             raise ValueError(
-                "The gamma term of the focal loss should be non-negative, but got " f"{gamma}."
+                f"The gamma term of the focal loss should be non-negative, but got {gamma}."
             )
         self.gamma = gamma
 
diff --git a/torch_uncertainty/losses/regression.py b/torch_uncertainty/losses/regression.py
index 888de286..479f7ff5 100644
--- a/torch_uncertainty/losses/regression.py
+++ b/torch_uncertainty/losses/regression.py
@@ -67,7 +67,7 @@ def __init__(self, reg_weight: float, reduction: str | None = "mean") -> None:
 
         if reg_weight < 0:
             raise ValueError(
-                "The regularization weight should be non-negative, but got " f"{reg_weight}."
+                f"The regularization weight should be non-negative, but got {reg_weight}."
             )
         self.reg_weight = reg_weight
 
@@ -114,7 +114,7 @@ def __init__(self, beta: float = 0.5, reduction: str | None = "mean") -> None:
         super().__init__()
 
         if beta < 0 or beta > 1:
-            raise ValueError("The beta parameter should be in range [0, 1], but got " f"{beta}.")
+            raise ValueError(f"The beta parameter should be in range [0, 1], but got {beta}.")
         self.beta = beta
         self.nll_loss = nn.GaussianNLLLoss(reduction="none")
         if reduction not in ("none", "mean", "sum"):
diff --git a/torch_uncertainty/metrics/classification/fpr.py b/torch_uncertainty/metrics/classification/fpr.py
index 53e3e779..d0779b44 100644
--- a/torch_uncertainty/metrics/classification/fpr.py
+++ b/torch_uncertainty/metrics/classification/fpr.py
@@ -35,7 +35,7 @@ def __init__(self, recall_level: float, pos_label: int, **kwargs) -> None:
         self.add_state("targets", [], dist_reduce_fx="cat")
 
         rank_zero_warn(
-            f"Metric `FPR{int(recall_level*100)}` will save all targets and predictions"
+            f"Metric `FPR{int(recall_level * 100)}` will save all targets and predictions"
             " in buffer. For large datasets this may lead to large memory"
             " footprint."
         )
diff --git a/torch_uncertainty/post_processing/calibration/scaler.py b/torch_uncertainty/post_processing/calibration/scaler.py
index a3e90eed..a5398df2 100644
--- a/torch_uncertainty/post_processing/calibration/scaler.py
+++ b/torch_uncertainty/post_processing/calibration/scaler.py
@@ -94,7 +94,7 @@ def calib_eval() -> float:
 
     def forward(self, inputs: Tensor) -> Tensor:
         if not self.trained:
             logging.error(
-                "TemperatureScaler has not been trained yet. Returning " "manually tempered inputs."
+                "TemperatureScaler has not been trained yet. Returning manually tempered inputs."
             )
         return self._scale(self.model(inputs))
diff --git a/torch_uncertainty/post_processing/mc_batch_norm.py b/torch_uncertainty/post_processing/mc_batch_norm.py
index 5357d954..0a1d250d 100644
--- a/torch_uncertainty/post_processing/mc_batch_norm.py
+++ b/torch_uncertainty/post_processing/mc_batch_norm.py
@@ -168,4 +168,4 @@ def _mcbn_checks(model, num_estimators, mc_batch_size, convert):
     if mc_batch_size < 1 or not isinstance(mc_batch_size, int):
         raise ValueError(f"mc_batch_size must be a positive integer, got {mc_batch_size}.")
     if not convert and not has_mcbn(model):
-        raise ValueError("model does not contain any MCBatchNorm2d nor is not to be " "converted.")
+        raise ValueError("model does not contain any MCBatchNorm2d nor is it to be converted.")
diff --git a/torch_uncertainty/routines/classification.py b/torch_uncertainty/routines/classification.py
index 8487f93f..7c752a11 100644
--- a/torch_uncertainty/routines/classification.py
+++ b/torch_uncertainty/routines/classification.py
@@ -717,7 +717,7 @@ def _classification_routine_checks(
 
     if not is_ensemble and ood_criterion in ["mi", "vr"]:
         raise ValueError(
-            "You cannot use mutual information or variation ratio with a single" " model."
+            "You cannot use mutual information or variation ratio with a single model."
         )
 
     if is_ensemble and eval_grouping_loss:
@@ -727,12 +727,12 @@ def _classification_routine_checks(
 
     if num_classes < 1:
         raise ValueError(
-            "The number of classes must be a positive integer >= 1." f"Got {num_classes}."
+            f"The number of classes must be a positive integer >= 1. Got {num_classes}."
         )
 
     if eval_grouping_loss and not hasattr(model, "feats_forward"):
         raise ValueError(
-            "Your model must have a `feats_forward` method to compute the " "grouping loss."
+            "Your model must have a `feats_forward` method to compute the grouping loss."
         )
 
     if eval_grouping_loss and not (
diff --git a/torch_uncertainty/routines/pixel_regression.py b/torch_uncertainty/routines/pixel_regression.py
index 3fa450f2..92b9b9da 100644
--- a/torch_uncertainty/routines/pixel_regression.py
+++ b/torch_uncertainty/routines/pixel_regression.py
@@ -86,7 +86,7 @@ def __init__(
         _depth_routine_checks(output_dim, num_image_plot, log_plots)
         if eval_shift:
             raise NotImplementedError(
-                "Distribution shift evaluation not implemented yet. Raise an issue " "if needed."
+                "Distribution shift evaluation not implemented yet. Raise an issue if needed."
             )
 
         self.model = model
@@ -294,7 +294,7 @@ def test_step(
         """
         if dataloader_idx != 0:
             raise NotImplementedError(
-                "Depth OOD detection not implemented yet. Raise an issue " "if needed."
+                "Depth OOD detection not implemented yet. Raise an issue if needed."
             )
         inputs, targets = batch
         if self.one_dim_depth:
diff --git a/torch_uncertainty/routines/regression.py b/torch_uncertainty/routines/regression.py
index 07a43bb8..88514d39 100644
--- a/torch_uncertainty/routines/regression.py
+++ b/torch_uncertainty/routines/regression.py
@@ -76,7 +76,7 @@ def __init__(
         _regression_routine_checks(output_dim)
         if eval_shift:
             raise NotImplementedError(
-                "Distribution shift evaluation not implemented yet. Raise an issue " "if needed."
+                "Distribution shift evaluation not implemented yet. Raise an issue if needed."
             )
 
         self.model = model
@@ -271,7 +271,7 @@ def test_step(
         """
         if dataloader_idx != 0:
             raise NotImplementedError(
-                "Regression OOD detection not implemented yet. Raise an issue " "if needed."
+                "Regression OOD detection not implemented yet. Raise an issue if needed."
             )
 
         inputs, targets = batch
diff --git a/torch_uncertainty/routines/segmentation.py b/torch_uncertainty/routines/segmentation.py
index ba08e367..fac59ecb 100644
--- a/torch_uncertainty/routines/segmentation.py
+++ b/torch_uncertainty/routines/segmentation.py
@@ -76,7 +76,7 @@ def __init__(
         )
         if eval_shift:
             raise NotImplementedError(
-                "Distribution shift evaluation not implemented yet. Raise an issue " "if needed."
+                "Distribution shift evaluation not implemented yet. Raise an issue if needed."
             )
 
         self.model = model
diff --git a/torch_uncertainty/transforms/corruption.py b/torch_uncertainty/transforms/corruption.py
index ffa5f491..2c9f59bd 100644
--- a/torch_uncertainty/transforms/corruption.py
+++ b/torch_uncertainty/transforms/corruption.py
@@ -66,24 +66,24 @@ def motion_blur(self, radius=0.0, sigma=0.0, angle=0.0):
     from .image import Saturation as ISaturation
 
 __all__ = [
-    "GaussianNoise",
-    "ShotNoise",
-    "ImpulseNoise",
-    "DefocusBlur",
-    "GlassBlur",
-    "MotionBlur",
-    "ZoomBlur",
-    "Snow",
-    "Frost",
-    "Fog",
     "Brightness",
     "Contrast",
+    "DefocusBlur",
     "Elastic",
-    "Pixelate",
-    "JPEGCompression",
+    "Fog",
+    "Frost",
     "GaussianBlur",
-    "SpeckleNoise",
+    "GaussianNoise",
+    "GlassBlur",
+    "ImpulseNoise",
+    "JPEGCompression",
+    "MotionBlur",
+    "Pixelate",
     "Saturation",
+    "ShotNoise",
+    "Snow",
+    "SpeckleNoise",
+    "ZoomBlur",
     "corruption_transforms",
 ]
diff --git a/torch_uncertainty/utils/distributions.py b/torch_uncertainty/utils/distributions.py
index 3e115e93..860b96d5 100644
--- a/torch_uncertainty/utils/distributions.py
+++ b/torch_uncertainty/utils/distributions.py
@@ -33,7 +33,7 @@ def get_dist_class(dist_family: str) -> type[Distribution]:
     if dist_family == "student":
         return StudentT
     raise NotImplementedError(
-        f"{dist_family} distribution is not supported." "Raise an issue if needed."
+        f"{dist_family} distribution is not supported. Raise an issue if needed."
     )
 
 
@@ -52,7 +52,7 @@ def get_dist_estimate(dist: Distribution, dist_estimate: str) -> Tensor:
     if dist_estimate == "mode":
         return dist.mode
     raise NotImplementedError(
-        f"{dist_estimate} estimate is not supported." "Raise an issue if needed."
+        f"{dist_estimate} estimate is not supported. Raise an issue if needed."
     )