⚡ Update ruff and lint
o-laurent committed Mar 7, 2025
1 parent b3c1687 commit 1b0ce53
Showing 29 changed files with 96 additions and 98 deletions.
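Nearly all of the hunks below apply the same two lint fixes that come with the pinned ruff upgrade: implicitly concatenated string literals in error messages are joined into a single literal, and long assert messages are wrapped in parentheses instead of the condition. As an illustration only (a hypothetical helper, not code from this diff), the recurring before/after pattern looks like this:

    def check_non_negative_old(value: float) -> None:
        # pre-update style: implicit concatenation splits the message,
        # and the assert wraps its condition in parentheses
        if value < 0:
            raise ValueError("The value should be non-negative, but got " f"{value}.")
        assert (
            value >= 0
        ), f"The value should be non-negative, but got {value}."

    def check_non_negative_new(value: float) -> None:
        # style adopted in this commit: a single f-string literal,
        # and the assert message is parenthesized instead
        if value < 0:
            raise ValueError(f"The value should be non-negative, but got {value}.")
        assert value >= 0, (
            f"The value should be non-negative, but got {value}."
        )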
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -51,7 +51,7 @@ dev = [
"torch_uncertainty[image]",
"huggingface-hub",
"safetensors",
"ruff==0.7.4",
"ruff==0.9.9",
"pytest-cov",
"pre-commit",
"pre-commit-hooks",
2 changes: 1 addition & 1 deletion torch_uncertainty/datasets/classification/cifar/cifar_c.py
@@ -111,7 +111,7 @@ def __init__(

if shift_severity not in list(range(1, 6)):
raise ValueError(
"Corruptions shift_severity should be chosen between 1 and 5 " "included."
"Corruptions shift_severity should be chosen between 1 and 5 included."
)
samples, labels = self.make_dataset(self.root, self.subset, self.shift_severity)

6 changes: 3 additions & 3 deletions torch_uncertainty/datasets/classification/cifar/cifar_h.py
@@ -27,7 +27,7 @@ class CIFAR10H(CIFAR10):
"""

h_test_list = ["cifar-10h-probs.npy", "7b41f73eee90fdefc73bfc820ab29ba8"]
h_url = "https://github.com/jcpeterson/cifar-10h/raw/master/data/" "cifar10h-probs.npy"
h_url = "https://github.com/jcpeterson/cifar-10h/raw/master/data/cifar10h-probs.npy"

def __init__(
self,
@@ -39,7 +39,7 @@ def __init__(
) -> None:
if train:
raise ValueError("CIFAR10H does not support training data.")
print("WARNING: CIFAR10H cannot be used with Classification routines " "for now.")
print("WARNING: CIFAR10H cannot be used within Classification routines for now.")
super().__init__(
Path(root),
train=False,
@@ -53,7 +53,7 @@ def __init__(

if not self._check_specific_integrity():
raise RuntimeError(
"Dataset not found or corrupted. You can use download=True to " "download it."
"Dataset not found or corrupted. You can use download=True to download it."
)

self.targets = list(torch.as_tensor(np.load(self.root / self.h_test_list[0])))
4 changes: 2 additions & 2 deletions torch_uncertainty/datasets/classification/cifar/cifar_n.py
@@ -61,7 +61,7 @@ def __init__(

if not self._check_specific_integrity():
raise RuntimeError(
"Dataset not found or corrupted. You can use download=True to " "download it."
"Dataset not found or corrupted. You can use download=True to download it."
)

self.targets = list(torch.load(self.root / self.filename)[file_arg])
@@ -112,7 +112,7 @@ def __init__(

if not self._check_specific_integrity():
raise RuntimeError(
"Dataset not found or corrupted. You can use download=True to " "download it."
"Dataset not found or corrupted. You can use download=True to download it."
)

self.targets = list(torch.load(self.root / self.filename)[file_arg])
2 changes: 1 addition & 1 deletion torch_uncertainty/datasets/classification/cub.py
@@ -51,7 +51,7 @@ def __init__(

if not self._check_integrity():
raise RuntimeError(
"Dataset not found or corrupted. You can use download=True to " "download it."
"Dataset not found or corrupted. You can use download=True to download it."
)

super().__init__(Path(root) / "CUB_200_2011" / "images", transform, target_transform)
2 changes: 1 addition & 1 deletion torch_uncertainty/datasets/classification/imagenet/base.py
@@ -52,7 +52,7 @@ def __init__(

if not self._check_integrity():
raise RuntimeError(
"Dataset not found or corrupted. " "You can use download=True to download it."
"Dataset not found or corrupted. You can use download=True to download it."
)

super().__init__(
2 changes: 1 addition & 1 deletion torch_uncertainty/datasets/classification/not_mnist.py
@@ -60,7 +60,7 @@ def __init__(

if not self._check_integrity():
raise RuntimeError(
"Dataset not found or corrupted. You can use download=True to " "download it."
"Dataset not found or corrupted. You can use download=True to download it."
)

super().__init__(
2 changes: 1 addition & 1 deletion torch_uncertainty/datasets/fractals.py
@@ -40,7 +40,7 @@ def __init__(

if not self._check_integrity():
raise RuntimeError(
"Dataset not found or corrupted. You can use download=True to " "download it."
"Dataset not found or corrupted. You can use download=True to download it."
)

super().__init__(self.root, transform=transform, target_transform=target_transform)
2 changes: 1 addition & 1 deletion torch_uncertainty/datasets/frost.py
@@ -44,7 +44,7 @@ def __init__(

if not self._check_integrity():
raise RuntimeError(
"Dataset not found or corrupted. You can use download=True to " "download it."
"Dataset not found or corrupted. You can use download=True to download it."
)

super().__init__(
2 changes: 1 addition & 1 deletion torch_uncertainty/datasets/muad.py
@@ -232,7 +232,7 @@ def _make_dataset(self, path: Path) -> None:
"""
if "depth" in path.name:
raise NotImplementedError(
"Depth mode is not implemented yet. Raise an issue " "if you need it."
"Depth mode is not implemented yet. Raise an issue if you need it."
)
self.samples = sorted((path / "leftImg8bit/").glob("**/*"))
if self.target_type == "semantic":
2 changes: 1 addition & 1 deletion torch_uncertainty/datasets/regression/uci_regression.py
@@ -191,7 +191,7 @@ def download(self) -> None:
logging.info("Files already downloaded and verified")
return
if self.url is None:
raise ValueError(f"The dataset {self.dataset_name} is not available for " "download.")
raise ValueError(f"The dataset {self.dataset_name} is not available for download.")
download_root = self.root / self.root_appendix / self.dataset_name
if self.dataset_name == "boston":
download_url(
4 changes: 2 additions & 2 deletions torch_uncertainty/datasets/segmentation/camvid.py
@@ -133,7 +133,7 @@ def __init__(
"""
if split not in ["train", "val", "test", None]:
raise ValueError(
f"Unknown split '{split}'. " "Supported splits are ['train', 'val', 'test', None]"
f"Unknown split '{split}'. Supported splits are ['train', 'val', 'test', None]"
)

super().__init__(root, transforms, None, None)
@@ -153,7 +153,7 @@ def __init__(

if not self._check_integrity():
raise RuntimeError(
"Dataset not found or corrupted. " "You can use download=True to download it"
"Dataset not found or corrupted. You can use download=True to download it"
)

# get filenames for split
2 changes: 1 addition & 1 deletion torch_uncertainty/layers/batch_ensemble.py
@@ -143,7 +143,7 @@ def forward(self, inputs: Tensor) -> Tensor:

def extra_repr(self) -> str:
return (
f"in_features={ self.in_features},"
f"in_features={self.in_features},"
f" out_features={self.out_features},"
f" num_estimators={self.num_estimators},"
f" bias={self.bias is not None}"
4 changes: 2 additions & 2 deletions torch_uncertainty/layers/bayesian/bayes_conv.py
@@ -89,7 +89,7 @@ def __init__(

if transposed:
raise NotImplementedError(
"Bayesian transposed convolution not implemented yet. Raise an" " issue if needed."
"Bayesian transposed convolution not implemented yet. Raise an issue if needed."
)

self.in_channels = in_channels
@@ -164,7 +164,7 @@ def sample(self) -> tuple[Tensor, Tensor | None]:
return weight, bias

def extra_repr(self) -> str: # coverage: ignore
s = "{in_channels}, {out_channels}, kernel_size={kernel_size}" ", stride={stride}"
s = "{in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}"
if self.padding != (0,) * len(self.padding):
s += ", padding={padding}"
if self.dilation != (1,) * len(self.dilation):
4 changes: 2 additions & 2 deletions torch_uncertainty/layers/filter_response_norm.py
@@ -23,13 +23,13 @@ def __init__(
super().__init__()
if dimension < 1 or not isinstance(dimension, int):
raise ValueError(
"dimension should be an integer greater or equal than 1. " f"got {dimension}."
f"dimension should be an integer greater or equal than 1. Got {dimension}."
)
self.dimension = dimension

if num_channels < 1 or not isinstance(num_channels, int):
raise ValueError(
"num_channels should be an integer greater or equal than 1. " f"got {num_channels}."
f"num_channels should be an integer greater or equal than 1. Got {num_channels}."
)
shape = (1, num_channels) + (1,) * dimension
self.eps = eps
66 changes: 33 additions & 33 deletions torch_uncertainty/layers/functional/packed.py
@@ -79,15 +79,15 @@ def packed_in_projection(
emb_q // num_groups,
emb_v // num_groups,
), f"expecting value weights shape of {(emb_q, emb_v)}, but got {w_v.shape}"
- assert b_q is None or b_q.shape == (
- emb_q,
- ), f"expecting query bias shape of {(emb_q,)}, but got {b_q.shape}"
- assert b_k is None or b_k.shape == (
- emb_q,
- ), f"expecting key bias shape of {(emb_k,)}, but got {b_k.shape}"
- assert b_v is None or b_v.shape == (
- emb_q,
- ), f"expecting value bias shape of {(emb_v,)}, but got {b_v.shape}"
+ assert b_q is None or b_q.shape == (emb_q,), (
+ f"expecting query bias shape of {(emb_q,)}, but got {b_q.shape}"
+ )
+ assert b_k is None or b_k.shape == (emb_q,), (
+ f"expecting key bias shape of {(emb_k,)}, but got {b_k.shape}"
+ )
+ assert b_v is None or b_v.shape == (emb_q,), (
+ f"expecting value bias shape of {(emb_v,)}, but got {b_v.shape}"
+ )

return (
packed_linear(q, w_q, num_groups, implementation, b_q),
@@ -324,47 +324,47 @@ def packed_multi_head_attention_forward( # noqa: D417
# longer causal.
is_causal = False

- assert (
- embed_dim == embed_dim_to_check
- ), f"was expecting embedding dimension of {embed_dim_to_check}, but got {embed_dim}"
+ assert embed_dim == embed_dim_to_check, (
+ f"was expecting embedding dimension of {embed_dim_to_check}, but got {embed_dim}"
+ )
if isinstance(embed_dim, Tensor):
# embed_dim can be a tensor when JIT tracing
head_dim = embed_dim.div(num_heads, rounding_mode="trunc")
else:
head_dim = embed_dim // num_heads
- assert (
- head_dim * num_heads == embed_dim
- ), f"embed_dim {embed_dim} not divisible by num_heads {num_heads}"
+ assert head_dim * num_heads == embed_dim, (
+ f"embed_dim {embed_dim} not divisible by num_heads {num_heads}"
+ )
if use_separate_proj_weight:
# allow MHA to have different embedding dimensions when separate projection weights are used
- assert (
- key.shape[:2] == value.shape[:2]
- ), f"key's sequence and batch dims {key.shape[:2]} do not match value's {value.shape[:2]}"
+ assert key.shape[:2] == value.shape[:2], (
+ f"key's sequence and batch dims {key.shape[:2]} do not match value's {value.shape[:2]}"
+ )
else:
- assert (
- key.shape == value.shape
- ), f"key shape {key.shape} does not match value shape {value.shape}"
+ assert key.shape == value.shape, (
+ f"key shape {key.shape} does not match value shape {value.shape}"
+ )

#
# compute in-projection
#
if not use_separate_proj_weight:
- assert (
- in_proj_weight is not None
- ), "use_separate_proj_weight is False but in_proj_weight is None"
+ assert in_proj_weight is not None, (
+ "use_separate_proj_weight is False but in_proj_weight is None"
+ )
q, k, v = packed_in_projection_packed(
q=query, k=key, v=value, w=in_proj_weight, num_groups=num_groups, b=in_proj_bias
)
else:
- assert (
- q_proj_weight is not None
- ), "use_separate_proj_weight is True but q_proj_weight is None"
- assert (
- k_proj_weight is not None
- ), "use_separate_proj_weight is True but k_proj_weight is None"
- assert (
- v_proj_weight is not None
- ), "use_separate_proj_weight is True but v_proj_weight is None"
+ assert q_proj_weight is not None, (
+ "use_separate_proj_weight is True but q_proj_weight is None"
+ )
+ assert k_proj_weight is not None, (
+ "use_separate_proj_weight is True but k_proj_weight is None"
+ )
+ assert v_proj_weight is not None, (
+ "use_separate_proj_weight is True but v_proj_weight is None"
+ )
if in_proj_bias is None:
b_q = b_k = b_v = None
else:
12 changes: 5 additions & 7 deletions torch_uncertainty/layers/packed.py
@@ -33,11 +33,9 @@ def check_packed_parameters_consistency(alpha: float, gamma: int, num_estimators
if num_estimators is None:
raise ValueError("You must specify the value of the arg. `num_estimators`")
if not isinstance(num_estimators, int):
- raise TypeError(
- "Attribute `num_estimators` should be an int, not " f"{type(num_estimators)}"
- )
+ raise TypeError(f"Attribute `num_estimators` should be an int, not {type(num_estimators)}")
if num_estimators <= 0:
raise ValueError("Attribute `num_estimators` should be >= 1, not " f"{num_estimators}")
raise ValueError(f"Attribute `num_estimators` should be >= 1, not {num_estimators}")


class PackedLinear(nn.Module):
@@ -697,9 +695,9 @@ def __init__(
self.dropout = dropout
self.batch_first = batch_first
self.head_dim = self.embed_dim // self.num_heads
- assert (
- self.head_dim * self.num_heads == self.embed_dim
- ), "embed_dim must be divisible by num_heads"
+ assert self.head_dim * self.num_heads == self.embed_dim, (
+ "embed_dim must be divisible by num_heads"
+ )

self.num_estimators = num_estimators
self.alpha = alpha
6 changes: 3 additions & 3 deletions torch_uncertainty/losses/bayesian.py
@@ -98,12 +98,12 @@ def set_model(self, model: nn.Module | None) -> None:

def _elbo_loss_checks(inner_loss: nn.Module, kl_weight: float, num_samples: int) -> None:
if isinstance(inner_loss, type):
raise TypeError("The inner_loss should be an instance of a class." f"Got {inner_loss}.")
raise TypeError(f"The inner_loss should be an instance of a class.Got {inner_loss}.")

if kl_weight < 0:
raise ValueError(f"The KL weight should be non-negative. Got {kl_weight}.")

if num_samples < 1:
raise ValueError("The number of samples should not be lower than 1." f"Got {num_samples}.")
raise ValueError(f"The number of samples should not be lower than 1. Got {num_samples}.")
if not isinstance(num_samples, int):
raise TypeError("The number of samples should be an integer. " f"Got {type(num_samples)}.")
raise TypeError(f"The number of samples should be an integer. Got {type(num_samples)}.")
12 changes: 6 additions & 6 deletions torch_uncertainty/losses/classification.py
@@ -31,12 +31,12 @@ def __init__(

if reg_weight is not None and (reg_weight < 0):
raise ValueError(
"The regularization weight should be non-negative, but got " f"{reg_weight}."
f"The regularization weight should be non-negative, but got {reg_weight}."
)
self.reg_weight = reg_weight

if annealing_step is not None and (annealing_step <= 0):
raise ValueError("The annealing step should be positive, but got " f"{annealing_step}.")
raise ValueError(f"The annealing step should be positive, but got {annealing_step}.")
self.annealing_step = annealing_step

if reduction not in ("none", "mean", "sum") and reduction is not None:
@@ -178,11 +178,11 @@ def __init__(
self.reduction = reduction

if eps < 0:
raise ValueError("The epsilon value should be non-negative, but got " f"{eps}.")
raise ValueError(f"The epsilon value should be non-negative, but got {eps}.")
self.eps = eps
if reg_weight < 0:
raise ValueError(
"The regularization weight should be non-negative, but got " f"{reg_weight}."
f"The regularization weight should be non-negative, but got {reg_weight}."
)
self.reg_weight = reg_weight

@@ -233,7 +233,7 @@ def __init__(
self.reduction = reduction
if reg_weight < 0:
raise ValueError(
"The regularization weight should be non-negative, but got " f"{reg_weight}."
f"The regularization weight should be non-negative, but got {reg_weight}."
)
self.reg_weight = reg_weight

@@ -287,7 +287,7 @@ def __init__(

if gamma < 0:
raise ValueError(
"The gamma term of the focal loss should be non-negative, but got " f"{gamma}."
f"The gamma term of the focal loss should be non-negative, but got {gamma}."
)
self.gamma = gamma

4 changes: 2 additions & 2 deletions torch_uncertainty/losses/regression.py
@@ -67,7 +67,7 @@ def __init__(self, reg_weight: float, reduction: str | None = "mean") -> None:

if reg_weight < 0:
raise ValueError(
"The regularization weight should be non-negative, but got " f"{reg_weight}."
f"The regularization weight should be non-negative, but got {reg_weight}."
)
self.reg_weight = reg_weight

@@ -114,7 +114,7 @@ def __init__(self, beta: float = 0.5, reduction: str | None = "mean") -> None:
super().__init__()

if beta < 0 or beta > 1:
raise ValueError("The beta parameter should be in range [0, 1], but got " f"{beta}.")
raise ValueError(f"The beta parameter should be in range [0, 1], but got {beta}.")
self.beta = beta
self.nll_loss = nn.GaussianNLLLoss(reduction="none")
if reduction not in ("none", "mean", "sum"):