Enable Ruff Rules: PLW1514 and PLR6201 (#2284)
* pre-commit autoupdate

Signed-off-by: Samet Akcay <[email protected]>

* Enable preview features, and disable some of the updated features

* Add missing copyrights

Signed-off-by: Samet Akcay <[email protected]>

* Ignore copyrights in notebooks

* "PLW1514", # Add explicit encoding argument

Signed-off-by: Samet Akcay <[email protected]>

* "PLR6201", # Convert to set

Signed-off-by: Samet Akcay <[email protected]>

---------

Signed-off-by: Samet Akcay <[email protected]>
samet-akcay authored Sep 2, 2024
1 parent d3ad0cf commit 9e58ab3
Showing 29 changed files with 49 additions and 52 deletions.
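For readers unfamiliar with the two rules, here is a minimal, hypothetical sketch of what each one flags and how the fix looks (illustrative only; the hunks below show the real occurrences in the repository):

    from pathlib import Path

    path = Path("demo.txt")
    path.write_text("hello", encoding="utf-8")

    # PLW1514: opening a text file without an explicit encoding is flagged,
    # because the default encoding is platform-dependent.
    with path.open() as f:                  # flagged by PLW1514
        _ = f.read()
    with path.open(encoding="utf-8") as f:  # explicit encoding, rule-clean
        _ = f.read()

    # PLR6201: membership tests against a list or tuple literal are flagged;
    # a set literal states the intent and gives O(1) average-case lookups.
    print(path.suffix in (".txt", ".md"))   # flagged by PLR6201
    print(path.suffix in {".txt", ".md"})   # rule-clean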
2 changes: 0 additions & 2 deletions pyproject.toml
@@ -166,8 +166,6 @@ lint.ignore = [
"A005", # Module is shadowing a Python built-in
"B909", # Mutation to loop iterable during iteration
"PLR6301", # could be a function, class method or static method
"PLW1514", # Add explicit encoding argument
"PLR6201", # Convert to set
"PLC2701", # Private name import
"PLC0415", # import should be at the top of the file
"PLR0917", # Too many positional arguments
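Note that deleting the two entries from lint.ignore is what turns the rules on: Ruff applies every selected rule that is not explicitly ignored, and (per the first commit-message bullet) preview features were enabled so these newer pylint-ported rules are available. Each hunk below is a mechanical consequence of this two-line deletion.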
6 changes: 3 additions & 3 deletions src/anomalib/callbacks/checkpoint.py
@@ -35,10 +35,10 @@ def _should_skip_saving_checkpoint(self, trainer: Trainer) -> bool:
Overrides the parent method to allow saving during both the ``FITTING`` and ``VALIDATING`` states, and to allow
saving when the global step and last_global_step_saved are both 0 (only for zero-/few-shot models).
"""
-is_zero_or_few_shot = trainer.lightning_module.learning_type in [LearningType.ZERO_SHOT, LearningType.FEW_SHOT]
+is_zero_or_few_shot = trainer.lightning_module.learning_type in {LearningType.ZERO_SHOT, LearningType.FEW_SHOT}
return (
bool(trainer.fast_dev_run) # disable checkpointing with fast_dev_run
-or trainer.state.fn not in [TrainerFn.FITTING, TrainerFn.VALIDATING] # don't save anything during non-fit
+or trainer.state.fn not in {TrainerFn.FITTING, TrainerFn.VALIDATING} # don't save anything during non-fit
or trainer.sanity_checking # don't save anything during sanity check
or (self._last_global_step_saved == trainer.global_step and not is_zero_or_few_shot)
)
@@ -52,7 +52,7 @@ def _should_save_on_train_epoch_end(self, trainer: Trainer) -> bool:
if self._save_on_train_epoch_end is not None:
return self._save_on_train_epoch_end

-if trainer.lightning_module.learning_type in [LearningType.ZERO_SHOT, LearningType.FEW_SHOT]:
+if trainer.lightning_module.learning_type in {LearningType.ZERO_SHOT, LearningType.FEW_SHOT}:
return False

return super()._should_save_on_train_epoch_end(trainer)
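The set form is preferred for more than style (an illustrative, CPython-specific aside, not part of the commit): when every member is a constant, CPython folds a set literal used in a membership test into a frozenset constant, so the rewrite costs nothing at runtime; with non-constant members such as the LearningType values above, the set is rebuilt per call but membership stays O(1) on average.

    import dis

    # On CPython, a constant set literal in a membership test is folded into
    # a single frozenset constant -- look for LOAD_CONST frozenset({...}).
    dis.dis(compile("suffix in {'.bmp', '.png'}", "<demo>", "eval"))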
10 changes: 5 additions & 5 deletions src/anomalib/cli/cli.py
@@ -154,7 +154,7 @@ def add_arguments_to_parser(self, parser: ArgumentParser) -> None:
parser.add_argument("--metrics.pixel", type=list[str] | str | None, default=None, required=False)
parser.add_argument("--metrics.threshold", type=Threshold | str, default="F1AdaptiveThreshold")
parser.add_argument("--logging.log_graph", type=bool, help="Log the model to the logger", default=False)
-if hasattr(parser, "subcommand") and parser.subcommand not in ("export", "predict"):
+if hasattr(parser, "subcommand") and parser.subcommand not in {"export", "predict"}:
parser.link_arguments("task", "data.init_args.task")
parser.add_argument(
"--default_root_dir",
@@ -278,7 +278,7 @@ def _set_install_subcommand(self, action_subcommand: _ActionSubCommands) -> None
def before_instantiate_classes(self) -> None:
"""Modify the configuration to properly instantiate classes and sets up tiler."""
subcommand = self.config["subcommand"]
-if subcommand in (*self.subcommands(), "train", "predict"):
+if subcommand in {*self.subcommands(), "train", "predict"}:
self.config[subcommand] = update_config(self.config[subcommand])

def instantiate_classes(self) -> None:
@@ -288,7 +288,7 @@ def instantiate_classes(self) -> None:
But for subcommands we do not want to instantiate any trainer specific classes such as datamodule, model, etc
This is because the subcommand is responsible for instantiating and executing code based on the passed config
"""
-if self.config["subcommand"] in (*self.subcommands(), "predict"): # trainer commands
+if self.config["subcommand"] in {*self.subcommands(), "predict"}: # trainer commands
# since all classes are instantiated, the LightningCLI also creates an unused ``Trainer`` object.
# the minor change here is that engine is instantiated instead of trainer
self.config_init = self.parser.instantiate_classes(self.config)
@@ -301,7 +301,7 @@ def instantiate_classes(self) -> None:
else:
self.config_init = self.parser.instantiate_classes(self.config)
subcommand = self.config["subcommand"]
-if subcommand in ("train", "export"):
+if subcommand in {"train", "export"}:
self.instantiate_engine()
if "model" in self.config_init[subcommand]:
self.model = self._get(self.config_init, "model")
@@ -359,7 +359,7 @@ def _run_subcommand(self) -> None:

install_kwargs = self.config.get("install", {})
anomalib_install(**install_kwargs)
-elif self.config["subcommand"] in (*self.subcommands(), "train", "export", "predict"):
+elif self.config["subcommand"] in {*self.subcommands(), "train", "export", "predict"}:
fn = getattr(self.engine, self.subcommand)
fn_kwargs = self._prepare_subcommand_kwargs(self.subcommand)
fn(**fn_kwargs)
4 changes: 2 additions & 2 deletions src/anomalib/cli/install.py
@@ -54,12 +54,12 @@ def anomalib_install(option: str = "full", verbose: bool = False) -> int:

# Parse requirements into torch and other requirements.
# This is done to parse the correct version of torch (cpu/cuda).
-torch_requirement, other_requirements = parse_requirements(requirements, skip_torch=option not in ("full", "core"))
+torch_requirement, other_requirements = parse_requirements(requirements, skip_torch=option not in {"full", "core"})

# Get install args for torch to install it from a specific index-url
install_args: list[str] = []
torch_install_args = []
-if option in ("full", "core") and torch_requirement is not None:
+if option in {"full", "core"} and torch_requirement is not None:
torch_install_args = get_torch_install_args(torch_requirement)

# Combine torch and other requirements.
4 changes: 2 additions & 2 deletions src/anomalib/cli/utils/help_formatter.py
@@ -65,7 +65,7 @@ def get_verbosity_subcommand() -> dict:
{'subcommand': 'train', 'help': True, 'verbosity': 1}
"""
arguments: dict = {"subcommand": None, "help": False, "verbosity": 2}
-if len(sys.argv) >= 2 and sys.argv[1] not in ("--help", "-h"):
+if len(sys.argv) >= 2 and sys.argv[1] not in {"--help", "-h"}:
arguments["subcommand"] = sys.argv[1]
if "--help" in sys.argv or "-h" in sys.argv:
arguments["help"] = True
@@ -252,7 +252,7 @@ def format_help(self) -> str:
"""
with self.console.capture() as capture:
section = self._root_section
-if self.subcommand in REQUIRED_ARGUMENTS and self.verbosity_level in (0, 1) and len(section.rich_items) > 1:
+if self.subcommand in REQUIRED_ARGUMENTS and self.verbosity_level in {0, 1} and len(section.rich_items) > 1:
contents = render_guide(self.subcommand)
for content in contents:
self.console.print(content)
6 changes: 3 additions & 3 deletions src/anomalib/cli/utils/installation.py
@@ -134,7 +134,7 @@ def get_cuda_version() -> str | None:
# Check $CUDA_HOME/version.json file.
version_file = Path(cuda_home) / "version.json"
if version_file.is_file():
-with Path(version_file).open() as file:
+with Path(version_file).open(encoding="utf-8") as file:
data = json.load(file)
cuda_version = data.get("cuda", {}).get("version", None)
if cuda_version is not None:
@@ -319,7 +319,7 @@ def get_torch_install_args(requirement: str | Requirement) -> list[str]:
)
install_args: list[str] = []

-if platform.system() in ("Linux", "Windows"):
+if platform.system() in {"Linux", "Windows"}:
# Get the hardware suffix (eg., +cpu, +cu116 and +cu118 etc.)
hardware_suffix = get_hardware_suffix(with_available_torch_build=True, torch_version=version)

@@ -339,7 +339,7 @@
torch_version,
torchvision_requirement,
]
-elif platform.system() in ("macos", "Darwin"):
+elif platform.system() in {"macos", "Darwin"}:
torch_version = str(requirement)
install_args += [torch_version]
else:
2 changes: 1 addition & 1 deletion src/anomalib/cli/utils/openvino.py
@@ -25,7 +25,7 @@ def add_openvino_export_arguments(parser: ArgumentParser) -> None:
ov_parser = get_common_cli_parser()
# remove redundant keys from mo keys
for arg in ov_parser._actions: # noqa: SLF001
-if arg.dest in ("help", "input_model", "output_dir"):
+if arg.dest in {"help", "input_model", "output_dir"}:
continue
group.add_argument(f"--ov_args.{arg.dest}", type=arg.type, default=arg.default, help=arg.help)
else:
2 changes: 1 addition & 1 deletion src/anomalib/data/base/dataset.py
@@ -171,7 +171,7 @@ def __getitem__(self, index: int) -> dict[str, str | torch.Tensor]:

if self.task == TaskType.CLASSIFICATION:
item["image"] = self.transform(image) if self.transform else image
-elif self.task in (TaskType.DETECTION, TaskType.SEGMENTATION):
+elif self.task in {TaskType.DETECTION, TaskType.SEGMENTATION}:
# Only Anomalous (1) images have masks in anomaly datasets
# Therefore, create empty mask for Normal (0) images.
mask = (
2 changes: 1 addition & 1 deletion src/anomalib/data/base/depth.py
@@ -52,7 +52,7 @@ def __getitem__(self, index: int) -> dict[str, str | torch.Tensor]:
item["image"], item["depth_image"] = (
self.transform(image, depth_image) if self.transform else (image, depth_image)
)
-elif self.task in (TaskType.DETECTION, TaskType.SEGMENTATION):
+elif self.task in {TaskType.DETECTION, TaskType.SEGMENTATION}:
# Only Anomalous (1) images have masks in anomaly datasets
# Therefore, create empty mask for Normal (0) images.
mask = (
2 changes: 1 addition & 1 deletion src/anomalib/data/image/btech.py
@@ -81,7 +81,7 @@ def make_btech_dataset(path: Path, split: str | Split | None = None) -> DataFram
path = validate_path(path)

samples_list = [
-(str(path),) + filename.parts[-3:] for filename in path.glob("**/*") if filename.suffix in (".bmp", ".png")
+(str(path),) + filename.parts[-3:] for filename in path.glob("**/*") if filename.suffix in {".bmp", ".png"}
]
if not samples_list:
msg = f"Found 0 images in {path}"
2 changes: 1 addition & 1 deletion src/anomalib/data/utils/download.py
@@ -299,7 +299,7 @@ def extract(file_name: Path, root: Path) -> None:
zip_file.extract(file_info, root)

# Safely extract tar files.
-elif file_name.suffix in (".tar", ".gz", ".xz", ".tgz"):
+elif file_name.suffix in {".tar", ".gz", ".xz", ".tgz"}:
with tarfile.open(file_name) as tar_file:
members = tar_file.getmembers()
safe_members = [member for member in members if not is_file_potentially_dangerous(member.name)]
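For context, the surrounding extract function filters archive members before extracting them; below is a rough, self-contained sketch of that pattern, with a hypothetical looks_safe predicate standing in for the repository's is_file_potentially_dangerous helper:

    import tarfile
    from pathlib import Path

    def looks_safe(name: str) -> bool:
        # Hypothetical stand-in: reject absolute paths and parent traversal.
        return not Path(name).is_absolute() and ".." not in Path(name).parts

    def extract_tar(file_name: Path, root: Path) -> None:
        # Gate on a set of known tar suffixes, mirroring the hunk above.
        if file_name.suffix not in {".tar", ".gz", ".xz", ".tgz"}:
            msg = f"Unsupported archive suffix: {file_name.suffix}"
            raise ValueError(msg)
        with tarfile.open(file_name) as tar_file:
            safe_members = [m for m in tar_file.getmembers() if looks_safe(m.name)]
            tar_file.extractall(root, members=safe_members)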
2 changes: 1 addition & 1 deletion src/anomalib/data/utils/tiler.py
@@ -181,7 +181,7 @@ def __init__(
msg,
)

-if self.mode not in (ImageUpscaleMode.PADDING, ImageUpscaleMode.INTERPOLATION):
+if self.mode not in {ImageUpscaleMode.PADDING, ImageUpscaleMode.INTERPOLATION}:
msg = f"Unknown tiling mode {self.mode}. Available modes are padding and interpolation"
raise ValueError(msg)

4 changes: 2 additions & 2 deletions src/anomalib/deploy/inferencers/openvino_inferencer.py
@@ -127,7 +127,7 @@ def load_model(self, path: str | Path | tuple[bytes, bytes]) -> tuple[Any, Any,
model = core.read_model(model=path[0], weights=path[1])
else:
path = path if isinstance(path, Path) else Path(path)
-if path.suffix in (".bin", ".xml"):
+if path.suffix in {".bin", ".xml"}:
if path.suffix == ".bin":
bin_path, xml_path = path, path.with_suffix(".xml")
elif path.suffix == ".xml":
@@ -279,7 +279,7 @@ def post_process(self, predictions: np.ndarray, metadata: dict | DictConfig | No

if task == TaskType.CLASSIFICATION:
_, pred_score = self._normalize(pred_scores=pred_score, metadata=metadata)
-elif task in (TaskType.SEGMENTATION, TaskType.DETECTION):
+elif task in {TaskType.SEGMENTATION, TaskType.DETECTION}:
if "pixel_threshold" in metadata:
pred_mask = (anomaly_map >= metadata["pixel_threshold"]).astype(np.uint8)

4 changes: 2 additions & 2 deletions src/anomalib/deploy/inferencers/torch_inferencer.py
@@ -80,7 +80,7 @@ def _get_device(device: str) -> torch.device:
Returns:
torch.device: Device to use for inference.
"""
-if device not in ("auto", "cpu", "cuda", "gpu"):
+if device not in {"auto", "cpu", "cuda", "gpu"}:
msg = f"Unknown device {device}"
raise ValueError(msg)

@@ -102,7 +102,7 @@ def _load_checkpoint(self, path: str | Path) -> dict:
if isinstance(path, str):
path = Path(path)

-if path.suffix not in (".pt", ".pth"):
+if path.suffix not in {".pt", ".pth"}:
msg = f"Unknown torch checkpoint file format {path.suffix}. Make sure you save the Torch model."
raise ValueError(msg)

6 changes: 3 additions & 3 deletions src/anomalib/engine/engine.py
@@ -474,7 +474,7 @@ def _should_run_validation(
bool: Whether it is needed to run a validation sequence.
"""
# validation before predict is only necessary for zero-/few-shot models
-if model.learning_type not in [LearningType.ZERO_SHOT, LearningType.FEW_SHOT]:
+if model.learning_type not in {LearningType.ZERO_SHOT, LearningType.FEW_SHOT}:
return False
# check if a checkpoint path is provided
if ckpt_path is not None:
@@ -534,7 +534,7 @@ def fit(
self._setup_trainer(model)
self._setup_dataset_task(train_dataloaders, val_dataloaders, datamodule)
self._setup_transform(model, datamodule=datamodule, ckpt_path=ckpt_path)
-if model.learning_type in [LearningType.ZERO_SHOT, LearningType.FEW_SHOT]:
+if model.learning_type in {LearningType.ZERO_SHOT, LearningType.FEW_SHOT}:
# if the model is zero-shot or few-shot, we only need to run validate for normalization and thresholding
self.trainer.validate(model, val_dataloaders, datamodule=datamodule, ckpt_path=ckpt_path)
else:
@@ -856,7 +856,7 @@ def train(
datamodule,
)
self._setup_transform(model, datamodule=datamodule, ckpt_path=ckpt_path)
-if model.learning_type in [LearningType.ZERO_SHOT, LearningType.FEW_SHOT]:
+if model.learning_type in {LearningType.ZERO_SHOT, LearningType.FEW_SHOT}:
# if the model is zero-shot or few-shot, we only need to run validate for normalization and thresholding
self.trainer.validate(model, val_dataloaders, None, verbose=False, datamodule=datamodule)
else:
2 changes: 1 addition & 1 deletion src/anomalib/models/image/cfa/torch_model.py
@@ -47,7 +47,7 @@ def get_return_nodes(backbone: str) -> list[str]:
raise NotImplementedError(msg)

return_nodes: list[str]
-if backbone in ("resnet18", "wide_resnet50_2"):
+if backbone in {"resnet18", "wide_resnet50_2"}:
return_nodes = ["layer1", "layer2", "layer3"]
elif backbone == "vgg19_bn":
return_nodes = ["features.25", "features.38", "features.52"]
4 changes: 2 additions & 2 deletions src/anomalib/models/image/fastflow/torch_model.py
@@ -124,11 +124,11 @@ def __init__(

self.input_size = input_size

-if backbone in ("cait_m48_448", "deit_base_distilled_patch16_384"):
+if backbone in {"cait_m48_448", "deit_base_distilled_patch16_384"}:
self.feature_extractor = timm.create_model(backbone, pretrained=pre_trained)
channels = [768]
scales = [16]
-elif backbone in ("resnet18", "wide_resnet50_2"):
+elif backbone in {"resnet18", "wide_resnet50_2"}:
self.feature_extractor = timm.create_model(
backbone,
pretrained=pre_trained,
@@ -52,7 +52,7 @@ def __init__(
self.sigma = sigma
self.kernel_size = 2 * int(4.0 * sigma + 0.5) + 1

-if mode not in (AnomalyMapGenerationMode.ADD, AnomalyMapGenerationMode.MULTIPLY):
+if mode not in {AnomalyMapGenerationMode.ADD, AnomalyMapGenerationMode.MULTIPLY}:
msg = f"Found mode {mode}. Only multiply and add are supported."
raise ValueError(msg)
self.mode = mode
@@ -163,4 +163,4 @@ def get_bottleneck_layer(backbone: str, **kwargs) -> OCBE:
Returns:
Bottleneck_layer: One-Class Bottleneck Embedding module.
"""
-return OCBE(BasicBlock, 2, **kwargs) if backbone in ("resnet18", "resnet34") else OCBE(Bottleneck, 3, **kwargs)
+return OCBE(BasicBlock, 2, **kwargs) if backbone in {"resnet18", "resnet34"} else OCBE(Bottleneck, 3, **kwargs)
@@ -336,7 +336,7 @@ def get_decoder(name: str) -> ResNet:
Returns:
ResNet: Decoder ResNet architecture.
"""
-if name in (
+if name in {
"resnet18",
"resnet34",
"resnet50",
@@ -346,7 +346,7 @@
"resnext101_32x8d",
"wide_resnet50_2",
"wide_resnet101_2",
-):
+}:
decoder = globals()[f"de_{name}"]
else:
msg = f"Decoder with architecture {name} not supported"
2 changes: 1 addition & 1 deletion src/anomalib/models/image/uflow/feature_extraction.py
@@ -33,7 +33,7 @@ def get_feature_extractor(backbone: str, input_size: tuple[int, int] = (256, 256
raise ValueError(msg)

feature_extractor: nn.Module
-if backbone in ["resnet18", "wide_resnet50_2"]:
+if backbone in {"resnet18", "wide_resnet50_2"}:
feature_extractor = FeatureExtractor(backbone, input_size, layers=("layer1", "layer2", "layer3")).eval()
if backbone == "mcait":
feature_extractor = MCaitFeatureExtractor().eval()
2 changes: 1 addition & 1 deletion src/anomalib/pipelines/components/base/pipeline.py
@@ -41,7 +41,7 @@ def _get_args(self, args: Namespace) -> dict:
parser = self.get_parser()
args = parser.parse_args()

-with Path(args.config).open() as file:
+with Path(args.config).open(encoding="utf-8") as file:
return yaml.safe_load(file)

@abstractmethod
6 changes: 3 additions & 3 deletions tests/integration/model/test_models.py
@@ -177,9 +177,9 @@ def _get_objects(
and engine
"""
# select task type
-if model_name in ("rkde", "ai_vad"):
+if model_name in {"rkde", "ai_vad"}:
task_type = TaskType.DETECTION
-elif model_name in ("ganomaly", "dfkde"):
+elif model_name in {"ganomaly", "dfkde"}:
task_type = TaskType.CLASSIFICATION
else:
task_type = TaskType.SEGMENTATION
@@ -189,7 +189,7 @@ def _get_objects(
# https://github.com/openvinotoolkit/anomalib/issues/1478

extra_args = {}
-if model_name in ("rkde", "dfkde"):
+if model_name in {"rkde", "dfkde"}:
extra_args["n_pca_components"] = 2
if model_name == "ai_vad":
pytest.skip("Revisit AI-VAD test")
2 changes: 1 addition & 1 deletion tests/unit/cli/test_installation.py
@@ -26,7 +26,7 @@
def requirements_file() -> Path:
"""Create a temporary requirements file with some example requirements."""
requirements = ["numpy==1.19.5", "opencv-python-headless>=4.5.1.48"]
-with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
+with tempfile.NamedTemporaryFile(mode="w", delete=False, encoding="utf-8") as f:
f.write("\n".join(requirements))
return Path(f.name)

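The fixture change above works because NamedTemporaryFile in text mode forwards encoding to the underlying open() call, so it satisfies PLW1514 the same way; a small self-contained check (illustrative, not from the repository):

    import tempfile
    from pathlib import Path

    # In text mode ("w"), NamedTemporaryFile accepts an explicit encoding,
    # just like builtins.open.
    with tempfile.NamedTemporaryFile(mode="w", delete=False, encoding="utf-8") as f:
        f.write("numpy==1.19.5\n")
    print(Path(f.name).read_text(encoding="utf-8"))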
4 changes: 2 additions & 2 deletions tests/unit/data/base/depth.py
@@ -23,7 +23,7 @@ def test_get_item_returns_correct_keys_and_shapes(self, datamodule: AnomalibData
# Check that the batch has the correct keys.
expected_keys = {"image_path", "depth_path", "label", "image", "depth_image"}

-if dataloader.dataset.task in ("detection", "segmentation"):
+if dataloader.dataset.task in {"detection", "segmentation"}:
expected_keys |= {"mask_path", "mask"}

if dataloader.dataset.task == "detection":
@@ -38,5 +38,5 @@ def test_get_item_returns_correct_keys_and_shapes(self, datamodule: AnomalibData
assert batch["depth_image"].shape == (4, 3, 256, 256)
assert batch["label"].shape == (4,)

-if dataloader.dataset.task in ("detection", "segmentation"):
+if dataloader.dataset.task in {"detection", "segmentation"}:
assert batch["mask"].shape == (4, 256, 256)
2 changes: 1 addition & 1 deletion tests/unit/data/base/image.py
@@ -27,7 +27,7 @@ def test_get_item_returns_correct_keys_and_shapes(self, datamodule: AnomalibData
assert batch["image"].shape == (4, 3, 256, 256)
assert batch["label"].shape == (4,)

-if dataloader.dataset.task in ("detection", "segmentation"):
+if dataloader.dataset.task in {"detection", "segmentation"}:
assert batch["mask"].shape == (4, 256, 256)

def test_non_overlapping_splits(self, datamodule: AnomalibDataModule) -> None: