From 20ee2c9cd9b60aa05973c3786932a3ac7b2455ae Mon Sep 17 00:00:00 2001 From: rajith Date: Mon, 4 Nov 2024 17:09:38 +0530 Subject: [PATCH 1/3] Fixes for flake8 linting --- .../torch_template/src/dataloader.py | 2 ++ .../torch_template/src/taskrunner.py | 2 ++ setup.cfg | 17 ++++++++++-- .../src/testflow_privateattributes.py | 19 ++++++-------- .../src/testflow_privateattributes.py | 19 ++++++-------- .../src/testflow_privateattributes.py | 19 ++++++-------- .../experiment_runner.py | 2 +- .../envoy/kvasir_shard_descriptor.py | 1 - .../experiments/pytorch_kvasir_unet/tasks.py | 2 +- .../envoy/shard_descriptor.py | 2 -- .../tensorflow_mnist/experiment.py | 26 +++++++------------ .../experiments/tensorflow_mnist/settings.py | 1 - tests/github/pki_wrong_cn.py | 1 + tests/github/test_gandlf.py | 1 + 14 files changed, 56 insertions(+), 58 deletions(-) diff --git a/openfl-workspace/torch_template/src/dataloader.py b/openfl-workspace/torch_template/src/dataloader.py index 302f4b0385..070561f864 100644 --- a/openfl-workspace/torch_template/src/dataloader.py +++ b/openfl-workspace/torch_template/src/dataloader.py @@ -3,6 +3,7 @@ from openfl.federated import PyTorchDataLoader + class TemplateDataLoader(PyTorchDataLoader): """Template dataloader for PyTorch. This class should be used as a template to create a custom DataLoader for your specific dataset. @@ -40,6 +41,7 @@ def __init__(self, data_path, batch_size, **kwargs): self.X_valid = X_valid self.y_valid = y_valid + def load_dataset(data_path, **kwargs): """ Load your dataset here. diff --git a/openfl-workspace/torch_template/src/taskrunner.py b/openfl-workspace/torch_template/src/taskrunner.py index 8a7f24d4fc..b008d02914 100644 --- a/openfl-workspace/torch_template/src/taskrunner.py +++ b/openfl-workspace/torch_template/src/taskrunner.py @@ -8,6 +8,7 @@ from openfl.federated import PyTorchTaskRunner from openfl.utilities import Metric + class TemplateTaskRunner(PyTorchTaskRunner): """Template Task Runner for PyTorch. @@ -102,6 +103,7 @@ def validate_( accuracy = None # Placeholder for accuracy calculation. return Metric(name="accuracy", value=np.array(accuracy)) + raise NotImplementedError( "Use /src/taskrunner.py template to create a custom Task Runner " "with your model definition and training/validation logic. Then remove this line." diff --git a/setup.cfg b/setup.cfg index 886f95ce82..0594a76f59 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,11 +1,16 @@ [flake8] ignore = # Conflicts with black - E203 + E203 # Line break occurred before a binary operator. 
Update by W504 Line W503 # Allow "import torch.nn.functional as F" N812 + # Line length, handled separately by max-line-length + E501 + # too many leading '#' for block comments + E266 + per-file-ignores = # Unused imports in __init__.py are OK @@ -13,7 +18,15 @@ per-file-ignores = exclude = *_pb2*, + .git, + __pycache__, + build, + dist, + .venv -max-line-length = 100 +max-line-length = 120 copyright-check = True + +# Enable specific checks or plugins +select = B,C,E,F,W,T4,B9 diff --git a/tests/github/experimental/workspace/testcase_private_attributes/src/testflow_privateattributes.py b/tests/github/experimental/workspace/testcase_private_attributes/src/testflow_privateattributes.py index 1f76015b45..d59f9b1100 100644 --- a/tests/github/experimental/workspace/testcase_private_attributes/src/testflow_privateattributes.py +++ b/tests/github/experimental/workspace/testcase_private_attributes/src/testflow_privateattributes.py @@ -37,7 +37,7 @@ def start(self): ) self.collaborators = self.runtime.collaborators - validate_agg_private_attr(self, "start", aggr = ["test_loader_agg"], collabs =["train_loader", "test_loader"]) + validate_agg_private_attr(self, "start", aggr=["test_loader_agg"], collabs=["train_loader", "test_loader"]) self.exclude_agg_to_agg = 10 self.include_agg_to_agg = 100 @@ -49,7 +49,7 @@ def aggregator_step(self): Testing whether Agg private attributes are accessible in next agg step. Collab private attributes should not be accessible here """ - validate_agg_private_attr(self, "aggregator_step", aggr = ["test_loader_agg"], collabs =["train_loader", "test_loader"]) + validate_agg_private_attr(self, "aggregator_step", aggr=["test_loader_agg"], collabs=["train_loader", "test_loader"]) self.include_agg_to_collab = 42 self.exclude_agg_to_collab = 40 @@ -66,7 +66,7 @@ def collaborator_step_a(self): Aggregator private attributes should not be accessible here """ validate_collab_private_attrs( - self, "collaborator_step_a", aggr = ["test_loader_agg"], collabs =["train_loader", "test_loader"] + self, "collaborator_step_a", aggr=["test_loader_agg"], collabs=["train_loader", "test_loader"] ) self.exclude_collab_to_collab = 2 @@ -81,7 +81,7 @@ def collaborator_step_b(self): """ validate_collab_private_attrs( - self, "collaborator_step_b", aggr = ["test_loader_agg"], collabs =["train_loader", "test_loader"] + self, "collaborator_step_b", aggr=["test_loader_agg"], collabs=["train_loader", "test_loader"] ) self.exclude_collab_to_agg = 10 self.include_collab_to_agg = 12 @@ -152,7 +152,7 @@ def validate_agg_private_attr(self, step_name, **private_attrs_kwargs): step_name: Name of the step being validated private_attr_kwargs: Keyword arguments specifying the names of private attributes for the aggregator and collaborators. """ - agg_attrs = private_attrs_kwargs.get('aggr',[]) + agg_attrs = private_attrs_kwargs.get('aggr', []) collab_attrs = private_attrs_kwargs.get('collabs', []) # Aggregator should only be able to access its own attributes @@ -168,7 +168,7 @@ def validate_agg_private_attr(self, step_name, **private_attrs_kwargs): ) # check for collaborator private attributes that should not be accessible - breached_collab_attrs = [attr for attr in collab_attrs if hasattr(self,attr)] + breached_collab_attrs = [attr for attr in collab_attrs if hasattr(self, attr)] if breached_collab_attrs: TestFlowPrivateAttributes.ERROR_LIST.append( step_name + "_collaborator_attributes_found" @@ -177,9 +177,6 @@ def validate_agg_private_attr(self, step_name, **private_attrs_kwargs): f"{bcolors.FAIL} ... 
Attribute test failed in {step_name} - collaborator" + f"private attributes accessible:{','.join(breached_collab_attrs)} {bcolors.ENDC}" ) - - - for idx, collab in enumerate(self.collaborators): # Collaborator attributes should not be accessible in aggregator step if ( @@ -206,13 +203,13 @@ def validate_collab_private_attrs(self, step_name, **private_attrs_kwargs): step_name: Name of the step being validated private_attr_kwargs: Keyword arguments specifying the names of private attributes for the aggregator and collaborators. """ - agg_attrs = private_attrs_kwargs.get('aggr',[]) + agg_attrs = private_attrs_kwargs.get('aggr', []) collab_attrs = private_attrs_kwargs.get('collabs', []) # Collaborator should only be able to access its own attributes # check for missing collaborators attributes - inaccessible_collab_attrs = [attr for attr in collab_attrs if not hasattr(self,attr)] + inaccessible_collab_attrs = [attr for attr in collab_attrs if not hasattr(self, attr)] if inaccessible_collab_attrs: TestFlowPrivateAttributes.ERROR_LIST.append( diff --git a/tests/github/experimental/workspace/testcase_private_attributes_initialization_with_both_options/src/testflow_privateattributes.py b/tests/github/experimental/workspace/testcase_private_attributes_initialization_with_both_options/src/testflow_privateattributes.py index 49127fef0d..70a600a2e7 100644 --- a/tests/github/experimental/workspace/testcase_private_attributes_initialization_with_both_options/src/testflow_privateattributes.py +++ b/tests/github/experimental/workspace/testcase_private_attributes_initialization_with_both_options/src/testflow_privateattributes.py @@ -37,7 +37,7 @@ def start(self): ) self.collaborators = self.runtime.collaborators - validate_agg_private_attr(self,"start", aggr = ["test_loader_agg_via_callable"], collabs = ["train_loader_via_callable", "test_loader_via_callable"]) + validate_agg_private_attr(self, "start", aggr=["test_loader_agg_via_callable"], collabs=["train_loader_via_callable", "test_loader_via_callable"]) self.exclude_agg_to_agg = 10 self.include_agg_to_agg = 100 @@ -49,7 +49,7 @@ def aggregator_step(self): Testing whether Agg private attributes are accessible in next agg step. 
Collab private attributes should not be accessible here """ - validate_agg_private_attr(self, "aggregator_step", aggr = ["test_loader_agg_via_callable"], collabs = ["train_loader_via_callable", "test_loader_via_callable"]) + validate_agg_private_attr(self, "aggregator_step", aggr=["test_loader_agg_via_callable"], collabs=["train_loader_via_callable", "test_loader_via_callable"]) self.include_agg_to_collab = 42 self.exclude_agg_to_collab = 40 @@ -66,7 +66,7 @@ def collaborator_step_a(self): Aggregator private attributes should not be accessible here """ validate_collab_private_attrs( - self, "collaborator_step_a", aggr = ["test_loader_agg_via_callable"], collabs = ["train_loader_via_callable", "test_loader_via_callable"] + self, "collaborator_step_a", aggr=["test_loader_agg_via_callable"], collabs=["train_loader_via_callable", "test_loader_via_callable"] ) self.exclude_collab_to_collab = 2 @@ -81,7 +81,7 @@ def collaborator_step_b(self): """ validate_collab_private_attrs( - self, "collaborator_step_b", aggr = ["test_loader_agg_via_callable"], collabs = ["train_loader_via_callable", "test_loader_via_callable"] + self, "collaborator_step_b", aggr=["test_loader_agg_via_callable"], collabs=["train_loader_via_callable", "test_loader_via_callable"] ) self.exclude_collab_to_agg = 10 self.include_collab_to_agg = 12 @@ -152,7 +152,7 @@ def validate_agg_private_attr(self, step_name, **private_attrs_kwargs): step_name: Name of the step being validated private_attr_kwargs: Keyword arguments specifying the names of private attributes for the aggregator and collaborators. """ - agg_attrs = private_attrs_kwargs.get('aggr',[]) + agg_attrs = private_attrs_kwargs.get('aggr', []) collab_attrs = private_attrs_kwargs.get('collabs', []) # Aggregator should only be able to access its own attributes @@ -168,7 +168,7 @@ def validate_agg_private_attr(self, step_name, **private_attrs_kwargs): ) # check for collaborator private attributes that should not be accessible - breached_collab_attrs = [attr for attr in collab_attrs if hasattr(self,attr)] + breached_collab_attrs = [attr for attr in collab_attrs if hasattr(self, attr)] if breached_collab_attrs: TestFlowPrivateAttributes.ERROR_LIST.append( step_name + "_collaborator_attributes_found" @@ -177,9 +177,6 @@ def validate_agg_private_attr(self, step_name, **private_attrs_kwargs): f"{bcolors.FAIL} ... Attribute test failed in {step_name} - collaborator" + f"private attributes accessible:{','.join(breached_collab_attrs)} {bcolors.ENDC}" ) - - - for idx, collab in enumerate(self.collaborators): # Collaborator attributes should not be accessible in aggregator step if ( @@ -206,13 +203,13 @@ def validate_collab_private_attrs(self, step_name, **private_attrs_kwargs): step_name: Name of the step being validated private_attr_kwargs: Keyword arguments specifying the names of private attributes for the aggregator and collaborators. 
""" - agg_attrs = private_attrs_kwargs.get('aggr',[]) + agg_attrs = private_attrs_kwargs.get('aggr', []) collab_attrs = private_attrs_kwargs.get('collabs', []) # Collaborator should only be able to access its own attributes # check for missing collaborators attributes - inaccessible_collab_attrs = [attr for attr in collab_attrs if not hasattr(self,attr)] + inaccessible_collab_attrs = [attr for attr in collab_attrs if not hasattr(self, attr)] if inaccessible_collab_attrs: TestFlowPrivateAttributes.ERROR_LIST.append( diff --git a/tests/github/experimental/workspace/testcase_private_attributes_initialization_without_callable/src/testflow_privateattributes.py b/tests/github/experimental/workspace/testcase_private_attributes_initialization_without_callable/src/testflow_privateattributes.py index 5ce49505c6..f9888657b0 100644 --- a/tests/github/experimental/workspace/testcase_private_attributes_initialization_without_callable/src/testflow_privateattributes.py +++ b/tests/github/experimental/workspace/testcase_private_attributes_initialization_without_callable/src/testflow_privateattributes.py @@ -37,7 +37,7 @@ def start(self): ) self.collaborators = self.runtime.collaborators - validate_agg_private_attr(self,"start", aggr = ["test_loader_agg"], collabs = ["train_loader", "test_loader"]) + validate_agg_private_attr(self, "start", aggr=["test_loader_agg"], collabs=["train_loader", "test_loader"]) self.exclude_agg_to_agg = 10 self.include_agg_to_agg = 100 @@ -49,7 +49,7 @@ def aggregator_step(self): Testing whether Agg private attributes are accessible in next agg step. Collab private attributes should not be accessible here """ - validate_agg_private_attr(self,"aggregator_step", aggr = ["test_loader_agg"], collabs = ["train_loader", "test_loader"]) + validate_agg_private_attr(self, "aggregator_step", aggr=["test_loader_agg"], collabs=["train_loader", "test_loader"]) self.include_agg_to_collab = 42 self.exclude_agg_to_collab = 40 @@ -66,7 +66,7 @@ def collaborator_step_a(self): Aggregator private attributes should not be accessible here """ validate_collab_private_attrs( - self, "collaborator_step_a", aggr = ["test_loader_agg"], collabs = ["train_loader", "test_loader"] + self, "collaborator_step_a", aggr=["test_loader_agg"], collabs=["train_loader", "test_loader"] ) self.exclude_collab_to_collab = 2 @@ -81,7 +81,7 @@ def collaborator_step_b(self): """ validate_collab_private_attrs( - self, "collaborator_step_b", aggr = ["test_loader_agg"], collabs = ["train_loader", "test_loader"] + self, "collaborator_step_b", aggr=["test_loader_agg"], collabs=["train_loader", "test_loader"] ) self.exclude_collab_to_agg = 10 self.include_collab_to_agg = 12 @@ -152,7 +152,7 @@ def validate_agg_private_attr(self, step_name, **private_attrs_kwargs): step_name: Name of the step being validated private_attr_kwargs: Keyword arguments specifying the names of private attributes for the aggregator and collaborators. 
""" - agg_attrs = private_attrs_kwargs.get('aggr',[]) + agg_attrs = private_attrs_kwargs.get('aggr', []) collab_attrs = private_attrs_kwargs.get('collabs', []) # Aggregator should only be able to access its own attributes @@ -168,7 +168,7 @@ def validate_agg_private_attr(self, step_name, **private_attrs_kwargs): ) # check for collaborator private attributes that should not be accessible - breached_collab_attrs = [attr for attr in collab_attrs if hasattr(self,attr)] + breached_collab_attrs = [attr for attr in collab_attrs if hasattr(self, attr)] if breached_collab_attrs: TestFlowPrivateAttributes.ERROR_LIST.append( step_name + "_collaborator_attributes_found" @@ -177,9 +177,6 @@ def validate_agg_private_attr(self, step_name, **private_attrs_kwargs): f"{bcolors.FAIL} ... Attribute test failed in {step_name} - collaborator" + f"private attributes accessible:{','.join(breached_collab_attrs)} {bcolors.ENDC}" ) - - - for idx, collab in enumerate(self.collaborators): # Collaborator attributes should not be accessible in aggregator step if ( @@ -205,13 +202,13 @@ def validate_collab_private_attrs(self, step_name, **private_attrs_kwargs): step_name: Name of the step being validated private_attr_kwargs: Keyword arguments specifying the names of private attributes for the aggregator and collaborators. """ - agg_attrs = private_attrs_kwargs.get('aggr',[]) + agg_attrs = private_attrs_kwargs.get('aggr', []) collab_attrs = private_attrs_kwargs.get('collabs', []) # Collaborator should only be able to access its own attributes # check for missing collaborators attributes - inaccessible_collab_attrs = [attr for attr in collab_attrs if not hasattr(self,attr)] + inaccessible_collab_attrs = [attr for attr in collab_attrs if not hasattr(self, attr)] if inaccessible_collab_attrs: TestFlowPrivateAttributes.ERROR_LIST.append( diff --git a/tests/github/interactive_api_director/experiment_runner.py b/tests/github/interactive_api_director/experiment_runner.py index 3a4036f9df..f59a89f8ef 100644 --- a/tests/github/interactive_api_director/experiment_runner.py +++ b/tests/github/interactive_api_director/experiment_runner.py @@ -114,7 +114,7 @@ def run_federation(shards: typing.Dict[str, Shard], director_path: str): logger.info('Starting the experiment!') running_processes = [] p = subprocess.Popen( - f"fx director start --disable-tls", + "fx director start --disable-tls", shell=True, cwd=os.path.join(director_path) ) diff --git a/tests/github/interactive_api_director/experiments/pytorch_kvasir_unet/envoy/kvasir_shard_descriptor.py b/tests/github/interactive_api_director/experiments/pytorch_kvasir_unet/envoy/kvasir_shard_descriptor.py index dc5358b41a..178e121c53 100644 --- a/tests/github/interactive_api_director/experiments/pytorch_kvasir_unet/envoy/kvasir_shard_descriptor.py +++ b/tests/github/interactive_api_director/experiments/pytorch_kvasir_unet/envoy/kvasir_shard_descriptor.py @@ -12,7 +12,6 @@ from openfl.interface.interactive_api.shard_descriptor import ShardDataset from openfl.interface.interactive_api.shard_descriptor import ShardDescriptor from openfl.utilities import validate_file_hash -from zipfile import ZipFile import requests diff --git a/tests/github/interactive_api_director/experiments/pytorch_kvasir_unet/tasks.py b/tests/github/interactive_api_director/experiments/pytorch_kvasir_unet/tasks.py index 577a5156af..ca9adb339a 100644 --- a/tests/github/interactive_api_director/experiments/pytorch_kvasir_unet/tasks.py +++ b/tests/github/interactive_api_director/experiments/pytorch_kvasir_unet/tasks.py @@ 
-59,7 +59,7 @@ def validate(unet_model, val_loader, device): samples = target.shape[0] total_samples += samples data, target = torch.tensor(data).to(device), \ - torch.tensor(target).to(device, dtype=torch.int64) + torch.tensor(target).to(device, dtype=torch.int64) output = unet_model(data) val = soft_dice_coef(output, target) val_score += val.sum().cpu().numpy() diff --git a/tests/github/interactive_api_director/experiments/tensorflow_mnist/envoy/shard_descriptor.py b/tests/github/interactive_api_director/experiments/tensorflow_mnist/envoy/shard_descriptor.py index 59a1441f00..5cf5a9edb4 100644 --- a/tests/github/interactive_api_director/experiments/tensorflow_mnist/envoy/shard_descriptor.py +++ b/tests/github/interactive_api_director/experiments/tensorflow_mnist/envoy/shard_descriptor.py @@ -26,13 +26,11 @@ def __init__(self, rank_worldsize: str = '1,1') -> None: self.X_test = x_test[self.rank - 1::self.worldsize] self.y_test = y_test[self.rank - 1::self.worldsize] - # Calculating data and target shapes sample, _ = self[0] self._sample_shape = [str(dim) for dim in sample.shape] self._target_shape = ['0'] - def __getitem__(self, index): """Return a item by the index.""" if index < len(self.X_train): diff --git a/tests/github/interactive_api_director/experiments/tensorflow_mnist/experiment.py b/tests/github/interactive_api_director/experiments/tensorflow_mnist/experiment.py index ce20e434cc..6007b5103d 100644 --- a/tests/github/interactive_api_director/experiments/tensorflow_mnist/experiment.py +++ b/tests/github/interactive_api_director/experiments/tensorflow_mnist/experiment.py @@ -1,7 +1,7 @@ import tensorflow as tf # Create a federation from openfl.interface.interactive_api.federation import Federation -from openfl.interface.interactive_api.experiment import TaskInterface, DataInterface, ModelInterface, FLExperiment +from openfl.interface.interactive_api.experiment import TaskInterface, ModelInterface, FLExperiment from tests.github.interactive_api_director.experiments.tensorflow_mnist.dataset import FedDataset from tests.github.interactive_api_director.experiments.tensorflow_mnist.settings import model from tests.github.interactive_api_director.experiments.tensorflow_mnist.settings import optimizer @@ -40,21 +40,17 @@ def run(): for sample in samples: print(sample.shape) - framework_adapter = 'openfl.plugins.frameworks_adapters.keras_adapter.FrameworkAdapterPlugin' MI = ModelInterface(model=model, optimizer=optimizer, framework_plugin=framework_adapter) - def function_defined_in_notebook(some_parameter): print(f'Also I accept a parameter and it is {some_parameter}') - TI = TaskInterface() + # Task interface currently supports only standalone functions. - @TI.register_fl_task(model='model', data_loader='train_dataset', - device='device', optimizer='optimizer') + @TI.register_fl_task(model='model', data_loader='train_dataset', device='device', optimizer='optimizer') def train(model, train_dataset, optimizer, device, loss_fn=loss_fn, warmup=False): - # Iterate over the batches of the dataset. for step, (x_batch_train, y_batch_train) in enumerate(train_dataset): with tf.GradientTape() as tape: @@ -85,7 +81,6 @@ def train(model, train_dataset, optimizer, device, loss_fn=loss_fn, warmup=False return {'train_acc': train_acc} - @TI.register_fl_task(model='model', data_loader='val_dataset', device='device') def validate(model, val_dataset, device): # Run a validation loop at the end of each epoch. 
@@ -97,23 +92,20 @@ def validate(model, val_dataset, device): val_acc_metric.reset_states() print("Validation acc: %.4f" % (float(val_acc),)) - return {'validation_accuracy': val_acc,} + return {'validation_accuracy': val_acc, } # Save the initial model state - train(model,fed_dataset.get_train_loader(), optimizer, 'cpu', warmup=True) + train(model, fed_dataset.get_train_loader(), optimizer, 'cpu', warmup=True) initial_model = tf.keras.models.clone_model(model) - - # The Interactive API supports registering functions definied in main module or imported. - # create an experimnet in federation experiment_name = 'mnist_test_experiment' fl_experiment = FLExperiment( - federation=federation, - experiment_name=experiment_name, - serializer_plugin='openfl.plugins.interface_serializer.' - 'keras_serializer.KerasSerializer') + federation=federation, + experiment_name=experiment_name, + serializer_plugin='openfl.plugins.interface_serializer.' + 'keras_serializer.KerasSerializer') # If I use autoreload I got a pickling error # The following command zips the workspace and python requirements to be transfered to collaborator nodes diff --git a/tests/github/interactive_api_director/experiments/tensorflow_mnist/settings.py b/tests/github/interactive_api_director/experiments/tensorflow_mnist/settings.py index 8903160e15..b78ab1e5f2 100644 --- a/tests/github/interactive_api_director/experiments/tensorflow_mnist/settings.py +++ b/tests/github/interactive_api_director/experiments/tensorflow_mnist/settings.py @@ -1,4 +1,3 @@ -import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers import numpy as np diff --git a/tests/github/pki_wrong_cn.py b/tests/github/pki_wrong_cn.py index 0e6eeff541..7ce669d707 100644 --- a/tests/github/pki_wrong_cn.py +++ b/tests/github/pki_wrong_cn.py @@ -12,6 +12,7 @@ import openfl.native as fx from openfl.utilities.utils import getfqdn_env + def prepare_workspace(): subprocess.check_call(['fx', 'workspace', 'certify']) subprocess.check_call(['fx', 'plan', 'initialize']) diff --git a/tests/github/test_gandlf.py b/tests/github/test_gandlf.py index e989b39e81..68b00a2382 100644 --- a/tests/github/test_gandlf.py +++ b/tests/github/test_gandlf.py @@ -13,6 +13,7 @@ from tests.github.utils import create_collaborator, certify_aggregator from openfl.utilities.utils import getfqdn_env + def exec(command, directory): os.chdir(directory) check_call(command) From e87ac6e8f6ee6eca62ab3c729e06a6fbd17f1a53 Mon Sep 17 00:00:00 2001 From: rajith Date: Mon, 4 Nov 2024 18:10:56 +0530 Subject: [PATCH 2/3] added comments --- setup.cfg | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/setup.cfg b/setup.cfg index 0594a76f59..431abf816c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -29,4 +29,11 @@ max-line-length = 120 copyright-check = True # Enable specific checks or plugins +# B: Bandit security checks (e.g., detecting insecure function use). +# C: Cyclomatic complexity, used to flag overly complex functions. +# E: PEP8 errors (e.g., style issues). +# F: Pyflakes errors, like unused imports or undefined names. +# W: PEP8 warnings (e.g., stylistic issues). +# T4: Type checking from third-party tools (like mypy). +# B9: Bugbear, for additional warnings about potentially error-prone code. 
select = B,C,E,F,W,T4,B9 From c997ef4cebbede580a4a40e927b10858657b3f86 Mon Sep 17 00:00:00 2001 From: rajith Date: Mon, 4 Nov 2024 18:11:51 +0530 Subject: [PATCH 3/3] revert max line length to 100 --- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 431abf816c..5874a2e513 100644 --- a/setup.cfg +++ b/setup.cfg @@ -24,7 +24,7 @@ exclude = dist, .venv -max-line-length = 120 +max-line-length = 100 copyright-check = True
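
For context on the setup.cfg changes above: E203 and W503 are suppressed because black deliberately formats complex slices and wrapped expressions in ways those checks flag, and E266 relaxes block-comment style. Below is a minimal illustrative sketch (not part of the patch series; the file and variable names are invented for this note) of code that default flake8 would report but that passes under this configuration:

    # lint_examples.py -- illustrative only; names are invented for this note.
    samples = list(range(100))
    offset, batch_size = 4, 16

    # E203: black adds whitespace around ':' in slices with complex bounds,
    # which default flake8 reports as "whitespace before ':'".
    window = samples[offset + 1 : offset + batch_size]

    # W503: PEP 8 now prefers breaking lines *before* binary operators,
    # which default flake8 reports as "line break before binary operator".
    total = (sum(window)
             + len(window))

    ## Double-hash block comments trigger E266 under default settings.
    print(total)

Running flake8 from the repository root picks these settings up automatically from the [flake8] section of setup.cfg.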