From 860e0c6e986be11f8672409f0c9321a217ab2056 Mon Sep 17 00:00:00 2001
From: ThibaultFy
Date: Mon, 12 Aug 2024 16:04:19 +0200
Subject: [PATCH] chore: rename use_gpu to disable_gpu in TorchAlgo

Signed-off-by: ThibaultFy
---
 .../algorithms/pytorch/torch_base_algo.py     | 10 +++++-----
 .../algorithms/pytorch/torch_fed_avg_algo.py  |  6 +++---
 .../algorithms/pytorch/torch_fed_pca_algo.py  |  6 +++---
 .../pytorch/torch_newton_raphson_algo.py      |  6 +++---
 .../algorithms/pytorch/torch_scaffold_algo.py |  6 +++---
 .../pytorch/torch_single_organization_algo.py |  6 +++---
 tests/algorithms/pytorch/test_base_algo.py    | 20 ++++++++++----------
 tests/algorithms/pytorch/test_fed_avg.py      |  2 +-
 tests/algorithms/pytorch/test_fed_pca_algo.py |  2 +-
 .../algorithms/pytorch/test_newton_raphson.py |  4 ++--
 tests/algorithms/pytorch/test_scaffold.py     |  2 +-
 .../pytorch/test_single_organization.py       |  4 ++--
 12 files changed, 37 insertions(+), 37 deletions(-)

diff --git a/substrafl/algorithms/pytorch/torch_base_algo.py b/substrafl/algorithms/pytorch/torch_base_algo.py
index 0562a1c5..55e57053 100644
--- a/substrafl/algorithms/pytorch/torch_base_algo.py
+++ b/substrafl/algorithms/pytorch/torch_base_algo.py
@@ -46,7 +46,7 @@ def __init__(
         optimizer: Optional[torch.optim.Optimizer] = None,
         scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,
         seed: Optional[int] = None,
-        use_gpu: bool = True,
+        disable_gpu: bool = True,
         *args,
         **kwargs,
     ):
@@ -63,7 +63,7 @@ def __init__(
             np.random.seed(seed)
             torch.manual_seed(seed)

-        self._device = self._get_torch_device(use_gpu=use_gpu)
+        self._device = self._get_torch_device(disable_gpu=disable_gpu)

         self._model = model.to(self._device)
         self._optimizer = optimizer
@@ -212,18 +212,18 @@ def _local_train(
         if self._scheduler is not None:
             self._scheduler.step()

-    def _get_torch_device(self, use_gpu: bool) -> torch.device:
+    def _get_torch_device(self, disable_gpu: bool) -> torch.device:
         """Get the torch device, CPU or GPU, depending
         on availability and user input.

         Args:
-            use_gpu (bool): whether to use GPUs if available or not.
+            disable_gpu (bool): whether to disable the GPUs even if they are available.

         Returns:
             torch.device: Torch device
         """
         device = torch.device("cpu")
-        if use_gpu and torch.cuda.is_available():
+        if not disable_gpu and torch.cuda.is_available():
             device = torch.device("cuda")

         return device
diff --git a/substrafl/algorithms/pytorch/torch_fed_avg_algo.py b/substrafl/algorithms/pytorch/torch_fed_avg_algo.py
index bde3b02c..2ecbc751 100644
--- a/substrafl/algorithms/pytorch/torch_fed_avg_algo.py
+++ b/substrafl/algorithms/pytorch/torch_fed_avg_algo.py
@@ -92,7 +92,7 @@ def __init__(
         scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,
         with_batch_norm_parameters: bool = False,
         seed: Optional[int] = None,
-        use_gpu: bool = True,
+        disable_gpu: bool = True,
         *args,
         **kwargs,
     ):
@@ -125,7 +125,7 @@ def __init__(
             with_batch_norm_parameters (bool): Whether to include the batch norm layer parameters in the fed avg
                 strategy. Defaults to False.
             seed (typing.Optional[int]): Seed set at the algo initialization on each organization. Defaults to None.
-            use_gpu (bool): Whether to use the GPUs if they are available. Defaults to True.
+            disable_gpu (bool): Whether to disable the GPUs, even if they are available. Defaults to True.
""" super().__init__( *args, @@ -136,7 +136,7 @@ def __init__( dataset=dataset, scheduler=scheduler, seed=seed, - use_gpu=use_gpu, + disable_gpu=disable_gpu, **kwargs, ) self._with_batch_norm_parameters = with_batch_norm_parameters diff --git a/substrafl/algorithms/pytorch/torch_fed_pca_algo.py b/substrafl/algorithms/pytorch/torch_fed_pca_algo.py index 4414d397..8e261ce0 100644 --- a/substrafl/algorithms/pytorch/torch_fed_pca_algo.py +++ b/substrafl/algorithms/pytorch/torch_fed_pca_algo.py @@ -87,7 +87,7 @@ def __init__( out_features: int, batch_size: Optional[int] = None, seed: int = 1, - use_gpu: bool = True, + disable_gpu: bool = True, *args, **kwargs, ): @@ -101,7 +101,7 @@ def __init__( out_features (int): dimension to keep after PCA batch_size (Optional[int]): mini-batch size seed (int): random generator seed. The seed is mandatory. Default to 1. - use_gpu (bool): whether to use GPU or not. Default to True. + disable_gpu (bool): whether to use GPU or not. Default to True. """ self.in_features = in_features self.out_features = out_features @@ -122,7 +122,7 @@ def __init__( index_generator=None, dataset=dataset, seed=self._seed, - use_gpu=use_gpu, + disable_gpu=disable_gpu, **kwargs, ) diff --git a/substrafl/algorithms/pytorch/torch_newton_raphson_algo.py b/substrafl/algorithms/pytorch/torch_newton_raphson_algo.py index 034726fc..852beba1 100644 --- a/substrafl/algorithms/pytorch/torch_newton_raphson_algo.py +++ b/substrafl/algorithms/pytorch/torch_newton_raphson_algo.py @@ -48,7 +48,7 @@ def __init__( l2_coeff: float = 0, with_batch_norm_parameters: bool = False, seed: Optional[int] = None, - use_gpu: bool = True, + disable_gpu: bool = True, *args, **kwargs, ): @@ -80,7 +80,7 @@ def __init__( with_batch_norm_parameters (bool): Whether to include the batch norm layer parameters in the Newton-Raphson strategy. Defaults to False. seed (typing.Optional[int]): Seed set at the algo initialization on each organization. Defaults to None. - use_gpu (bool): Whether to use the GPUs if they are available. Defaults to True. + disable_gpu (bool): Whether to use the GPUs if they are available. Defaults to True. """ assert "optimizer" not in kwargs, "Newton Raphson strategy does not uses optimizers" @@ -91,7 +91,7 @@ def __init__( optimizer=None, index_generator=None, dataset=dataset, - use_gpu=use_gpu, + disable_gpu=disable_gpu, seed=seed, **kwargs, ) diff --git a/substrafl/algorithms/pytorch/torch_scaffold_algo.py b/substrafl/algorithms/pytorch/torch_scaffold_algo.py index f83d2759..8ce14608 100644 --- a/substrafl/algorithms/pytorch/torch_scaffold_algo.py +++ b/substrafl/algorithms/pytorch/torch_scaffold_algo.py @@ -118,7 +118,7 @@ def __init__( with_batch_norm_parameters: bool = False, c_update_rule: CUpdateRule = CUpdateRule.FAST, seed: Optional[int] = None, - use_gpu: bool = True, + disable_gpu: bool = True, *args, **kwargs, ): @@ -153,7 +153,7 @@ def __init__( client control variate. Defaults to CUpdateRule.FAST. seed (typing.Optional[int]): Seed set at the algo initialization on each organization. Defaults to None. - use_gpu (bool): Whether to use the GPUs if they are available. Defaults to True. + disable_gpu (bool): Whether to use the GPUs if they are available. Defaults to True. Raises: :ref:`~substrafl.exceptions.NumUpdatesValueError`: If `num_updates` is inferior or equal to zero. 
""" @@ -165,7 +165,7 @@ def __init__( index_generator=index_generator, dataset=dataset, scheduler=scheduler, - use_gpu=use_gpu, + disable_gpu=disable_gpu, seed=seed, **kwargs, ) diff --git a/substrafl/algorithms/pytorch/torch_single_organization_algo.py b/substrafl/algorithms/pytorch/torch_single_organization_algo.py index d432dff8..f0e459ad 100644 --- a/substrafl/algorithms/pytorch/torch_single_organization_algo.py +++ b/substrafl/algorithms/pytorch/torch_single_organization_algo.py @@ -90,7 +90,7 @@ def __init__( dataset: torch.utils.data.Dataset, scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None, seed: Optional[int] = None, - use_gpu: bool = True, + disable_gpu: bool = True, *args, **kwargs, ): @@ -121,7 +121,7 @@ def __init__( scheduler (torch.optim.lr_scheduler._LRScheduler, Optional): A torch scheduler that will be called at every batch. If None, no scheduler will be used. Defaults to None. seed (typing.Optional[int]): Seed set at the algo initialization on each organization. Defaults to None. - use_gpu (bool): Whether to use the GPUs if they are available. Defaults to True. + disable_gpu (bool): Whether to use the GPUs if they are available. Defaults to True. """ super().__init__( *args, @@ -132,7 +132,7 @@ def __init__( dataset=dataset, scheduler=scheduler, seed=seed, - use_gpu=use_gpu, + disable_gpu=disable_gpu, **kwargs, ) diff --git a/tests/algorithms/pytorch/test_base_algo.py b/tests/algorithms/pytorch/test_base_algo.py index eb4332ef..39c4813b 100644 --- a/tests/algorithms/pytorch/test_base_algo.py +++ b/tests/algorithms/pytorch/test_base_algo.py @@ -50,7 +50,7 @@ def __init__(self): optimizer=torch.optim.SGD(perceptron.parameters(), lr=0.1), index_generator=nig, seed=test_seed, - use_gpu=False, + disable_gpu=True, ) @property @@ -162,7 +162,7 @@ def train(self, data_from_opener, shared_state): @pytest.fixture(params=[pytest.param(True, marks=pytest.mark.gpu), False]) -def use_gpu(request): +def disable_gpu(request): return request.param @@ -174,7 +174,7 @@ def use_gpu(request): (TorchNewtonRaphsonAlgo, NewtonRaphson), ] ) -def dummy_gpu(request, torch_linear_model, use_gpu, numpy_torch_dataset): +def dummy_gpu(request, torch_linear_model, disable_gpu, numpy_torch_dataset): nig = NpIndexGenerator( batch_size=1, num_updates=1, @@ -189,7 +189,7 @@ def __init__(self): criterion=torch.nn.MSELoss(), dataset=numpy_torch_dataset, batch_size=1, - use_gpu=use_gpu, + disable_gpu=disable_gpu, ) else: super().__init__( @@ -198,9 +198,9 @@ def __init__(self): criterion=torch.nn.MSELoss(), dataset=numpy_torch_dataset, index_generator=nig, - use_gpu=use_gpu, + disable_gpu=disable_gpu, ) - if use_gpu: + if disable_gpu: assert self._device == torch.device("cuda") else: assert self._device == torch.device("cpu") @@ -209,7 +209,7 @@ def __init__(self): def strategies(self): return list(StrategyName) - return MyAlgo, request.param[1], use_gpu + return MyAlgo, request.param[1], disable_gpu def test_base_algo_custom_init_arg_default_value(session_dir, dummy_algo_custom_init_arg): @@ -469,7 +469,7 @@ def test_gpu( aggregation_node, ): num_rounds = 2 - algo_class, strategy_class, use_gpu = dummy_gpu + algo_class, strategy_class, disable_gpu = dummy_gpu my_algo = algo_class() algo_deps = Dependency( pypi_dependencies=["torch==2.2.1", "numpy==1.26.4", "pytest"], @@ -499,7 +499,7 @@ def test_gpu( dependencies=algo_deps, experiment_folder=session_dir / "experiment_folder", clean_models=False, - name=f'Testing the GPU - strategy {strategy_class.__name__}, running on {"cuda" if use_gpu else 
"cpu"}', + name=f'Testing the GPU - strategy {strategy_class.__name__}, running on {"cuda" if disable_gpu else "cpu"}', ) # Wait for the compute plan to be finished diff --git a/tests/algorithms/pytorch/test_fed_avg.py b/tests/algorithms/pytorch/test_fed_avg.py index a686fe54..de242bef 100644 --- a/tests/algorithms/pytorch/test_fed_avg.py +++ b/tests/algorithms/pytorch/test_fed_avg.py @@ -46,7 +46,7 @@ def __init__( model=perceptron, index_generator=nig, dataset=numpy_torch_dataset, - use_gpu=False, + disable_gpu=True, ) return MyAlgo diff --git a/tests/algorithms/pytorch/test_fed_pca_algo.py b/tests/algorithms/pytorch/test_fed_pca_algo.py index dbc87061..9ba97ea4 100644 --- a/tests/algorithms/pytorch/test_fed_pca_algo.py +++ b/tests/algorithms/pytorch/test_fed_pca_algo.py @@ -36,7 +36,7 @@ def __init__(self, batch_size=1): batch_size=batch_size, dataset=numpy_torch_dataset, seed=seed, - use_gpu=False, + disable_gpu=True, ) return MyAlgo diff --git a/tests/algorithms/pytorch/test_newton_raphson.py b/tests/algorithms/pytorch/test_newton_raphson.py index 9553f623..948c5dbc 100644 --- a/tests/algorithms/pytorch/test_newton_raphson.py +++ b/tests/algorithms/pytorch/test_newton_raphson.py @@ -348,7 +348,7 @@ def __init__(self): batch_size=BATCH_SIZE, dataset=numpy_torch_dataset, l2_coeff=0, - use_gpu=False, + disable_gpu=True, ) my_algo = MyAlgo() @@ -435,7 +435,7 @@ def __init__(self): batch_size=BATCH_SIZE, dataset=numpy_torch_dataset, l2_coeff=0, - use_gpu=False, + disable_gpu=True, ) my_algo = MyAlgo() diff --git a/tests/algorithms/pytorch/test_scaffold.py b/tests/algorithms/pytorch/test_scaffold.py index 8f6c1551..7468325d 100644 --- a/tests/algorithms/pytorch/test_scaffold.py +++ b/tests/algorithms/pytorch/test_scaffold.py @@ -50,7 +50,7 @@ def __init__( index_generator=nig, dataset=numpy_torch_dataset, scheduler=scheduler, - use_gpu=False, + disable_gpu=True, ) return MyAlgo diff --git a/tests/algorithms/pytorch/test_single_organization.py b/tests/algorithms/pytorch/test_single_organization.py index 1c0f5aae..5040a3a6 100644 --- a/tests/algorithms/pytorch/test_single_organization.py +++ b/tests/algorithms/pytorch/test_single_organization.py @@ -50,7 +50,7 @@ def __init__( model=perceptron, index_generator=nig, dataset=numpy_torch_dataset, - use_gpu=False, + disable_gpu=True, ) my_algo = MySingleOrganizationAlgo() @@ -107,7 +107,7 @@ def __init__( model=perceptron, index_generator=nig, dataset=numpy_torch_dataset, - use_gpu=False, + disable_gpu=True, ) my_algo = MySingleOrganizationAlgo()