
chore: rename use_gpu to disable_gpu in TorchAlgo
Signed-off-by: ThibaultFy <[email protected]>
ThibaultFy committed Aug 12, 2024
1 parent 7ecc8d7 commit 860e0c6
Showing 12 changed files with 36 additions and 36 deletions.
10 changes: 5 additions & 5 deletions substrafl/algorithms/pytorch/torch_base_algo.py
@@ -46,7 +46,7 @@ def __init__(
optimizer: Optional[torch.optim.Optimizer] = None,
scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,
seed: Optional[int] = None,
use_gpu: bool = True,
disable_gpu: bool = False,
*args,
**kwargs,
):
@@ -63,7 +63,7 @@ def __init__(
np.random.seed(seed)
torch.manual_seed(seed)

self._device = self._get_torch_device(use_gpu=use_gpu)
self._device = self._get_torch_device(disable_gpu=disable_gpu)

self._model = model.to(self._device)
self._optimizer = optimizer
@@ -212,18 +212,18 @@ def _local_train(
if self._scheduler is not None:
self._scheduler.step()

def _get_torch_device(self, use_gpu: bool) -> torch.device:
def _get_torch_device(self, disable_gpu: bool) -> torch.device:
"""Get the torch device, CPU or GPU, depending
on availability and user input.
Args:
use_gpu (bool): whether to use GPUs if available or not.
disable_gpu (bool): whether to disable GPU usage. If False, the GPU is used when available; otherwise the CPU is used.
Returns:
torch.device: Torch device
"""
device = torch.device("cpu")
if use_gpu and torch.cuda.is_available():
if not disable_gpu and torch.cuda.is_available():
device = torch.device("cuda")
return device

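Note: the rename inverts the flag, so call sites that previously passed use_gpu=False must now pass disable_gpu=True, and use_gpu=True becomes disable_gpu=False. A minimal standalone sketch of the resulting device selection, equivalent to _get_torch_device above (the select_device helper name is illustrative, not part of substrafl):

import torch

def select_device(disable_gpu: bool) -> torch.device:
    # CPU unless a CUDA device is available and GPU usage has not been disabled.
    if not disable_gpu and torch.cuda.is_available():
        return torch.device("cuda")
    return torch.device("cpu")

assert select_device(disable_gpu=True) == torch.device("cpu")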
6 changes: 3 additions & 3 deletions substrafl/algorithms/pytorch/torch_fed_avg_algo.py
@@ -92,7 +92,7 @@ def __init__(
scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,
with_batch_norm_parameters: bool = False,
seed: Optional[int] = None,
use_gpu: bool = True,
disable_gpu: bool = False,
*args,
**kwargs,
):
@@ -125,7 +125,7 @@ def __init__(
with_batch_norm_parameters (bool): Whether to include the batch norm layer parameters in the fed avg
strategy. Defaults to False.
seed (typing.Optional[int]): Seed set at the algo initialization on each organization. Defaults to None.
use_gpu (bool): Whether to use the GPUs if they are available. Defaults to True.
disable_gpu (bool): Whether to disable GPU usage. If False, the GPU is used when available; otherwise the CPU is used. Defaults to False.
"""
super().__init__(
*args,
@@ -136,7 +136,7 @@ def __init__(
dataset=dataset,
scheduler=scheduler,
seed=seed,
use_gpu=use_gpu,
disable_gpu=disable_gpu,
**kwargs,
)
self._with_batch_norm_parameters = with_batch_norm_parameters
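For code subclassing TorchFedAvgAlgo, only the keyword argument changes. A hedged usage sketch mirroring the repository's tests; the MyDataset class, the torch.nn.Linear model, and the (data_from_opener, is_inference) dataset signature are illustrative assumptions, not part of this commit:

import numpy as np
import torch

from substrafl.algorithms.pytorch import TorchFedAvgAlgo
from substrafl.index_generator import NpIndexGenerator


class MyDataset(torch.utils.data.Dataset):
    # Assumed substrafl torch dataset contract: built from the opener output
    # plus an is_inference flag.
    def __init__(self, data_from_opener, is_inference):
        self.x = torch.from_numpy(data_from_opener["x"].astype(np.float32))
        self.y = torch.from_numpy(data_from_opener["y"].astype(np.float32))
        self.is_inference = is_inference

    def __getitem__(self, idx):
        return self.x[idx] if self.is_inference else (self.x[idx], self.y[idx])

    def __len__(self):
        return len(self.x)


model = torch.nn.Linear(2, 1)


class MyAlgo(TorchFedAvgAlgo):
    def __init__(self):
        super().__init__(
            model=model,
            criterion=torch.nn.MSELoss(),
            optimizer=torch.optim.SGD(model.parameters(), lr=0.1),
            index_generator=NpIndexGenerator(batch_size=1, num_updates=1),
            dataset=MyDataset,
            disable_gpu=True,  # previously: use_gpu=False
        )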
6 changes: 3 additions & 3 deletions substrafl/algorithms/pytorch/torch_fed_pca_algo.py
@@ -87,7 +87,7 @@ def __init__(
out_features: int,
batch_size: Optional[int] = None,
seed: int = 1,
use_gpu: bool = True,
disable_gpu: bool = False,
*args,
**kwargs,
):
@@ -101,7 +101,7 @@ def __init__(
out_features (int): dimension to keep after PCA
batch_size (Optional[int]): mini-batch size
seed (int): random generator seed. The seed is mandatory. Defaults to 1.
use_gpu (bool): whether to use GPU or not. Default to True.
disable_gpu (bool): whether to disable GPU usage. If False, the GPU is used when available. Defaults to False.
"""
self.in_features = in_features
self.out_features = out_features
@@ -122,7 +122,7 @@ def __init__(
index_generator=None,
dataset=dataset,
seed=self._seed,
use_gpu=use_gpu,
disable_gpu=disable_gpu,
**kwargs,
)

6 changes: 3 additions & 3 deletions substrafl/algorithms/pytorch/torch_newton_raphson_algo.py
@@ -48,7 +48,7 @@ def __init__(
l2_coeff: float = 0,
with_batch_norm_parameters: bool = False,
seed: Optional[int] = None,
use_gpu: bool = True,
disable_gpu: bool = False,
*args,
**kwargs,
):
@@ -80,7 +80,7 @@ def __init__(
with_batch_norm_parameters (bool): Whether to include the batch norm layer parameters in the Newton-Raphson
strategy. Defaults to False.
seed (typing.Optional[int]): Seed set at the algo initialization on each organization. Defaults to None.
use_gpu (bool): Whether to use the GPUs if they are available. Defaults to True.
disable_gpu (bool): Whether to disable GPU usage. If False, the GPU is used when available; otherwise the CPU is used. Defaults to False.
"""
assert "optimizer" not in kwargs, "Newton Raphson strategy does not use optimizers"

@@ -91,7 +91,7 @@ def __init__(
optimizer=None,
index_generator=None,
dataset=dataset,
use_gpu=use_gpu,
disable_gpu=disable_gpu,
seed=seed,
**kwargs,
)
6 changes: 3 additions & 3 deletions substrafl/algorithms/pytorch/torch_scaffold_algo.py
@@ -118,7 +118,7 @@ def __init__(
with_batch_norm_parameters: bool = False,
c_update_rule: CUpdateRule = CUpdateRule.FAST,
seed: Optional[int] = None,
use_gpu: bool = True,
disable_gpu: bool = False,
*args,
**kwargs,
):
@@ -153,7 +153,7 @@ def __init__(
client control variate.
Defaults to CUpdateRule.FAST.
seed (typing.Optional[int]): Seed set at the algo initialization on each organization. Defaults to None.
use_gpu (bool): Whether to use the GPUs if they are available. Defaults to True.
disable_gpu (bool): Whether to disable GPU usage. If False, the GPU is used when available; otherwise the CPU is used. Defaults to False.
Raises:
:ref:`~substrafl.exceptions.NumUpdatesValueError`: If `num_updates` is less than or equal to zero.
"""
@@ -165,7 +165,7 @@ def __init__(
index_generator=index_generator,
dataset=dataset,
scheduler=scheduler,
use_gpu=use_gpu,
disable_gpu=disable_gpu,
seed=seed,
**kwargs,
)
6 changes: 3 additions & 3 deletions substrafl/algorithms/pytorch/torch_single_organization_algo.py
@@ -90,7 +90,7 @@ def __init__(
dataset: torch.utils.data.Dataset,
scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,
seed: Optional[int] = None,
use_gpu: bool = True,
disable_gpu: bool = False,
*args,
**kwargs,
):
@@ -121,7 +121,7 @@ def __init__(
scheduler (torch.optim.lr_scheduler._LRScheduler, Optional): A torch scheduler that will be called at every
batch. If None, no scheduler will be used. Defaults to None.
seed (typing.Optional[int]): Seed set at the algo initialization on each organization. Defaults to None.
use_gpu (bool): Whether to use the GPUs if they are available. Defaults to True.
disable_gpu (bool): Whether to disable GPU usage. If False, the GPU is used when available; otherwise the CPU is used. Defaults to False.
"""
super().__init__(
*args,
@@ -132,7 +132,7 @@ def __init__(
dataset=dataset,
scheduler=scheduler,
seed=seed,
use_gpu=use_gpu,
disable_gpu=disable_gpu,
**kwargs,
)

18 changes: 9 additions & 9 deletions tests/algorithms/pytorch/test_base_algo.py
@@ -50,7 +50,7 @@ def __init__(self):
optimizer=torch.optim.SGD(perceptron.parameters(), lr=0.1),
index_generator=nig,
seed=test_seed,
use_gpu=False,
disable_gpu=True,
)

@property
@@ -162,7 +162,7 @@ def train(self, data_from_opener, shared_state):


@pytest.fixture(params=[pytest.param(False, marks=pytest.mark.gpu), True])
def use_gpu(request):
def disable_gpu(request):
return request.param


@@ -174,7 +174,7 @@ def use_gpu(request):
(TorchNewtonRaphsonAlgo, NewtonRaphson),
]
)
def dummy_gpu(request, torch_linear_model, use_gpu, numpy_torch_dataset):
def dummy_gpu(request, torch_linear_model, disable_gpu, numpy_torch_dataset):
nig = NpIndexGenerator(
batch_size=1,
num_updates=1,
@@ -189,7 +189,7 @@ def __init__(self):
criterion=torch.nn.MSELoss(),
dataset=numpy_torch_dataset,
batch_size=1,
use_gpu=use_gpu,
disable_gpu=disable_gpu,
)
else:
super().__init__(
@@ -198,9 +198,9 @@ def __init__(self):
criterion=torch.nn.MSELoss(),
dataset=numpy_torch_dataset,
index_generator=nig,
use_gpu=use_gpu,
disable_gpu=disable_gpu,
)
if use_gpu:
if not disable_gpu:
assert self._device == torch.device("cuda")
else:
assert self._device == torch.device("cpu")
@@ -209,7 +209,7 @@ def __init__(self):
def strategies(self):
return list(StrategyName)

return MyAlgo, request.param[1], use_gpu
return MyAlgo, request.param[1], disable_gpu


def test_base_algo_custom_init_arg_default_value(session_dir, dummy_algo_custom_init_arg):
@@ -469,7 +469,7 @@ def test_gpu(
aggregation_node,
):
num_rounds = 2
algo_class, strategy_class, use_gpu = dummy_gpu
algo_class, strategy_class, disable_gpu = dummy_gpu
my_algo = algo_class()
algo_deps = Dependency(
pypi_dependencies=["torch==2.2.1", "numpy==1.26.4", "pytest"],
@@ -499,7 +499,7 @@ def test_gpu(
dependencies=algo_deps,
experiment_folder=session_dir / "experiment_folder",
clean_models=False,
name=f'Testing the GPU - strategy {strategy_class.__name__}, running on {"cuda" if use_gpu else "cpu"}',
name=f'Testing the GPU - strategy {strategy_class.__name__}, running on {"cpu" if disable_gpu else "cuda"}',
)

# Wait for the compute plan to be finished
2 changes: 1 addition & 1 deletion tests/algorithms/pytorch/test_fed_avg.py
@@ -46,7 +46,7 @@ def __init__(
model=perceptron,
index_generator=nig,
dataset=numpy_torch_dataset,
use_gpu=False,
disable_gpu=True,
)

return MyAlgo
2 changes: 1 addition & 1 deletion tests/algorithms/pytorch/test_fed_pca_algo.py
@@ -36,7 +36,7 @@ def __init__(self, batch_size=1):
batch_size=batch_size,
dataset=numpy_torch_dataset,
seed=seed,
use_gpu=False,
disable_gpu=True,
)

return MyAlgo
4 changes: 2 additions & 2 deletions tests/algorithms/pytorch/test_newton_raphson.py
@@ -348,7 +348,7 @@ def __init__(self):
batch_size=BATCH_SIZE,
dataset=numpy_torch_dataset,
l2_coeff=0,
use_gpu=False,
disable_gpu=True,
)

my_algo = MyAlgo()
@@ -435,7 +435,7 @@ def __init__(self):
batch_size=BATCH_SIZE,
dataset=numpy_torch_dataset,
l2_coeff=0,
use_gpu=False,
disable_gpu=True,
)

my_algo = MyAlgo()
2 changes: 1 addition & 1 deletion tests/algorithms/pytorch/test_scaffold.py
@@ -50,7 +50,7 @@ def __init__(
index_generator=nig,
dataset=numpy_torch_dataset,
scheduler=scheduler,
use_gpu=False,
disable_gpu=True,
)

return MyAlgo
4 changes: 2 additions & 2 deletions tests/algorithms/pytorch/test_single_organization.py
@@ -50,7 +50,7 @@ def __init__(
model=perceptron,
index_generator=nig,
dataset=numpy_torch_dataset,
use_gpu=False,
disable_gpu=True,
)

my_algo = MySingleOrganizationAlgo()
@@ -107,7 +107,7 @@ def __init__(
model=perceptron,
index_generator=nig,
dataset=numpy_torch_dataset,
use_gpu=False,
disable_gpu=True,
)

my_algo = MySingleOrganizationAlgo()
