diff --git a/benchmark/camelyon/benchmarks.py b/benchmark/camelyon/benchmarks.py index 76ea8176..228d29c0 100644 --- a/benchmark/camelyon/benchmarks.py +++ b/benchmark/camelyon/benchmarks.py @@ -64,7 +64,7 @@ def fed_avg(params: dict, train_folder: Path, test_folder: Path): mode=exp_params["mode"], cp_name=exp_params["cp_name"], cancel_cp=exp_params["cancel_cp"], - torch_gpu=exp_params["torch_gpu"], + use_gpu=exp_params["use_gpu"], ) if exp_params["skip_pure_torch"]: diff --git a/benchmark/camelyon/common/utils.py b/benchmark/camelyon/common/utils.py index 1ed69494..b971fe11 100644 --- a/benchmark/camelyon/common/utils.py +++ b/benchmark/camelyon/common/utils.py @@ -81,7 +81,7 @@ def parse_params() -> dict: default=False, help="Remote only: cancel the CP after registration", ) - parser.add_argument("--torch-gpu", action="store_true", help="Use PyTorch with GPU/CUDA support") + parser.add_argument("--use-gpu", action="store_true", help="Use PyTorch with GPU/CUDA support") parser.add_argument( "--skip-pure-torch", action="store_true", @@ -107,7 +107,7 @@ def parse_params() -> dict: params["nb_test_data_samples"] = args.nb_test_data_samples params["data_path"] = args.data_path params["cancel_cp"] = args.cancel_cp - params["torch_gpu"] = args.torch_gpu + params["use_gpu"] = args.use_gpu params["skip_pure_torch"] = args.skip_pure_torch params["cp_name"] = args.cp_name diff --git a/benchmark/camelyon/workflows.py b/benchmark/camelyon/workflows.py index 23f805be..43a6a2ef 100644 --- a/benchmark/camelyon/workflows.py +++ b/benchmark/camelyon/workflows.py @@ -44,7 +44,7 @@ def substrafl_fed_avg( asset_keys_path: Path, cp_name: Optional[str], cancel_cp: bool = False, - torch_gpu: bool = False, + use_gpu: bool = False, ) -> benchmark_metrics.BenchmarkResults: """Execute Weldon algorithm for a fed avg strategy with substrafl API. @@ -68,7 +68,7 @@ def substrafl_fed_avg( Otherwise, all present keys in this fill will be reused per Substra in remote mode. 
cp_name (Optional[str]): Compute Plan name to display cancel_cp (bool): if set to True, the CP will be canceled as soon as it's registered. Only work for remote mode. - torch_gpu (bool): Use GPU default index for pytorch + use_gpu (bool): Use GPU for Dependency object Returns: dict: Results of the experiment. """ @@ -97,7 +97,7 @@ def substrafl_fed_avg( "torch==2.3.0", "scikit-learn==1.5.1", ] - if not torch_gpu: + if not use_gpu: pypi_dependencies += ["--extra-index-url https://download.pytorch.org/whl/cpu"] # Dependencies @@ -108,6 +108,7 @@ def substrafl_fed_avg( # Keeping editable_mode=True to ensure nightly test benchmarks are ran against main substrafl git ref editable_mode=True, compile=True, + use_gpu=use_gpu, ) # Metrics diff --git a/changes/244.fixed b/changes/244.fixed new file mode 100644 index 00000000..5332e812 --- /dev/null +++ b/changes/244.fixed @@ -0,0 +1 @@ +Actually trigger the GPU docker configuration with `use_gpu` flag when running Camelyon benchmark