From b23478a531eaf47286db368890f6bed469730a83 Mon Sep 17 00:00:00 2001 From: Karan Shah Date: Mon, 9 Dec 2024 14:09:41 +0530 Subject: [PATCH 1/5] Update log verbosity (#1202) Signed-off-by: Shah, Karan --- openfl/component/aggregator/aggregator.py | 114 +++--------------- openfl/component/collaborator/collaborator.py | 2 - openfl/federated/task/runner_keras.py | 4 +- 3 files changed, 18 insertions(+), 102 deletions(-) diff --git a/openfl/component/aggregator/aggregator.py b/openfl/component/aggregator/aggregator.py index 038a1e00d3..041022e300 100644 --- a/openfl/component/aggregator/aggregator.py +++ b/openfl/component/aggregator/aggregator.py @@ -79,7 +79,7 @@ def __init__( write_logs=False, log_memory_usage=False, log_metric_callback=None, - **kwargs, + initial_tensor_dict=None, ): """Initializes the Aggregator. @@ -110,15 +110,18 @@ def __init__( to None. **kwargs: Additional keyword arguments. """ + self.logger = getLogger(__name__) self.round_number = 0 - self.single_col_cert_common_name = single_col_cert_common_name - if self.single_col_cert_common_name is not None: - self._log_big_warning() - else: - # FIXME: '' instead of None is just for protobuf compatibility. - # Cleaner solution? - self.single_col_cert_common_name = "" + if single_col_cert_common_name: + self.logger.warning( + "You are running in single collaborator certificate mode. " + "This mode is intended for development settings only and does not " + "provide proper Public Key Infrastructure (PKI) security. " + "Please use this mode with caution." + ) + # FIXME: "" instead of None is for protobuf compatibility. + self.single_col_cert_common_name = single_col_cert_common_name or "" self.straggler_handling_policy = ( straggler_handling_policy or CutoffTimeBasedStragglerHandling() @@ -143,7 +146,6 @@ def __init__( self.db_store_rounds = db_store_rounds # Gathered together logging-related objects - self.logger = getLogger(__name__) self.write_logs = write_logs self.log_metric_callback = log_metric_callback @@ -166,10 +168,10 @@ def __init__( self.best_tensor_dict: dict = {} self.last_tensor_dict: dict = {} - if kwargs.get("initial_tensor_dict", None) is not None: - self._load_initial_tensors_from_dict(kwargs["initial_tensor_dict"]) + if initial_tensor_dict: + self._load_initial_tensors_from_dict(initial_tensor_dict) self.model = utils.construct_model_proto( - tensor_dict=kwargs["initial_tensor_dict"], + tensor_dict=initial_tensor_dict, round_number=0, tensor_pipe=self.compression_pipeline, ) @@ -326,9 +328,7 @@ def _time_to_quit(self): Returns: bool: True if it's time to quit, False otherwise. """ - if self.round_number >= self.rounds_to_train: - return True - return False + return self.round_number >= self.rounds_to_train def get_tasks(self, collaborator_name): """RPC called by a collaborator to determine which tasks to perform. @@ -1056,19 +1056,10 @@ def _is_collaborator_done(self, collaborator_name: str, round_number: int) -> No if all_tasks_completed: self.collaborators_done.append(collaborator_name) self.logger.info( - f"Round: {self.round_number}, Collaborators that have completed all tasks: " + f"Round {self.round_number}: Collaborators that have completed all tasks: " f"{self.collaborators_done}" ) - def _log_big_warning(self): - """Warn user about single collaborator cert mode.""" - self.logger.warning( - f"\n{the_dragon}\nYOU ARE RUNNING IN SINGLE COLLABORATOR CERT MODE! THIS IS" - f" NOT PROPER PKI AND " - f"SHOULD ONLY BE USED IN DEVELOPMENT SETTINGS!!!! YE HAVE BEEN" - f" WARNED!!!" 
- ) - def stop(self, failed_collaborator: str = None) -> None: """Stop aggregator execution. @@ -1092,76 +1083,3 @@ def stop(self, failed_collaborator: str = None) -> None: collaborator_name, ) self.quit_job_sent_to.append(collaborator_name) - - -the_dragon = """ - - ,@@.@@+@@##@,@@@@.`@@#@+ *@@@@ #@##@ `@@#@# @@@@@ @@ @@@@` #@@@ :@@ `@#`@@@#.@ - @@ #@ ,@ +. @@.@* #@ :` @+*@ .@`+. @@ *@::@`@@ @@# @@ #`;@`.@@ @@@`@`#@* +:@` - @@@@@ ,@@@ @@@@ +@@+ @@@@ .@@@ @@ .@+:@@@: .;+@` @@ ,;,#@` @@ @@@@@ ,@@@* @ - @@ #@ ,@`*. @@.@@ #@ ,; `@+,@#.@.*` @@ ,@::@`@@` @@@@# @@`:@;*@+ @@ @`:@@`@ *@@ ` - .@@`@@,+@+;@.@@ @@`@@;*@ ;@@#@:*@+;@ `@@;@@ #@**@+;@ `@@:`@@@@ @@@@.`@+ .@ +@+@*,@ - `` `` ` `` . ` ` ` ` ` .` ` `` `` `` ` . ` - - - - .** - ;` `****: - @**`******* - *** +***********; - ,@***;` .*:,;************ - ;***********@@*********** - ;************************, - `************************* - ************************* - ,************************ - **#********************* - *@****` :**********; - +**; .********. - ;*; `*******#: `,: - ****@@@++:: ,,;***. - *@@@**;#;: +: **++*, - @***#@@@: +*; ,**** - @*@+**** ***` ****, - ,@#******. , **** **;,**. - * ******** :, ;*:*+ ** :,** - # ********:: *,.*:**` * ,*; - . *********: .+,*:;*: : `:** - ; :********: ***::** ` ` ** - + :****::*** , *;;::**` :* - `` .****::;**::: *;::::*; ;* - * *****::***:. **::::** ;: - # *****;:**** ;*::;*** ,*` - ; ************` ,**:****; ::* - : *************;:;*;*++: *. - : *****************;* `* - `. `*****************; : *. - .` .*+************+****;: :* - `. :;+***********+******;` : .,* - ; ::*+*******************. `:: .`:. - + :::**********************;;:` * - + ,::;*************;:::*******. * - # `:::+*************:::;******** :, * - @ :::***************;:;*********;:, * - @ ::::******:*********************: ,:* - @ .:::******:;*********************, :* - # :::******::******###@*******;;**** *, - # .::;*****::*****#****@*****;:::***; `` ** - * ::;***********+*****+#******::*****,,,,** - : :;***********#******#****************** - .` `;***********#******+****+************ - `, ***#**@**+***+*****+**************;` - ; *++**#******#+****+` `.,.. - + `@***#*******#****# - + +***@********+**+: - * .+**+;**;;;**;#**# - ,` ****@ +*+: - # +**+ :+** - @ ;**+, ,***+ - # #@+**** *#****+ - `; @+***+@ `#**+#++ - # #*#@##, .++:.,# - `* @# +. - @@@ - # `@ - , """ diff --git a/openfl/component/collaborator/collaborator.py b/openfl/component/collaborator/collaborator.py index 54683135d5..119efd5eee 100644 --- a/openfl/component/collaborator/collaborator.py +++ b/openfl/component/collaborator/collaborator.py @@ -82,7 +82,6 @@ def __init__( compression_pipeline=None, db_store_rounds=1, log_memory_usage=False, - **kwargs, ): """Initialize the Collaborator object. @@ -104,7 +103,6 @@ def __init__( Defaults to None. db_store_rounds (int, optional): The number of rounds to store in the database. Defaults to 1. - **kwargs: Variable length argument list. 
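The collaborator change above mirrors the aggregator one: the catch-all **kwargs parameter is dropped in favor of explicit arguments. The runner_keras.py hunks that follow carry the patch's title change: in tf.keras, verbose=1 renders a per-batch progress bar (noisy when stdout is redirected to a file), while verbose=2 prints a single summary line per epoch. A minimal sketch of the difference, using a throwaway model and random data (nothing here is OpenFL API):

    import numpy as np
    from tensorflow import keras

    # Throwaway classifier and data, just to exercise fit().
    model = keras.Sequential([keras.layers.Dense(10, activation="softmax", input_shape=(784,))])
    model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
    x, y = np.random.rand(256, 784).astype("float32"), np.random.randint(0, 10, 256)

    # verbose=1: interactive per-batch progress bar; verbose=2: one line per epoch,
    # e.g. "8/8 - 0s - loss: 2.31 - accuracy: 0.11", which is what CI log files want.
    history = model.fit(x, y, epochs=2, verbose=2)
    print(history.history["accuracy"])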
""" self.single_col_cert_common_name = None diff --git a/openfl/federated/task/runner_keras.py b/openfl/federated/task/runner_keras.py index b33b20fde5..2d992c83bb 100644 --- a/openfl/federated/task/runner_keras.py +++ b/openfl/federated/task/runner_keras.py @@ -194,7 +194,7 @@ def train_iteration(self, batch_generator, metrics: list = None, **kwargs): f"Param_metrics = {metrics}, model_metrics_names = {model_metrics_names}" ) - history = self.model.fit(batch_generator, verbose=1, **kwargs) + history = self.model.fit(batch_generator, verbose=2, **kwargs) results = [] for metric in metrics: value = np.mean([history.history[metric]]) @@ -224,7 +224,7 @@ def validate(self, col_name, round_num, input_tensor_dict, **kwargs): self.rebuild_model(round_num, input_tensor_dict, validation=True) param_metrics = kwargs["metrics"] - vals = self.model.evaluate(self.data_loader.get_valid_loader(batch_size), verbose=1) + vals = self.model.evaluate(self.data_loader.get_valid_loader(batch_size), verbose=2) model_metrics_names = self.model.metrics_names if type(vals) is not list: vals = [vals] From 4ac343084c7327fb2106ce2fa51cbb859ef6e26c Mon Sep 17 00:00:00 2001 From: Karan Shah Date: Tue, 10 Dec 2024 11:04:23 +0530 Subject: [PATCH 2/5] Restore `openfl-tutorials` as installable package (#1203) * Add openfl-tutorials as package Signed-off-by: Shah, Karan * Add __init__.py Signed-off-by: Shah, Karan * Add nbformat pkg Signed-off-by: Shah, Karan * Try localhost Signed-off-by: Shah, Karan * Revert "Try localhost" This reverts commit 44b83044cbff55cc13aea2a9a3987cf38cce2a51. Signed-off-by: Shah, Karan * Try python3.10 Signed-off-by: Shah, Karan * Try localhost Signed-off-by: Shah, Karan --------- Signed-off-by: Shah, Karan --- .github/workflows/experimental_workflow_tests.yml | 8 ++++++-- openfl-tutorials/__init__.py | 0 .../workflow/workflow_interface_requirements.txt | 1 + setup.py | 10 +++++++++- 4 files changed, 16 insertions(+), 3 deletions(-) create mode 100644 openfl-tutorials/__init__.py diff --git a/.github/workflows/experimental_workflow_tests.yml b/.github/workflows/experimental_workflow_tests.yml index 030f42ed14..84166ab362 100644 --- a/.github/workflows/experimental_workflow_tests.yml +++ b/.github/workflows/experimental_workflow_tests.yml @@ -10,6 +10,10 @@ on: permissions: contents: read +env: + # A workaround for long FQDN names provided by GitHub actions. 
+ FQDN: "localhost" + jobs: build: if: (github.event.pull_request.draft == false && contains(github.event.pull_request.labels.*.name, 'workflow_interface')) @@ -18,10 +22,10 @@ jobs: steps: - uses: actions/checkout@v3 - - name: Set up Python 3.8 + - name: Set up Python 3 uses: actions/setup-python@v3 with: - python-version: "3.8" + python-version: "3.10" - name: Install dependencies run: | python -m pip install --upgrade pip diff --git a/openfl-tutorials/__init__.py b/openfl-tutorials/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openfl-tutorials/experimental/workflow/workflow_interface_requirements.txt b/openfl-tutorials/experimental/workflow/workflow_interface_requirements.txt index 6b4abbf54d..fd3a372667 100644 --- a/openfl-tutorials/experimental/workflow/workflow_interface_requirements.txt +++ b/openfl-tutorials/experimental/workflow/workflow_interface_requirements.txt @@ -5,6 +5,7 @@ dill==0.3.6 matplotlib>=2.0.0 metaflow==2.7.15 nbdev==2.3.12 +nbformat ray==2.9.2 torch torchvision diff --git a/setup.py b/setup.py index 1151bb0956..e5018fb12c 100644 --- a/setup.py +++ b/setup.py @@ -66,7 +66,15 @@ def run(self): long_description=open("README.md", encoding="utf-8").read(), long_description_content_type='text/markdown', url='https://github.com/securefederatedai/openfl', - packages=find_packages(include=("openfl", "openfl.*", "openfl-docker", "openfl-workspace")), + packages=find_packages( + include=( + "openfl", + "openfl.*", + "openfl-docker", + "openfl-workspace", + "openfl-tutorials", + ) + ), include_package_data=True, setup_requires=['grpcio-tools>=1.56.2,<1.66.0'], # ensure it is in-sync with `install_requires` install_requires=[ From 44614b3b41d48803ec1013941d1d8cf87e625ccc Mon Sep 17 00:00:00 2001 From: Payal Chaurasiya Date: Tue, 10 Dec 2024 18:05:53 +0530 Subject: [PATCH 3/5] Add Test to verify memory logs (#1191) * Typo fix Signed-off-by: Chaurasiya, Payal * Add memory logs test Signed-off-by: Chaurasiya, Payal * Add memory test in e2e Signed-off-by: Chaurasiya, Payal * Add memory test in e2e Signed-off-by: Chaurasiya, Payal * Simplify setup (#1187) * Simplify setup.py Signed-off-by: Shah, Karan * Remove tensorboardX and never-used log_metric code Signed-off-by: Shah, Karan * Test reducing requirements Signed-off-by: Shah, Karan * Revert "Remove tensorboardX and never-used log_metric code" and add fn calls Signed-off-by: Shah, Karan * Revert tb removal Signed-off-by: Shah, Karan * Disable tensorboard logging for gramine CI test Signed-off-by: Shah, Karan --------- Signed-off-by: Shah, Karan Signed-off-by: Chaurasiya, Payal * Fix for TF (2.13) cnn histology workspace 'Adam' object has no attribute 'weights' issue (#1194) Signed-off-by: yes Signed-off-by: Chaurasiya, Payal * fix(deps): requests min version set to 2.32.0 (#1198) Signed-off-by: Pant, Akshay Signed-off-by: Chaurasiya, Payal * Pin GaNDLF version to 0.1.1 (#1179) * fix(gandlf ci): pinned torchaudio version Signed-off-by: Pant, Akshay * fix(gandlf ci): install torch without cache Signed-off-by: Pant, Akshay * fix(gandlf ci): upgrade torch to 2.5.0 Signed-off-by: Pant, Akshay * fix(gandlf ci): pin gandlf to 0.1.1 Signed-off-by: Pant, Akshay --------- Signed-off-by: Pant, Akshay Signed-off-by: Chaurasiya, Payal * Migrate `shell/*` to `scripts/*` (#1193) * Update distribution scripts Signed-off-by: Shah, Karan * Migrate shell/ to scripts/ Signed-off-by: Shah, Karan * Remove lint test from ubuntu CI Signed-off-by: Shah, Karan --------- Signed-off-by: Shah, Karan Signed-off-by: Chaurasiya, 
Payal * Set timeouts for all CI workflows (#1200) * Set timeouts for all CI workflows Signed-off-by: Shah, Karan * forgot to add this too Signed-off-by: Shah, Karan --------- Signed-off-by: Shah, Karan Signed-off-by: Chaurasiya, Payal * Review comments Signed-off-by: Chaurasiya, Payal * Add details in file for further use Signed-off-by: Chaurasiya, Payal * Fix for tf_3dunet_barts workspace (#1197) Signed-off-by: yes Signed-off-by: Chaurasiya, Payal * Update task_runner_e2e.yml Signed-off-by: Chaurasiya, Payal * OpenFL roadmap update (#1196) * OpenFL 1.7 roadmap update Signed-off-by: Teodor Parvanov * Addressing review comments Signed-off-by: Teodor Parvanov --------- Signed-off-by: Teodor Parvanov Signed-off-by: Chaurasiya, Payal * Update log verbosity (#1202) Signed-off-by: Shah, Karan Signed-off-by: Chaurasiya, Payal * Restore `openfl-tutorials` as installable package (#1203) * Add openfl-tutorials as package Signed-off-by: Shah, Karan * Add __init__.py Signed-off-by: Shah, Karan * Add nbformat pkg Signed-off-by: Shah, Karan * Try localhost Signed-off-by: Shah, Karan * Revert "Try localhost" This reverts commit 44b83044cbff55cc13aea2a9a3987cf38cce2a51. Signed-off-by: Shah, Karan * Try python3.10 Signed-off-by: Shah, Karan * Try localhost Signed-off-by: Shah, Karan --------- Signed-off-by: Shah, Karan Signed-off-by: Chaurasiya, Payal * Fix for ubuntu Signed-off-by: Chaurasiya, Payal * Writing memory details in json Signed-off-by: Chaurasiya, Payal * Update task_runner_e2e.yml Signed-off-by: Chaurasiya, Payal * E501 fix Signed-off-by: Chaurasiya, Payal * Lint changes Signed-off-by: Chaurasiya, Payal --------- Signed-off-by: Chaurasiya, Payal Signed-off-by: Shah, Karan Signed-off-by: yes Signed-off-by: Pant, Akshay Signed-off-by: Teodor Parvanov Co-authored-by: Karan Shah Co-authored-by: Shailesh Tanwar <135304487+tanwarsh@users.noreply.github.com> Co-authored-by: Akshay Pant <16561942+theakshaypant@users.noreply.github.com> Co-authored-by: teoparvanov --- .github/workflows/task_runner_e2e.yml | 67 +++++++++++++++++ .github/workflows/ubuntu.yml | 3 +- openfl/component/aggregator/aggregator.py | 3 +- openfl/component/collaborator/collaborator.py | 5 +- openfl/utilities/logs.py | 17 +++++ tests/end_to_end/conftest.py | 73 ++++++++++--------- .../test_suites/memory_logs_tests.py | 73 +++++++++++++++++++ .../test_suites/task_runner_tests.py | 21 +++--- tests/end_to_end/utils/federation_helper.py | 57 ++++++++++++++- 9 files changed, 268 insertions(+), 51 deletions(-) create mode 100644 tests/end_to_end/test_suites/memory_logs_tests.py diff --git a/.github/workflows/task_runner_e2e.yml b/.github/workflows/task_runner_e2e.yml index 567cce7ba7..4801ab6fad 100644 --- a/.github/workflows/task_runner_e2e.yml +++ b/.github/workflows/task_runner_e2e.yml @@ -230,3 +230,70 @@ jobs: with: name: tr_no_client_auth_${{ env.MODEL_NAME }}_python${{ env.PYTHON_VERSION }}_${{ github.run_id }} path: result.tar + + test_memory_logs: + name: tr_tls_memory_logs + runs-on: ubuntu-22.04 + timeout-minutes: 15 + strategy: + matrix: + # Testing non TLS scenario only for torch_cnn_mnist model and python 3.10 + # If required, this can be extended to other models and python versions + model_name: ["torch_cnn_mnist"] + python_version: ["3.10"] + fail-fast: false # do not immediately fail if one of the combinations fail + + env: + MODEL_NAME: ${{ matrix.model_name }} + PYTHON_VERSION: ${{ matrix.python_version }} + + steps: + - name: Checkout OpenFL repository + id: checkout_openfl + uses: actions/checkout@v4.1.1 + with: + 
fetch-depth: 2 # needed for detecting changes + submodules: "true" + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Set up Python + id: setup_python + uses: actions/setup-python@v3 + with: + python-version: ${{ env.PYTHON_VERSION }} + + - name: Install dependencies + id: install_dependencies + run: | + python -m pip install --upgrade pip + pip install . + pip install -r test-requirements.txt + + - name: Run Task Runner E2E tests without TLS + id: run_tests + run: | + python -m pytest -s tests/end_to_end/test_suites/memory_logs_tests.py \ + --model_name ${{ env.MODEL_NAME }} --num_rounds ${{ env.NUM_ROUNDS }} \ + --num_collaborators ${{ env.NUM_COLLABORATORS }} --log_memory_usage + echo "Task runner memory logs test run completed" + + - name: Print test summary + id: print_test_summary + if: ${{ always() }} + run: | + export PYTHONPATH="$PYTHONPATH:." + python tests/end_to_end/utils/summary_helper.py + echo "Test summary printed" + + - name: Tar files + id: tar_files + if: ${{ always() }} + run: tar -cvf result.tar results + + - name: Upload Artifacts + id: upload_artifacts + uses: actions/upload-artifact@v4 + if: ${{ always() }} + with: + name: tr_tls_memory_logs_${{ env.MODEL_NAME }}_python${{ env.PYTHON_VERSION }}_${{ github.run_id }} + path: result.tar diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index 70a195e70b..747ca067a4 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -13,7 +13,6 @@ env: jobs: pytest-coverage: # from pytest_coverage.yml - needs: lint runs-on: ubuntu-latest timeout-minutes: 15 @@ -35,7 +34,7 @@ jobs: coverage report cli: - needs: [lint, pytest-coverage] + needs: [pytest-coverage] runs-on: ubuntu-latest timeout-minutes: 15 diff --git a/openfl/component/aggregator/aggregator.py b/openfl/component/aggregator/aggregator.py index 041022e300..1e34aa7e92 100644 --- a/openfl/component/aggregator/aggregator.py +++ b/openfl/component/aggregator/aggregator.py @@ -15,7 +15,7 @@ from openfl.pipelines import NoCompressionPipeline, TensorCodec from openfl.protocols import base_pb2, utils from openfl.utilities import TaskResultKey, TensorKey, change_tags -from openfl.utilities.logs import get_memory_usage, write_metric +from openfl.utilities.logs import get_memory_usage, write_memory_usage_to_file, write_metric class Aggregator: @@ -1013,6 +1013,7 @@ def _end_of_round_check(self): if self._time_to_quit(): if self.log_memory_usage: self.logger.info(f"Publish memory usage: {self.memory_details}") + write_memory_usage_to_file(self.memory_details, "aggregator_memory_usage.json") self.logger.info("Experiment Completed. 
Cleaning up...") else: self.logger.info("Starting round %s...", self.round_number) diff --git a/openfl/component/collaborator/collaborator.py b/openfl/component/collaborator/collaborator.py index 119efd5eee..08f19b9d94 100644 --- a/openfl/component/collaborator/collaborator.py +++ b/openfl/component/collaborator/collaborator.py @@ -13,7 +13,7 @@ from openfl.pipelines import NoCompressionPipeline, TensorCodec from openfl.protocols import utils from openfl.utilities import TensorKey -from openfl.utilities.logs import get_memory_usage +from openfl.utilities.logs import get_memory_usage, write_memory_usage_to_file class DevicePolicy(Enum): @@ -181,6 +181,9 @@ def run(self): memory_details.append(memory_detail) if self.log_memory_usage: self.logger.info(f"Publish memory usage: {memory_details}") + write_memory_usage_to_file( + memory_details, f"{self.collaborator_name}_memory_usage.json" + ) self.logger.info("End of Federation reached. Exiting...") diff --git a/openfl/utilities/logs.py b/openfl/utilities/logs.py index a17f0742fc..ce64b5f7fb 100644 --- a/openfl/utilities/logs.py +++ b/openfl/utilities/logs.py @@ -4,6 +4,7 @@ """Logs utilities.""" +import json import logging import os @@ -92,3 +93,19 @@ def get_memory_usage() -> dict: }, } return memory_usage + + +def write_memory_usage_to_file(memory_usage_dict, file_name): + """ + Write memory usage details to a file. + + Args: + memory_usage_dict (dict): The memory usage details to write. + file_name (str): The name of the file to write to. + + Returns: + None + """ + file_path = os.path.join("logs", file_name) + with open(file_path, "w") as f: + json.dump(memory_usage_dict, f, indent=4) diff --git a/tests/end_to_end/conftest.py b/tests/end_to_end/conftest.py index 22eed9d101..098d1fbe61 100644 --- a/tests/end_to_end/conftest.py +++ b/tests/end_to_end/conftest.py @@ -17,7 +17,7 @@ # Define a named tuple to store the objects for model owner, aggregator, and collaborators federation_fixture = collections.namedtuple( "federation_fixture", - "model_owner, aggregator, collaborators, model_name, require_client_auth, use_tls, workspace_path, results_dir, num_rounds", + "model_owner, aggregator, collaborators, workspace_path", ) def pytest_addoption(parser): @@ -180,6 +180,24 @@ def pytest_sessionfinish(session, exitstatus): log.debug(f"Cleared .pytest_cache directory at {cache_dir}") +def pytest_configure(config): + """ + Configure the pytest plugin. 
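pytest_configure parses the command line once per session and stamps the results onto the config object, which is what lets the reworked fixture and tests further down read request.config.num_rounds and friends instead of re-parsing arguments in every test. The pattern in isolation, with illustrative values (a real conftest.py derives them from parse_arguments() as shown here):

    # conftest.py (sketch)
    def pytest_configure(config):
        # Runs once per session; attributes set here are visible everywhere.
        config.num_rounds = 5
        config.use_tls = True

    # any test module
    def test_example(request):
        # Tests and fixtures read the shared values back via request.config.
        assert request.config.num_rounds == 5
        if request.config.use_tls:
            pass  # e.g. set up PKI before starting the federation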
+ Args: + config: pytest config object + """ + # Declare some global variables + args = parse_arguments() + # Use the model name from the test case name if not provided as a command line argument + config.model_name = args.model_name + config.num_collaborators = args.num_collaborators + config.num_rounds = args.num_rounds + config.require_client_auth = not args.disable_client_auth + config.use_tls = not args.disable_tls + config.log_memory_usage = args.log_memory_usage + config.results_dir = config.getini("results_dir") + + @pytest.fixture(scope="function") def fx_federation(request, pytestconfig): """ @@ -196,38 +214,26 @@ def fx_federation(request, pytestconfig): """ collaborators = [] agg_domain_name = "localhost" - - # Parse the command line arguments - args = parse_arguments() - # Use the model name from the test case name if not provided as a command line argument - model_name = args.model_name if args.model_name else request.node.name.split("test_")[1] - results_dir = pytestconfig.getini("results_dir") - num_collaborators = args.num_collaborators - num_rounds = args.num_rounds - require_client_auth = not args.disable_client_auth - use_tls = not args.disable_tls - log_memory_usage = args.log_memory_usage - log.info( f"Running federation setup using Task Runner API on single machine with below configurations:\n" - f"\tNumber of collaborators: {num_collaborators}\n" - f"\tNumber of rounds: {num_rounds}\n" - f"\tModel name: {model_name}\n" - f"\tClient authentication: {require_client_auth}\n" - f"\tTLS: {use_tls}\n" - f"\tMemory Logs: {log_memory_usage}" + f"\tNumber of collaborators: {request.config.num_collaborators}\n" + f"\tNumber of rounds: {request.config.num_rounds}\n" + f"\tModel name: {request.config.model_name}\n" + f"\tClient authentication: {request.config.require_client_auth}\n" + f"\tTLS: {request.config.use_tls}\n" + f"\tMemory Logs: {request.config.log_memory_usage}" ) # Validate the model name and create the workspace name - if not model_name.upper() in constants.ModelName._member_names_: - raise ValueError(f"Invalid model name: {model_name}") + if not request.config.model_name.upper() in constants.ModelName._member_names_: + raise ValueError(f"Invalid model name: {request.config.model_name}") - workspace_name = f"workspace_{model_name}" + workspace_name = request.config.model_name # Create model owner object and the workspace for the model - model_owner = participants.ModelOwner(workspace_name, model_name, log_memory_usage) + model_owner = participants.ModelOwner(workspace_name, request.config.model_name, request.config.log_memory_usage) try: - workspace_path = model_owner.create_workspace(results_dir=results_dir) + workspace_path = model_owner.create_workspace(results_dir=request.config.results_dir) except Exception as e: log.error(f"Failed to create the workspace: {e}") raise e @@ -235,19 +241,19 @@ def fx_federation(request, pytestconfig): # Modify the plan try: model_owner.modify_plan( - new_rounds=num_rounds, - num_collaborators=num_collaborators, - require_client_auth=require_client_auth, - use_tls=use_tls, + new_rounds=request.config.num_rounds, + num_collaborators=request.config.num_collaborators, + require_client_auth=request.config.require_client_auth, + use_tls=request.config.use_tls, ) except Exception as e: log.error(f"Failed to modify the plan: {e}") raise e - if not use_tls: + if not request.config.use_tls: log.info("Disabling TLS for communication") try: - model_owner.register_collaborators(num_collaborators) + 
model_owner.register_collaborators(request.config.num_collaborators) except Exception as e: log.error(f"Failed to register the collaborators: {e}") raise e @@ -271,7 +277,7 @@ def fx_federation(request, pytestconfig): agg_domain_name=agg_domain_name, workspace_path=workspace_path ) - for i in range(num_collaborators): + for i in range(request.config.num_collaborators): collaborator = participants.Collaborator( collaborator_name=f"collaborator{i+1}", data_directory_path=i + 1, @@ -285,10 +291,5 @@ def fx_federation(request, pytestconfig): model_owner=model_owner, aggregator=aggregator, collaborators=collaborators, - model_name=model_name, - require_client_auth=require_client_auth, - use_tls=use_tls, workspace_path=workspace_path, - results_dir=results_dir, - num_rounds=num_rounds, ) diff --git a/tests/end_to_end/test_suites/memory_logs_tests.py b/tests/end_to_end/test_suites/memory_logs_tests.py new file mode 100644 index 0000000000..d763c419d0 --- /dev/null +++ b/tests/end_to_end/test_suites/memory_logs_tests.py @@ -0,0 +1,73 @@ +# Copyright 2020-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest +import logging +import os +import json + +from tests.end_to_end.utils import federation_helper as fed_helper + +log = logging.getLogger(__name__) + + +@pytest.mark.log_memory_usage +def test_log_memory_usage(request, fx_federation): + """ + This module contains end-to-end tests for logging memory usage in a federated learning setup. + Test Suite: + - test_log_memory_usage: Tests the memory usage logging functionality for the torch_cnn_mnist model. + Functions: + - test_log_memory_usage(request, fx_federation): + Test the memory usage logging functionality in a federated learning setup. + Parameters: + - request: The pytest request object containing configuration options. + - fx_federation: The fixture representing the federated learning setup. + Steps: + 1. Skip the test if memory usage logging is disabled. + 2. Setup PKI for trusted communication if TLS is enabled. + 3. Start the federation and verify its completion. + 4. Verify the existence of memory usage logs for the aggregator. + 5. Verify the memory usage details for each round. + 6. Verify the existence and details of memory usage logs for each collaborator. + 7. Log the availability of memory usage details for all participants. 
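Steps 4 through 6 boil down to one check per participant: the JSON file exists and holds exactly one entry per training round. A condensed, self-contained version of that check (the file-name convention matches the test body below; entry contents are not inspected):

    import json
    import os

    def check_memory_log(workspace_path, participant_name, num_rounds):
        """Assert a participant's memory-usage JSON has one entry per round."""
        path = os.path.join(workspace_path, "logs", f"{participant_name}_memory_usage.json")
        assert os.path.exists(path), f"{path} is not available"
        with open(path) as f:
            entries = json.load(f)
        assert len(entries) == num_rounds, (
            f"expected {num_rounds} entries for {participant_name}, found {len(entries)}"
        )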
+ """ + # Skip test if fx_federation.log_memory_usage is False + if not request.config.log_memory_usage: + pytest.skip("Memory usage logging is disabled") + + # Setup PKI for trusted communication within the federation + if request.config.use_tls: + assert fed_helper.setup_pki(fx_federation), "Failed to setup PKI for trusted communication" + + # Start the federation + results = fed_helper.run_federation(fx_federation) + + # Verify the completion of the federation run + assert fed_helper.verify_federation_run_completion(fx_federation, results, \ + num_rounds=request.config.num_rounds), "Federation completion failed" + # Verify the aggregator memory logs + aggregator_memory_usage_file = os.path.join(fx_federation.workspace_path, "logs", "aggregator_memory_usage.json") + assert os.path.exists(aggregator_memory_usage_file), "Aggregator memory usage file is not available" + + # Log the aggregator memory usage details + memory_usage_dict = json.load(open(aggregator_memory_usage_file)) + + # check memory usage entries for each round + assert len(memory_usage_dict) == request.config.num_rounds, \ + "Memory usage details are not available for all rounds" + + # check memory usage entries for each collaborator + for collaborator in fx_federation.collaborators: + collaborator_memory_usage_file = os.path.join(fx_federation.workspace_path, + "logs", + f"{collaborator.collaborator_name}_memory_usage.json") + + assert os.path.exists(collaborator_memory_usage_file), f"Memory usage file for collaborator {collaborator.collaborator_name} is not available" + + memory_usage_dict = json.load(open(collaborator_memory_usage_file)) + + assert len(memory_usage_dict) == request.config.num_rounds, \ + f"Memory usage details are not available for all rounds for collaborator {collaborator.collaborator_name}" + + log.info("Memory usage details are available for all participants") diff --git a/tests/end_to_end/test_suites/task_runner_tests.py b/tests/end_to_end/test_suites/task_runner_tests.py index 43946d61fb..077ed642dd 100644 --- a/tests/end_to_end/test_suites/task_runner_tests.py +++ b/tests/end_to_end/test_suites/task_runner_tests.py @@ -10,51 +10,54 @@ @pytest.mark.torch_cnn_mnist -def test_torch_cnn_mnist(fx_federation): +def test_torch_cnn_mnist(request, fx_federation): """ Test for torch_cnn_mnist model. 
""" log.info("Testing torch_cnn_mnist model") # Setup PKI for trusted communication within the federation - if fx_federation.use_tls: + if request.config.use_tls: assert fed_helper.setup_pki(fx_federation), "Failed to setup PKI for trusted communication" # Start the federation results = fed_helper.run_federation(fx_federation) # Verify the completion of the federation run - assert fed_helper.verify_federation_run_completion(fx_federation, results), "Federation completion failed" + assert fed_helper.verify_federation_run_completion(fx_federation, results, + num_rounds=request.config.num_rounds), "Federation completion failed" @pytest.mark.keras_cnn_mnist -def test_keras_cnn_mnist(fx_federation): +def test_keras_cnn_mnist(request, fx_federation): log.info("Testing keras_cnn_mnist model") # Setup PKI for trusted communication within the federation - if fx_federation.use_tls: + if request.config.use_tls: assert fed_helper.setup_pki(fx_federation), "Failed to setup PKI for trusted communication" # Start the federation results = fed_helper.run_federation(fx_federation) # Verify the completion of the federation run - assert fed_helper.verify_federation_run_completion(fx_federation, results), "Federation completion failed" + assert fed_helper.verify_federation_run_completion(fx_federation, results, + num_rounds=request.config.num_rounds), "Federation completion failed" @pytest.mark.torch_cnn_histology -def test_torch_cnn_histology(fx_federation): +def test_torch_cnn_histology(request, fx_federation): """ Test for torch_cnn_histology model """ log.info("Testing torch_cnn_histology model") # Setup PKI for trusted communication within the federation - if fx_federation.use_tls: + if request.config.use_tls: assert fed_helper.setup_pki(fx_federation), "Failed to setup PKI for trusted communication" # Start the federation results = fed_helper.run_federation(fx_federation) # Verify the completion of the federation run - assert fed_helper.verify_federation_run_completion(fx_federation, results), "Federation completion failed" + assert fed_helper.verify_federation_run_completion(fx_federation, results, + num_rounds=request.config.num_rounds), "Federation completion failed" diff --git a/tests/end_to_end/utils/federation_helper.py b/tests/end_to_end/utils/federation_helper.py index d87667077e..dc39323fa9 100644 --- a/tests/end_to_end/utils/federation_helper.py +++ b/tests/end_to_end/utils/federation_helper.py @@ -4,6 +4,8 @@ import time import concurrent.futures import logging +import json +import re from tests.end_to_end.utils.constants import SUCCESS_MARKER @@ -68,12 +70,13 @@ def run_federation(fed_obj): return results -def verify_federation_run_completion(fed_obj, results): +def verify_federation_run_completion(fed_obj, results, num_rounds): """ Verify the completion of the process for all the participants Args: fed_obj (object): Federation fixture object results (list): List of results + num_rounds (int): Number of rounds Returns: list: List of response (True or False) for all the participants """ @@ -85,7 +88,7 @@ def verify_federation_run_completion(fed_obj, results): executor.submit( _verify_completion_for_participant, participant, - fed_obj.num_rounds, + num_rounds, results[i] ) for i, participant in enumerate(fed_obj.collaborators + [fed_obj.aggregator]) @@ -133,3 +136,53 @@ def _verify_completion_for_participant(participant, num_rounds, result_file, tim else: log.info(f"Process completed for {participant.name} in {time.time() - start_time} seconds") return True + + +def 
extract_memory_usage(log_file): + """ + Extracts memory usage data from a log file. + This function reads the content of the specified log file, searches for memory usage data + using a regular expression pattern, and returns the extracted data as a dictionary. + Args: + log_file (str): The path to the log file from which to extract memory usage data. + Returns: + dict: A dictionary containing the memory usage data. + Raises: + json.JSONDecodeError: If there is an error decoding the JSON data. + Exception: If memory usage data is not found in the log file. + """ + try: + with open(log_file, 'r') as file: + content = file.read() + + pattern = r"Publish memory usage: (\[.*?\])" + match = re.search(pattern, content, re.DOTALL) + + if match: + memory_usage_data = match.group(1) + memory_usage_data = re.sub(r'\S+\.py:\d+', '', memory_usage_data) + memory_usage_data = memory_usage_data.replace('\n', '').replace(' ', '') + memory_usage_data = memory_usage_data.replace("'", '"') + memory_usage_dict = json.loads(memory_usage_data) + return memory_usage_dict + else: + log.error("Memory usage data not found in the log file") + raise Exception("Memory usage data not found in the log file") + except Exception as e: + log.error(f"An error occurred while extracting memory usage: {e}") + raise e + +def write_memory_usage_to_file(memory_usage_dict, output_file): + """ + Writes memory usage data to a file. + This function writes the specified memory usage data to the specified output file. + Args: + memory_usage_dict (dict): A dictionary containing the memory usage data. + output_file (str): The path to the output file to which to write the memory usage data. + """ + try: + with open(output_file, 'w') as file: + json.dump(memory_usage_dict, file, indent=4) + except Exception as e: + log.error(f"An error occurred while writing memory usage data to file: {e}") + raise e From 1e20c24ef4e7fa9bd33f6d993042fe20ad23bbe8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 11 Dec 2024 06:38:12 +0530 Subject: [PATCH 4/5] Bump pytest from 8.3.3 to 8.3.4 (#1182) Bumps [pytest](https://github.com/pytest-dev/pytest) from 8.3.3 to 8.3.4. - [Release notes](https://github.com/pytest-dev/pytest/releases) - [Changelog](https://github.com/pytest-dev/pytest/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pytest-dev/pytest/compare/8.3.3...8.3.4) --- updated-dependencies: - dependency-name: pytest dependency-type: direct:production update-type: version-update:semver-patch ... 
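A note on the log-scraping helper added in the previous patch: federation_helper.extract_memory_usage recovers the per-round list from free-form participant logs by regex-matching the Python-literal list printed after "Publish memory usage:", stripping any trailing file.py:123 markers that console logging may interleave, and rewriting quotes so json.loads can parse it. A small round-trip sketch against a synthetic log (path illustrative):

    from tests.end_to_end.utils.federation_helper import extract_memory_usage

    # Write a synthetic log line in the exact shape the regex expects.
    with open("/tmp/collaborator1.log", "w") as f:
        f.write("Publish memory usage: [{'round_number': 0}, {'round_number': 1}]\n")

    entries = extract_memory_usage("/tmp/collaborator1.log")
    print(len(entries))  # 2, i.e. one entry per round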
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- test-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test-requirements.txt b/test-requirements.txt index 19bf081db1..78f26429f2 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,4 +1,4 @@ lxml==5.3.0 -pytest==8.3.3 +pytest==8.3.4 pytest-asyncio==0.24.0 pytest-mock==3.14.0 From 7e742ddc8e9016421b84eef86a01c72d05143424 Mon Sep 17 00:00:00 2001 From: Shailesh Tanwar <135304487+tanwarsh@users.noreply.github.com> Date: Wed, 11 Dec 2024 12:11:28 +0530 Subject: [PATCH 5/5] Bump Python version to 3.9 - 3.11 (#1189) * removed python version <= 3.8 Signed-off-by: yes * changes as per comments Signed-off-by: yes * revert changes Signed-off-by: yes * doc updated Signed-off-by: yes * python version updated to 3.10 to 3.12 Signed-off-by: yes * python version updated to 3.10 to 3.12 Signed-off-by: yes * dummy commit Signed-off-by: yes * dummy commit Signed-off-by: yes * version changes Signed-off-by: yes * changed version t0 3.10 Signed-off-by: yes * fixed typo Signed-off-by: yes * bump tensorflow to 2.14 Signed-off-by: yes * bump tensorflow to 2.15 Signed-off-by: yes * revert tensorflo version and removed python version 3.12 Signed-off-by: yes * revert tensorflo version and removed python version 3.12 Signed-off-by: yes * removed astor Signed-off-by: yes * added nbformat lib Signed-off-by: yes * formatting fix Signed-off-by: yes * numpy version fix Signed-off-by: yes * numpy version fix Signed-off-by: yes * revert python version back to 3.9 Signed-off-by: yes * removed astor Signed-off-by: yes * numpy version change Signed-off-by: yes * numpy version change Signed-off-by: yes * numpy version change Signed-off-by: yes * switch to torch_cnn_mnist Signed-off-by: yes --------- Signed-off-by: yes --- .github/workflows/docker-bench-security.yml | 4 ++-- .github/workflows/double_ws_export.yml | 4 ++-- .github/workflows/experimental_workflow_tests.yml | 2 +- .github/workflows/hadolint.yml | 4 ++-- .github/workflows/lint.yml | 4 ++-- .github/workflows/pki.yml | 8 ++++---- .github/workflows/pytest_coverage.yml | 4 ++-- .github/workflows/straggler-handling.yml | 4 ++-- .github/workflows/task_runner_e2e.yml | 2 +- .github/workflows/taskrunner.yml | 2 +- .github/workflows/taskrunner_eden_pipeline.yml | 4 ++-- .github/workflows/tr_docker_gramine_direct.yml | 4 ++-- .github/workflows/tr_docker_native.yml | 4 ++-- .github/workflows/ubuntu.yml | 8 ++++---- .github/workflows/windows.yml | 8 ++++---- Jenkinsfile | 4 ++-- README.md | 3 ++- docs/about/features_index/taskrunner.rst | 2 +- docs/developer_guide/running_the_federation.notebook.rst | 2 +- .../running_the_federation_with_gandlf.rst | 2 +- docs/get_started/installation.rst | 2 +- .../workflow/workflow_interface_requirements.txt | 3 +-- openfl-workspace/keras_cnn_mnist/requirements.txt | 1 + openfl/experimental/workflow/workspace_export/export.py | 7 +++---- setup.py | 3 +-- tests/github/pki_wrong_cn.py | 4 ++-- 26 files changed, 49 insertions(+), 50 deletions(-) diff --git a/.github/workflows/docker-bench-security.yml b/.github/workflows/docker-bench-security.yml index 588c454187..1d1b56b486 100644 --- a/.github/workflows/docker-bench-security.yml +++ b/.github/workflows/docker-bench-security.yml @@ -16,10 +16,10 @@ jobs: steps: - uses: actions/checkout@v3 - - name: Set up Python 3.8 + - name: Set up Python 3 uses: actions/setup-python@v3 with: - python-version: "3.8" + python-version: "3.9" - 
name: Install dependencies run: | python -m pip install --upgrade pip diff --git a/.github/workflows/double_ws_export.yml b/.github/workflows/double_ws_export.yml index efabaaa6d6..d4d6b0459c 100644 --- a/.github/workflows/double_ws_export.yml +++ b/.github/workflows/double_ws_export.yml @@ -23,10 +23,10 @@ jobs: steps: - uses: actions/checkout@v3 - - name: Set up Python 3.8 + - name: Set up Python 3 uses: actions/setup-python@v3 with: - python-version: "3.8" + python-version: "3.9" - name: Install dependencies run: | python -m pip install --upgrade pip diff --git a/.github/workflows/experimental_workflow_tests.yml b/.github/workflows/experimental_workflow_tests.yml index 84166ab362..d99cec2e5f 100644 --- a/.github/workflows/experimental_workflow_tests.yml +++ b/.github/workflows/experimental_workflow_tests.yml @@ -25,7 +25,7 @@ jobs: - name: Set up Python 3 uses: actions/setup-python@v3 with: - python-version: "3.10" + python-version: "3.9" - name: Install dependencies run: | python -m pip install --upgrade pip diff --git a/.github/workflows/hadolint.yml b/.github/workflows/hadolint.yml index 749d9fb51b..6e90292158 100644 --- a/.github/workflows/hadolint.yml +++ b/.github/workflows/hadolint.yml @@ -19,10 +19,10 @@ jobs: steps: - uses: actions/checkout@v3 - - name: Set up Python 3.8 + - name: Set up Python 3 uses: actions/setup-python@v3 with: - python-version: "3.8" + python-version: "3.9" - name: Install dependencies run: | python -m pip install --upgrade pip diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 4ebdd422fb..13d53e885d 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -19,10 +19,10 @@ jobs: steps: - uses: actions/checkout@v3 - - name: Set up Python 3.8 + - name: Set up Python 3 uses: actions/setup-python@v3 with: - python-version: "3.8" + python-version: "3.9" - name: Install linters run: | python -m pip install --upgrade pip diff --git a/.github/workflows/pki.yml b/.github/workflows/pki.yml index 9cac1df031..d8ecbecb03 100644 --- a/.github/workflows/pki.yml +++ b/.github/workflows/pki.yml @@ -23,10 +23,10 @@ jobs: steps: - uses: actions/checkout@v3 - - name: Set up Python 3.8 + - name: Set up Python 3 uses: actions/setup-python@v3 with: - python-version: "3.8" + python-version: "3.9" - name: Install dependencies run: | python -m pip install --upgrade pip @@ -40,10 +40,10 @@ jobs: steps: - uses: actions/checkout@v3 - - name: Set up Python 3.8 + - name: Set up Python 3 uses: actions/setup-python@v3 with: - python-version: "3.8" + python-version: "3.9" - name: Install dependencies run: | python -m pip install --upgrade pip diff --git a/.github/workflows/pytest_coverage.yml b/.github/workflows/pytest_coverage.yml index bf968fc5f0..9371f74e13 100644 --- a/.github/workflows/pytest_coverage.yml +++ b/.github/workflows/pytest_coverage.yml @@ -24,10 +24,10 @@ jobs: steps: - uses: actions/checkout@v3 - - name: Set up Python 3.8 + - name: Set up Python 3 uses: actions/setup-python@v3 with: - python-version: "3.8" + python-version: "3.9" - name: Install dependencies run: | python -m pip install --upgrade pip diff --git a/.github/workflows/straggler-handling.yml b/.github/workflows/straggler-handling.yml index c37fa7daf4..47b7f05709 100644 --- a/.github/workflows/straggler-handling.yml +++ b/.github/workflows/straggler-handling.yml @@ -26,10 +26,10 @@ jobs: steps: - uses: actions/checkout@v3 - - name: Set up Python 3.8 + - name: Set up Python 3 uses: actions/setup-python@v3 with: - python-version: "3.8" + python-version: "3.9" - name: 
Install dependencies run: | python -m pip install --upgrade pip diff --git a/.github/workflows/task_runner_e2e.yml b/.github/workflows/task_runner_e2e.yml index 4801ab6fad..1c1a91ca67 100644 --- a/.github/workflows/task_runner_e2e.yml +++ b/.github/workflows/task_runner_e2e.yml @@ -39,7 +39,7 @@ jobs: # There are open issues for some of the models, so excluding them for now: # model_name: [ "torch_cnn_mnist", "keras_cnn_mnist", "torch_cnn_histology" ] model_name: ["torch_cnn_mnist", "keras_cnn_mnist"] - python_version: ["3.8", "3.9", "3.10"] + python_version: ["3.9", "3.10", "3.11"] fail-fast: false # do not immediately fail if one of the combinations fail env: diff --git a/.github/workflows/taskrunner.yml b/.github/workflows/taskrunner.yml index 8f1bcc35dc..1e3a3ab6c2 100644 --- a/.github/workflows/taskrunner.yml +++ b/.github/workflows/taskrunner.yml @@ -21,7 +21,7 @@ jobs: strategy: matrix: os: ['ubuntu-latest', 'windows-latest'] - python-version: ['3.8','3.9','3.10','3.11'] + python-version: ["3.9", "3.10", "3.11"] runs-on: ${{ matrix.os }} timeout-minutes: 15 diff --git a/.github/workflows/taskrunner_eden_pipeline.yml b/.github/workflows/taskrunner_eden_pipeline.yml index 2cb0e9effc..44de314a5f 100644 --- a/.github/workflows/taskrunner_eden_pipeline.yml +++ b/.github/workflows/taskrunner_eden_pipeline.yml @@ -22,10 +22,10 @@ jobs: steps: - uses: actions/checkout@v3 - - name: Set up Python 3.8 + - name: Set up Python 3 uses: actions/setup-python@v3 with: - python-version: "3.8" + python-version: "3.9" - name: Install dependencies run: | python -m pip install --upgrade pip diff --git a/.github/workflows/tr_docker_gramine_direct.yml b/.github/workflows/tr_docker_gramine_direct.yml index be2afea5f9..855a059c98 100644 --- a/.github/workflows/tr_docker_gramine_direct.yml +++ b/.github/workflows/tr_docker_gramine_direct.yml @@ -17,10 +17,10 @@ jobs: steps: - uses: actions/checkout@v3 - - name: Set up Python 3.8 + - name: Set up Python 3 uses: actions/setup-python@v3 with: - python-version: "3.8" + python-version: "3.9" - name: Install dependencies run: | python -m pip install --upgrade pip diff --git a/.github/workflows/tr_docker_native.yml b/.github/workflows/tr_docker_native.yml index 899fcd8296..f97b268636 100644 --- a/.github/workflows/tr_docker_native.yml +++ b/.github/workflows/tr_docker_native.yml @@ -17,10 +17,10 @@ jobs: steps: - uses: actions/checkout@v3 - - name: Set up Python 3.8 + - name: Set up Python 3 uses: actions/setup-python@v3 with: - python-version: "3.8" + python-version: "3.9" - name: Install dependencies run: | python -m pip install --upgrade pip diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index 747ca067a4..4c3c99af43 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -18,10 +18,10 @@ jobs: steps: - uses: actions/checkout@v3 - - name: Set up Python 3.8 + - name: Set up Python 3 uses: actions/setup-python@v3 with: - python-version: "3.8" + python-version: "3.9" - name: Install dependencies run: | python -m pip install --upgrade pip @@ -40,10 +40,10 @@ jobs: steps: - uses: actions/checkout@v3 - - name: Set up Python 3.8 + - name: Set up Python 3 uses: actions/setup-python@v3 with: - python-version: "3.8" + python-version: "3.9" - name: Install dependencies run: | python -m pip install --upgrade pip diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 6d8330d553..1e5b245cc1 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -17,10 +17,10 @@ jobs: 
timeout-minutes: 15 steps: - uses: actions/checkout@v3 - - name: Set up Python 3.8 + - name: Set up Python 3 uses: actions/setup-python@v3 with: - python-version: "3.8" + python-version: "3.9" - name: Install dependencies run: | python -m pip install --upgrade pip @@ -38,10 +38,10 @@ jobs: timeout-minutes: 15 steps: - uses: actions/checkout@v3 - - name: Set up Python 3.8 + - name: Set up Python 3 uses: actions/setup-python@v3 with: - python-version: "3.8" + python-version: "3.9" - name: Install dependencies run: | python -m pip install --upgrade pip diff --git a/Jenkinsfile b/Jenkinsfile index b4e012ad32..c1904d0453 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -81,7 +81,7 @@ pipeline { SNYK_ALLOW_LONG_PROJECT_NAME = true SNYK_USE_MULTI_PROC = true SNYK_DEBUG = true - SNYK_PYTHON_VERSION = '3.8' + SNYK_PYTHON_VERSION = '3.9' BANDIT_SOURCE_PATH = 'openfl/ openfl-workspace/ openfl-tutorials/' BANDIT_SEVERITY_LEVEL = 'high' @@ -114,7 +114,7 @@ pipeline { stage('Build Package') { agent { docker { - image 'python:3.8' + image 'python:3.9' } } steps { diff --git a/README.md b/README.md index f1e07aabbe..eb5a28e8d4 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,8 @@ -[![PyPI - Python Version](https://img.shields.io/badge/python-3.7%20%7C%203.8%20%7C%203.9%20%7C%203.10-blue)](https://pypi.org/project/openfl/) + +[![PyPI - Python Version](https://img.shields.io/badge/python-3.9%20%7C%203.10%20%7C%203.11-blue)](https://pypi.org/project/openfl/) [![Ubuntu CI status](https://github.com/securefederatedai/openfl/actions/workflows/ubuntu.yml/badge.svg)](https://github.com/securefederatedai/openfl/actions/workflows/ubuntu.yml) [![Windows CI status](https://github.com/securefederatedai/openfl/actions/workflows/windows.yml/badge.svg)](https://github.com/securefederatedai/openfl/actions/workflows/windows.yml) [![Documentation Status](https://readthedocs.org/projects/openfl/badge/?version=latest)](https://openfl.readthedocs.io/en/latest/?badge=latest) diff --git a/docs/about/features_index/taskrunner.rst b/docs/about/features_index/taskrunner.rst index f4e3cc0730..75b408f0d2 100644 --- a/docs/about/features_index/taskrunner.rst +++ b/docs/about/features_index/taskrunner.rst @@ -147,7 +147,7 @@ Bare Metal Approach STEP 1: Create a Workspace ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -1. Start a Python 3.8 (>=3.6, <3.11) virtual environment and confirm |productName| is available. +1. Start a Python 3.9 (>=3.9, <3.12) virtual environment and confirm |productName| is available. .. code-block:: console diff --git a/docs/developer_guide/running_the_federation.notebook.rst b/docs/developer_guide/running_the_federation.notebook.rst index d335c112d0..f5bd9c12ff 100644 --- a/docs/developer_guide/running_the_federation.notebook.rst +++ b/docs/developer_guide/running_the_federation.notebook.rst @@ -17,7 +17,7 @@ You will start a Jupyter\* \ lab server and receive a URL you can use to access Start the Tutorials =================== -1. Start a Python\* \ 3.8 (>=3.6, <3.9) virtual environment and confirm |productName| is available. +1. Start a Python\* \ 3.9 (>=3.9, <3.12) virtual environment and confirm |productName| is available. .. 
code-block:: python diff --git a/docs/developer_guide/running_the_federation_with_gandlf.rst b/docs/developer_guide/running_the_federation_with_gandlf.rst index 34d5445233..331b17c804 100644 --- a/docs/developer_guide/running_the_federation_with_gandlf.rst +++ b/docs/developer_guide/running_the_federation_with_gandlf.rst @@ -114,7 +114,7 @@ However, continue with the following procedure for details in creating a federat STEP 1: Install GaNDLF prerequisites and Create a Workspace ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -1. Start a Python 3.8 (>=3.7, <3.11) virtual environment and confirm |productName| is available. +1. Start a Python 3.9 (>=3.9, <3.12) virtual environment and confirm |productName| is available. .. code-block:: python diff --git a/docs/get_started/installation.rst b/docs/get_started/installation.rst index 0b246ddb7d..2e79c92250 100644 --- a/docs/get_started/installation.rst +++ b/docs/get_started/installation.rst @@ -19,7 +19,7 @@ Install the Package Follow this procedure to prepare the environment and install the |productName| package. Perform this procedure on every node in the federation. -1. Install a Python 3.8 (>=3.6, <3.9) virtual environment using venv. +1. Install a Python 3.9 (>=3.9, <3.12) virtual environment using venv. See the `Venv installation guide `_ for details. diff --git a/openfl-tutorials/experimental/workflow/workflow_interface_requirements.txt b/openfl-tutorials/experimental/workflow/workflow_interface_requirements.txt index fd3a372667..4dc2bc32cb 100644 --- a/openfl-tutorials/experimental/workflow/workflow_interface_requirements.txt +++ b/openfl-tutorials/experimental/workflow/workflow_interface_requirements.txt @@ -1,11 +1,10 @@ -astor==0.8.1 chardet charset-normalizer dill==0.3.6 matplotlib>=2.0.0 metaflow==2.7.15 nbdev==2.3.12 -nbformat +nbformat==5.10.4 ray==2.9.2 torch torchvision diff --git a/openfl-workspace/keras_cnn_mnist/requirements.txt b/openfl-workspace/keras_cnn_mnist/requirements.txt index af80212eeb..4ce0afb4ce 100644 --- a/openfl-workspace/keras_cnn_mnist/requirements.txt +++ b/openfl-workspace/keras_cnn_mnist/requirements.txt @@ -1 +1,2 @@ +numpy==1.23.5 tensorflow==2.13 diff --git a/openfl/experimental/workflow/workspace_export/export.py b/openfl/experimental/workflow/workspace_export/export.py index 7df2a7684b..0e00e2efd6 100644 --- a/openfl/experimental/workflow/workspace_export/export.py +++ b/openfl/experimental/workflow/workspace_export/export.py @@ -13,7 +13,6 @@ from pathlib import Path from shutil import copytree -import astor import nbformat import yaml from nbdev.export import nb_export @@ -194,13 +193,13 @@ def __extract_class_initializing_args(self, class_name): # noqa: C901 # Use the variable name as the argument value instantiation_args["args"][arg.id] = arg.id elif isinstance(arg, ast.Constant): - instantiation_args["args"][arg.s] = astor.to_source(arg) + instantiation_args["args"][arg.s] = ast.unparse(arg) else: - instantiation_args["args"][arg.arg] = astor.to_source(arg).strip() + instantiation_args["args"][arg.arg] = ast.unparse(arg).strip() for kwarg in node.keywords: # Iterate through keyword arguments - value = astor.to_source(kwarg.value).strip() + value = ast.unparse(kwarg.value).strip() # If paranthese or brackets around the value is # found and it's not tuple or list remove diff --git a/setup.py b/setup.py index e5018fb12c..8e2d1b1c68 100644 --- a/setup.py +++ b/setup.py @@ -95,7 +95,7 @@ def run(self): 'protobuf>=4.22,<6.0.0', 'grpcio>=1.56.2,<1.66.0', ], - python_requires='>=3.8, 
<3.12', + python_requires='>=3.9, <3.12', project_urls={ 'Bug Tracker': 'https://github.com/securefederatedai/openfl/issues', 'Documentation': 'https://openfl.readthedocs.io/en/stable/', @@ -109,7 +109,6 @@ def run(self): 'Topic :: System :: Distributed Computing', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', diff --git a/tests/github/pki_wrong_cn.py b/tests/github/pki_wrong_cn.py index 7ce669d707..33eaeb0a9d 100644 --- a/tests/github/pki_wrong_cn.py +++ b/tests/github/pki_wrong_cn.py @@ -1,4 +1,4 @@ -# Copyright (C) 2020-2023 Intel Corporation +# Copyright (C) 2020-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import grpc import subprocess @@ -75,7 +75,7 @@ def start_aggregator(): subprocess.check_call([ 'fx', 'workspace', 'create', '--prefix', prefix, - '--template', 'keras_cnn_mnist' + '--template', 'torch_cnn_mnist' ]) os.chdir(prefix) fqdn = getfqdn_env()
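A closing note on the astor removal in export.py: ast.unparse entered the standard library in Python 3.9, which is precisely the new minimum this patch establishes, so astor.to_source(node) can be replaced one-for-one. A quick equivalence sketch (the parsed snippet is arbitrary, not OpenFL code):

    import ast

    # Arbitrary call expression, standing in for a flow's instantiation line.
    call = ast.parse("Flow(model, optimizer=opt, rounds=10)", mode="eval").body

    # Stdlib replacement for astor.to_source(node).strip() on Python >= 3.9.
    for kwarg in call.keywords:
        print(kwarg.arg, "=", ast.unparse(kwarg.value).strip())
    # optimizer = opt
    # rounds = 10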