diff --git a/.github/workflows/docker-bench-security.yml b/.github/workflows/docker-bench-security.yml
index 588c454187..1d1b56b486 100644
--- a/.github/workflows/docker-bench-security.yml
+++ b/.github/workflows/docker-bench-security.yml
@@ -16,10 +16,10 @@ jobs:
steps:
- uses: actions/checkout@v3
- - name: Set up Python 3.8
+ - name: Set up Python 3
uses: actions/setup-python@v3
with:
- python-version: "3.8"
+ python-version: "3.9"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
diff --git a/.github/workflows/double_ws_export.yml b/.github/workflows/double_ws_export.yml
index efabaaa6d6..d4d6b0459c 100644
--- a/.github/workflows/double_ws_export.yml
+++ b/.github/workflows/double_ws_export.yml
@@ -23,10 +23,10 @@ jobs:
steps:
- uses: actions/checkout@v3
- - name: Set up Python 3.8
+ - name: Set up Python 3
uses: actions/setup-python@v3
with:
- python-version: "3.8"
+ python-version: "3.9"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
diff --git a/.github/workflows/experimental_workflow_tests.yml b/.github/workflows/experimental_workflow_tests.yml
index 030f42ed14..d99cec2e5f 100644
--- a/.github/workflows/experimental_workflow_tests.yml
+++ b/.github/workflows/experimental_workflow_tests.yml
@@ -10,6 +10,10 @@ on:
permissions:
contents: read
+env:
+ # A workaround for long FQDN names provided by GitHub Actions.
+ FQDN: "localhost"
+
jobs:
build:
if: (github.event.pull_request.draft == false && contains(github.event.pull_request.labels.*.name, 'workflow_interface'))
@@ -18,10 +22,10 @@ jobs:
steps:
- uses: actions/checkout@v3
- - name: Set up Python 3.8
+ - name: Set up Python 3
uses: actions/setup-python@v3
with:
- python-version: "3.8"
+ python-version: "3.9"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
diff --git a/.github/workflows/hadolint.yml b/.github/workflows/hadolint.yml
index 749d9fb51b..6e90292158 100644
--- a/.github/workflows/hadolint.yml
+++ b/.github/workflows/hadolint.yml
@@ -19,10 +19,10 @@ jobs:
steps:
- uses: actions/checkout@v3
- - name: Set up Python 3.8
+ - name: Set up Python 3
uses: actions/setup-python@v3
with:
- python-version: "3.8"
+ python-version: "3.9"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 4ebdd422fb..13d53e885d 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -19,10 +19,10 @@ jobs:
steps:
- uses: actions/checkout@v3
- - name: Set up Python 3.8
+ - name: Set up Python 3
uses: actions/setup-python@v3
with:
- python-version: "3.8"
+ python-version: "3.9"
- name: Install linters
run: |
python -m pip install --upgrade pip
diff --git a/.github/workflows/pki.yml b/.github/workflows/pki.yml
index 9cac1df031..d8ecbecb03 100644
--- a/.github/workflows/pki.yml
+++ b/.github/workflows/pki.yml
@@ -23,10 +23,10 @@ jobs:
steps:
- uses: actions/checkout@v3
- - name: Set up Python 3.8
+ - name: Set up Python 3
uses: actions/setup-python@v3
with:
- python-version: "3.8"
+ python-version: "3.9"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
@@ -40,10 +40,10 @@ jobs:
steps:
- uses: actions/checkout@v3
- - name: Set up Python 3.8
+ - name: Set up Python 3
uses: actions/setup-python@v3
with:
- python-version: "3.8"
+ python-version: "3.9"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
diff --git a/.github/workflows/pytest_coverage.yml b/.github/workflows/pytest_coverage.yml
index bf968fc5f0..9371f74e13 100644
--- a/.github/workflows/pytest_coverage.yml
+++ b/.github/workflows/pytest_coverage.yml
@@ -24,10 +24,10 @@ jobs:
steps:
- uses: actions/checkout@v3
- - name: Set up Python 3.8
+ - name: Set up Python 3
uses: actions/setup-python@v3
with:
- python-version: "3.8"
+ python-version: "3.9"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
diff --git a/.github/workflows/straggler-handling.yml b/.github/workflows/straggler-handling.yml
index c37fa7daf4..47b7f05709 100644
--- a/.github/workflows/straggler-handling.yml
+++ b/.github/workflows/straggler-handling.yml
@@ -26,10 +26,10 @@ jobs:
steps:
- uses: actions/checkout@v3
- - name: Set up Python 3.8
+ - name: Set up Python 3
uses: actions/setup-python@v3
with:
- python-version: "3.8"
+ python-version: "3.9"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
diff --git a/.github/workflows/task_runner_e2e.yml b/.github/workflows/task_runner_e2e.yml
index 567cce7ba7..1c1a91ca67 100644
--- a/.github/workflows/task_runner_e2e.yml
+++ b/.github/workflows/task_runner_e2e.yml
@@ -39,7 +39,7 @@ jobs:
# There are open issues for some of the models, so excluding them for now:
# model_name: [ "torch_cnn_mnist", "keras_cnn_mnist", "torch_cnn_histology" ]
model_name: ["torch_cnn_mnist", "keras_cnn_mnist"]
- python_version: ["3.8", "3.9", "3.10"]
+ python_version: ["3.9", "3.10", "3.11"]
fail-fast: false # do not immediately fail if one of the combinations fail
env:
@@ -230,3 +230,70 @@ jobs:
with:
name: tr_no_client_auth_${{ env.MODEL_NAME }}_python${{ env.PYTHON_VERSION }}_${{ github.run_id }}
path: result.tar
+
+ test_memory_logs:
+ name: tr_tls_memory_logs
+ runs-on: ubuntu-22.04
+ timeout-minutes: 15
+ strategy:
+ matrix:
+      # Testing the memory logs scenario only for the torch_cnn_mnist model and Python 3.10.
+      # If required, this can be extended to other models and Python versions.
+ model_name: ["torch_cnn_mnist"]
+ python_version: ["3.10"]
+      fail-fast: false # do not immediately fail if one of the combinations fails
+
+ env:
+ MODEL_NAME: ${{ matrix.model_name }}
+ PYTHON_VERSION: ${{ matrix.python_version }}
+
+ steps:
+ - name: Checkout OpenFL repository
+ id: checkout_openfl
+ uses: actions/checkout@v4.1.1
+ with:
+ fetch-depth: 2 # needed for detecting changes
+ submodules: "true"
+ token: ${{ secrets.GITHUB_TOKEN }}
+
+ - name: Set up Python
+ id: setup_python
+ uses: actions/setup-python@v3
+ with:
+ python-version: ${{ env.PYTHON_VERSION }}
+
+ - name: Install dependencies
+ id: install_dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install .
+ pip install -r test-requirements.txt
+
+      - name: Run Task Runner E2E memory logs tests
+ id: run_tests
+ run: |
+ python -m pytest -s tests/end_to_end/test_suites/memory_logs_tests.py \
+ --model_name ${{ env.MODEL_NAME }} --num_rounds ${{ env.NUM_ROUNDS }} \
+ --num_collaborators ${{ env.NUM_COLLABORATORS }} --log_memory_usage
+ echo "Task runner memory logs test run completed"
+
+ - name: Print test summary
+ id: print_test_summary
+ if: ${{ always() }}
+ run: |
+ export PYTHONPATH="$PYTHONPATH:."
+ python tests/end_to_end/utils/summary_helper.py
+ echo "Test summary printed"
+
+ - name: Tar files
+ id: tar_files
+ if: ${{ always() }}
+ run: tar -cvf result.tar results
+
+ - name: Upload Artifacts
+ id: upload_artifacts
+ uses: actions/upload-artifact@v4
+ if: ${{ always() }}
+ with:
+ name: tr_tls_memory_logs_${{ env.MODEL_NAME }}_python${{ env.PYTHON_VERSION }}_${{ github.run_id }}
+ path: result.tar
diff --git a/.github/workflows/taskrunner.yml b/.github/workflows/taskrunner.yml
index 8f1bcc35dc..1e3a3ab6c2 100644
--- a/.github/workflows/taskrunner.yml
+++ b/.github/workflows/taskrunner.yml
@@ -21,7 +21,7 @@ jobs:
strategy:
matrix:
os: ['ubuntu-latest', 'windows-latest']
- python-version: ['3.8','3.9','3.10','3.11']
+ python-version: ["3.9", "3.10", "3.11"]
runs-on: ${{ matrix.os }}
timeout-minutes: 15
diff --git a/.github/workflows/taskrunner_eden_pipeline.yml b/.github/workflows/taskrunner_eden_pipeline.yml
index 2cb0e9effc..44de314a5f 100644
--- a/.github/workflows/taskrunner_eden_pipeline.yml
+++ b/.github/workflows/taskrunner_eden_pipeline.yml
@@ -22,10 +22,10 @@ jobs:
steps:
- uses: actions/checkout@v3
- - name: Set up Python 3.8
+ - name: Set up Python 3
uses: actions/setup-python@v3
with:
- python-version: "3.8"
+ python-version: "3.9"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
diff --git a/.github/workflows/tr_docker_gramine_direct.yml b/.github/workflows/tr_docker_gramine_direct.yml
index be2afea5f9..855a059c98 100644
--- a/.github/workflows/tr_docker_gramine_direct.yml
+++ b/.github/workflows/tr_docker_gramine_direct.yml
@@ -17,10 +17,10 @@ jobs:
steps:
- uses: actions/checkout@v3
- - name: Set up Python 3.8
+ - name: Set up Python 3
uses: actions/setup-python@v3
with:
- python-version: "3.8"
+ python-version: "3.9"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
diff --git a/.github/workflows/tr_docker_native.yml b/.github/workflows/tr_docker_native.yml
index 899fcd8296..f97b268636 100644
--- a/.github/workflows/tr_docker_native.yml
+++ b/.github/workflows/tr_docker_native.yml
@@ -17,10 +17,10 @@ jobs:
steps:
- uses: actions/checkout@v3
- - name: Set up Python 3.8
+ - name: Set up Python 3
uses: actions/setup-python@v3
with:
- python-version: "3.8"
+ python-version: "3.9"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index 70a195e70b..4c3c99af43 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -13,16 +13,15 @@ env:
jobs:
pytest-coverage: # from pytest_coverage.yml
- needs: lint
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- uses: actions/checkout@v3
- - name: Set up Python 3.8
+ - name: Set up Python 3
uses: actions/setup-python@v3
with:
- python-version: "3.8"
+ python-version: "3.9"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
@@ -35,16 +34,16 @@ jobs:
coverage report
cli:
- needs: [lint, pytest-coverage]
+ needs: [pytest-coverage]
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- uses: actions/checkout@v3
- - name: Set up Python 3.8
+ - name: Set up Python 3
uses: actions/setup-python@v3
with:
- python-version: "3.8"
+ python-version: "3.9"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml
index 6d8330d553..1e5b245cc1 100644
--- a/.github/workflows/windows.yml
+++ b/.github/workflows/windows.yml
@@ -17,10 +17,10 @@ jobs:
timeout-minutes: 15
steps:
- uses: actions/checkout@v3
- - name: Set up Python 3.8
+ - name: Set up Python 3
uses: actions/setup-python@v3
with:
- python-version: "3.8"
+ python-version: "3.9"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
@@ -38,10 +38,10 @@ jobs:
timeout-minutes: 15
steps:
- uses: actions/checkout@v3
- - name: Set up Python 3.8
+ - name: Set up Python 3
uses: actions/setup-python@v3
with:
- python-version: "3.8"
+ python-version: "3.9"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
diff --git a/Jenkinsfile b/Jenkinsfile
index b4e012ad32..c1904d0453 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -81,7 +81,7 @@ pipeline {
SNYK_ALLOW_LONG_PROJECT_NAME = true
SNYK_USE_MULTI_PROC = true
SNYK_DEBUG = true
- SNYK_PYTHON_VERSION = '3.8'
+ SNYK_PYTHON_VERSION = '3.9'
BANDIT_SOURCE_PATH = 'openfl/ openfl-workspace/ openfl-tutorials/'
BANDIT_SEVERITY_LEVEL = 'high'
@@ -114,7 +114,7 @@ pipeline {
stage('Build Package') {
agent {
docker {
- image 'python:3.8'
+ image 'python:3.9'
}
}
steps {
diff --git a/README.md b/README.md
index 751df07df1..717fece66d 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,8 @@
-[![PyPI - Python Version](https://img.shields.io/badge/python-3.7%20%7C%203.8%20%7C%203.9%20%7C%203.10-blue)](https://pypi.org/project/openfl/)
+
+[![PyPI - Python Version](https://img.shields.io/badge/python-3.9%20%7C%203.10%20%7C%203.11-blue)](https://pypi.org/project/openfl/)
[![Ubuntu CI status](https://github.com/securefederatedai/openfl/actions/workflows/ubuntu.yml/badge.svg)](https://github.com/securefederatedai/openfl/actions/workflows/ubuntu.yml)
[![Windows CI status](https://github.com/securefederatedai/openfl/actions/workflows/windows.yml/badge.svg)](https://github.com/securefederatedai/openfl/actions/workflows/windows.yml)
[![Documentation Status](https://readthedocs.org/projects/openfl/badge/?version=latest)](https://openfl.readthedocs.io/en/latest/?badge=latest)
diff --git a/docs/about/features_index/taskrunner.rst b/docs/about/features_index/taskrunner.rst
index f4e3cc0730..75b408f0d2 100644
--- a/docs/about/features_index/taskrunner.rst
+++ b/docs/about/features_index/taskrunner.rst
@@ -147,7 +147,7 @@ Bare Metal Approach
STEP 1: Create a Workspace
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-1. Start a Python 3.8 (>=3.6, <3.11) virtual environment and confirm |productName| is available.
+1. Start a Python 3.9 (>=3.9, <3.12) virtual environment and confirm |productName| is available.
.. code-block:: console
diff --git a/docs/developer_guide/running_the_federation.notebook.rst b/docs/developer_guide/running_the_federation.notebook.rst
index d335c112d0..f5bd9c12ff 100644
--- a/docs/developer_guide/running_the_federation.notebook.rst
+++ b/docs/developer_guide/running_the_federation.notebook.rst
@@ -17,7 +17,7 @@ You will start a Jupyter\* \ lab server and receive a URL you can use to access
Start the Tutorials
===================
-1. Start a Python\* \ 3.8 (>=3.6, <3.9) virtual environment and confirm |productName| is available.
+1. Start a Python\* \ 3.9 (>=3.9, <3.12) virtual environment and confirm |productName| is available.
.. code-block:: python
diff --git a/docs/developer_guide/running_the_federation_with_gandlf.rst b/docs/developer_guide/running_the_federation_with_gandlf.rst
index 34d5445233..331b17c804 100644
--- a/docs/developer_guide/running_the_federation_with_gandlf.rst
+++ b/docs/developer_guide/running_the_federation_with_gandlf.rst
@@ -114,7 +114,7 @@ However, continue with the following procedure for details in creating a federat
STEP 1: Install GaNDLF prerequisites and Create a Workspace
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-1. Start a Python 3.8 (>=3.7, <3.11) virtual environment and confirm |productName| is available.
+1. Start a Python 3.9 (>=3.9, <3.12) virtual environment and confirm |productName| is available.
.. code-block:: python
diff --git a/docs/get_started/installation.rst b/docs/get_started/installation.rst
index 0b246ddb7d..2e79c92250 100644
--- a/docs/get_started/installation.rst
+++ b/docs/get_started/installation.rst
@@ -19,7 +19,7 @@ Install the Package
Follow this procedure to prepare the environment and install the |productName| package.
Perform this procedure on every node in the federation.
-1. Install a Python 3.8 (>=3.6, <3.9) virtual environment using venv.
+1. Install a Python 3.9 (>=3.9, <3.12) virtual environment using venv.
See the `Venv installation guide `_ for details.
diff --git a/openfl-tutorials/__init__.py b/openfl-tutorials/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/openfl-tutorials/experimental/workflow/workflow_interface_requirements.txt b/openfl-tutorials/experimental/workflow/workflow_interface_requirements.txt
index 6b4abbf54d..4dc2bc32cb 100644
--- a/openfl-tutorials/experimental/workflow/workflow_interface_requirements.txt
+++ b/openfl-tutorials/experimental/workflow/workflow_interface_requirements.txt
@@ -1,10 +1,10 @@
-astor==0.8.1
chardet
charset-normalizer
dill==0.3.6
matplotlib>=2.0.0
metaflow==2.7.15
nbdev==2.3.12
+nbformat==5.10.4
ray==2.9.2
torch
torchvision
diff --git a/openfl-workspace/keras_cnn_mnist/requirements.txt b/openfl-workspace/keras_cnn_mnist/requirements.txt
index af80212eeb..4ce0afb4ce 100644
--- a/openfl-workspace/keras_cnn_mnist/requirements.txt
+++ b/openfl-workspace/keras_cnn_mnist/requirements.txt
@@ -1 +1,2 @@
+numpy==1.23.5
tensorflow==2.13
diff --git a/openfl/component/aggregator/aggregator.py b/openfl/component/aggregator/aggregator.py
index 038a1e00d3..1e34aa7e92 100644
--- a/openfl/component/aggregator/aggregator.py
+++ b/openfl/component/aggregator/aggregator.py
@@ -15,7 +15,7 @@
from openfl.pipelines import NoCompressionPipeline, TensorCodec
from openfl.protocols import base_pb2, utils
from openfl.utilities import TaskResultKey, TensorKey, change_tags
-from openfl.utilities.logs import get_memory_usage, write_metric
+from openfl.utilities.logs import get_memory_usage, write_memory_usage_to_file, write_metric
class Aggregator:
@@ -79,7 +79,7 @@ def __init__(
write_logs=False,
log_memory_usage=False,
log_metric_callback=None,
- **kwargs,
+ initial_tensor_dict=None,
):
"""Initializes the Aggregator.
@@ -110,15 +110,18 @@ def __init__(
to None.
-            **kwargs: Additional keyword arguments.
+            initial_tensor_dict (dict, optional): Initial tensor dictionary
+                used to seed the aggregator's model. Defaults to None.
"""
+ self.logger = getLogger(__name__)
self.round_number = 0
- self.single_col_cert_common_name = single_col_cert_common_name
- if self.single_col_cert_common_name is not None:
- self._log_big_warning()
- else:
- # FIXME: '' instead of None is just for protobuf compatibility.
- # Cleaner solution?
- self.single_col_cert_common_name = ""
+ if single_col_cert_common_name:
+ self.logger.warning(
+ "You are running in single collaborator certificate mode. "
+ "This mode is intended for development settings only and does not "
+ "provide proper Public Key Infrastructure (PKI) security. "
+ "Please use this mode with caution."
+ )
+ # FIXME: "" instead of None is for protobuf compatibility.
+ self.single_col_cert_common_name = single_col_cert_common_name or ""
self.straggler_handling_policy = (
straggler_handling_policy or CutoffTimeBasedStragglerHandling()
@@ -143,7 +146,6 @@ def __init__(
self.db_store_rounds = db_store_rounds
# Gathered together logging-related objects
- self.logger = getLogger(__name__)
self.write_logs = write_logs
self.log_metric_callback = log_metric_callback
@@ -166,10 +168,10 @@ def __init__(
self.best_tensor_dict: dict = {}
self.last_tensor_dict: dict = {}
- if kwargs.get("initial_tensor_dict", None) is not None:
- self._load_initial_tensors_from_dict(kwargs["initial_tensor_dict"])
+ if initial_tensor_dict:
+ self._load_initial_tensors_from_dict(initial_tensor_dict)
self.model = utils.construct_model_proto(
- tensor_dict=kwargs["initial_tensor_dict"],
+ tensor_dict=initial_tensor_dict,
round_number=0,
tensor_pipe=self.compression_pipeline,
)
@@ -326,9 +328,7 @@ def _time_to_quit(self):
Returns:
bool: True if it's time to quit, False otherwise.
"""
- if self.round_number >= self.rounds_to_train:
- return True
- return False
+ return self.round_number >= self.rounds_to_train
def get_tasks(self, collaborator_name):
"""RPC called by a collaborator to determine which tasks to perform.
@@ -1013,6 +1013,7 @@ def _end_of_round_check(self):
if self._time_to_quit():
if self.log_memory_usage:
self.logger.info(f"Publish memory usage: {self.memory_details}")
+ write_memory_usage_to_file(self.memory_details, "aggregator_memory_usage.json")
self.logger.info("Experiment Completed. Cleaning up...")
else:
self.logger.info("Starting round %s...", self.round_number)
@@ -1056,19 +1057,10 @@ def _is_collaborator_done(self, collaborator_name: str, round_number: int) -> No
if all_tasks_completed:
self.collaborators_done.append(collaborator_name)
self.logger.info(
- f"Round: {self.round_number}, Collaborators that have completed all tasks: "
+ f"Round {self.round_number}: Collaborators that have completed all tasks: "
f"{self.collaborators_done}"
)
- def _log_big_warning(self):
- """Warn user about single collaborator cert mode."""
- self.logger.warning(
- f"\n{the_dragon}\nYOU ARE RUNNING IN SINGLE COLLABORATOR CERT MODE! THIS IS"
- f" NOT PROPER PKI AND "
- f"SHOULD ONLY BE USED IN DEVELOPMENT SETTINGS!!!! YE HAVE BEEN"
- f" WARNED!!!"
- )
-
def stop(self, failed_collaborator: str = None) -> None:
"""Stop aggregator execution.
@@ -1092,76 +1084,3 @@ def stop(self, failed_collaborator: str = None) -> None:
collaborator_name,
)
self.quit_job_sent_to.append(collaborator_name)
-
-
-the_dragon = """
-
- ,@@.@@+@@##@,@@@@.`@@#@+ *@@@@ #@##@ `@@#@# @@@@@ @@ @@@@` #@@@ :@@ `@#`@@@#.@
- @@ #@ ,@ +. @@.@* #@ :` @+*@ .@`+. @@ *@::@`@@ @@# @@ #`;@`.@@ @@@`@`#@* +:@`
- @@@@@ ,@@@ @@@@ +@@+ @@@@ .@@@ @@ .@+:@@@: .;+@` @@ ,;,#@` @@ @@@@@ ,@@@* @
- @@ #@ ,@`*. @@.@@ #@ ,; `@+,@#.@.*` @@ ,@::@`@@` @@@@# @@`:@;*@+ @@ @`:@@`@ *@@ `
- .@@`@@,+@+;@.@@ @@`@@;*@ ;@@#@:*@+;@ `@@;@@ #@**@+;@ `@@:`@@@@ @@@@.`@+ .@ +@+@*,@
- `` `` ` `` . ` ` ` ` ` .` ` `` `` `` ` . `
-
-
-
- .**
- ;` `****:
- @**`*******
- *** +***********;
- ,@***;` .*:,;************
- ;***********@@***********
- ;************************,
- `*************************
- *************************
- ,************************
- **#*********************
- *@****` :**********;
- +**; .********.
- ;*; `*******#: `,:
- ****@@@++:: ,,;***.
- *@@@**;#;: +: **++*,
- @***#@@@: +*; ,****
- @*@+**** ***` ****,
- ,@#******. , **** **;,**.
- * ******** :, ;*:*+ ** :,**
- # ********:: *,.*:**` * ,*;
- . *********: .+,*:;*: : `:**
- ; :********: ***::** ` ` **
- + :****::*** , *;;::**` :*
- `` .****::;**::: *;::::*; ;*
- * *****::***:. **::::** ;:
- # *****;:**** ;*::;*** ,*`
- ; ************` ,**:****; ::*
- : *************;:;*;*++: *.
- : *****************;* `*
- `. `*****************; : *.
- .` .*+************+****;: :*
- `. :;+***********+******;` : .,*
- ; ::*+*******************. `:: .`:.
- + :::**********************;;:` *
- + ,::;*************;:::*******. *
- # `:::+*************:::;******** :, *
- @ :::***************;:;*********;:, *
- @ ::::******:*********************: ,:*
- @ .:::******:;*********************, :*
- # :::******::******###@*******;;**** *,
- # .::;*****::*****#****@*****;:::***; `` **
- * ::;***********+*****+#******::*****,,,,**
- : :;***********#******#******************
- .` `;***********#******+****+************
- `, ***#**@**+***+*****+**************;`
- ; *++**#******#+****+` `.,..
- + `@***#*******#****#
- + +***@********+**+:
- * .+**+;**;;;**;#**#
- ,` ****@ +*+:
- # +**+ :+**
- @ ;**+, ,***+
- # #@+**** *#****+
- `; @+***+@ `#**+#++
- # #*#@##, .++:.,#
- `* @# +.
- @@@
- # `@
- , """
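
For context on the **kwargs removal above, a minimal sketch (class and key names are illustrative, not OpenFL API) of why the explicit parameter is safer:

    # Replacing a **kwargs catch-all with an explicit, defaulted parameter.
    class OldStyle:
        def __init__(self, **kwargs):
            # A misspelled key (e.g. "initial_tensor_dcit") is silently ignored.
            self.initial_tensor_dict = kwargs.get("initial_tensor_dict", None)

    class NewStyle:
        def __init__(self, initial_tensor_dict=None):
            # A misspelled keyword now raises TypeError at the call site.
            self.initial_tensor_dict = initial_tensor_dict

    NewStyle(initial_tensor_dict={"layer.weight": [0.0]})  # accepted
    # NewStyle(initial_tensor_dcit={})  # TypeError: unexpected keyword argument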
diff --git a/openfl/component/collaborator/collaborator.py b/openfl/component/collaborator/collaborator.py
index 54683135d5..08f19b9d94 100644
--- a/openfl/component/collaborator/collaborator.py
+++ b/openfl/component/collaborator/collaborator.py
@@ -13,7 +13,7 @@
from openfl.pipelines import NoCompressionPipeline, TensorCodec
from openfl.protocols import utils
from openfl.utilities import TensorKey
-from openfl.utilities.logs import get_memory_usage
+from openfl.utilities.logs import get_memory_usage, write_memory_usage_to_file
class DevicePolicy(Enum):
@@ -82,7 +82,6 @@ def __init__(
compression_pipeline=None,
db_store_rounds=1,
log_memory_usage=False,
- **kwargs,
):
"""Initialize the Collaborator object.
@@ -104,7 +103,6 @@ def __init__(
Defaults to None.
db_store_rounds (int, optional): The number of rounds to store in
the database. Defaults to 1.
- **kwargs: Variable length argument list.
"""
self.single_col_cert_common_name = None
@@ -183,6 +181,9 @@ def run(self):
memory_details.append(memory_detail)
if self.log_memory_usage:
self.logger.info(f"Publish memory usage: {memory_details}")
+ write_memory_usage_to_file(
+ memory_details, f"{self.collaborator_name}_memory_usage.json"
+ )
self.logger.info("End of Federation reached. Exiting...")
diff --git a/openfl/experimental/workflow/workspace_export/export.py b/openfl/experimental/workflow/workspace_export/export.py
index 7df2a7684b..0e00e2efd6 100644
--- a/openfl/experimental/workflow/workspace_export/export.py
+++ b/openfl/experimental/workflow/workspace_export/export.py
@@ -13,7 +13,6 @@
from pathlib import Path
from shutil import copytree
-import astor
import nbformat
import yaml
from nbdev.export import nb_export
@@ -194,13 +193,13 @@ def __extract_class_initializing_args(self, class_name): # noqa: C901
# Use the variable name as the argument value
instantiation_args["args"][arg.id] = arg.id
elif isinstance(arg, ast.Constant):
- instantiation_args["args"][arg.s] = astor.to_source(arg)
+ instantiation_args["args"][arg.s] = ast.unparse(arg)
else:
- instantiation_args["args"][arg.arg] = astor.to_source(arg).strip()
+ instantiation_args["args"][arg.arg] = ast.unparse(arg).strip()
for kwarg in node.keywords:
# Iterate through keyword arguments
- value = astor.to_source(kwarg.value).strip()
+ value = ast.unparse(kwarg.value).strip()
                # If parentheses or brackets are found around the value and it
                # is not a tuple or list, remove them
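
The astor-to-ast.unparse swap above is what ties this file to the new Python floor: ast.unparse was added to the standard library in Python 3.9. A quick illustrative check, using a fabricated expression:

    import ast

    tree = ast.parse("Model(lr=0.01, layers=[32, 64])", mode="eval")
    for kwarg in tree.body.keywords:
        # ast.unparse round-trips an AST node back to source, like astor.to_source did
        print(kwarg.arg, "=", ast.unparse(kwarg.value).strip())
    # Output: lr = 0.01
    #         layers = [32, 64]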
diff --git a/openfl/federated/task/runner_keras.py b/openfl/federated/task/runner_keras.py
index b33b20fde5..2d992c83bb 100644
--- a/openfl/federated/task/runner_keras.py
+++ b/openfl/federated/task/runner_keras.py
@@ -194,7 +194,7 @@ def train_iteration(self, batch_generator, metrics: list = None, **kwargs):
f"Param_metrics = {metrics}, model_metrics_names = {model_metrics_names}"
)
- history = self.model.fit(batch_generator, verbose=1, **kwargs)
+ history = self.model.fit(batch_generator, verbose=2, **kwargs)
results = []
for metric in metrics:
value = np.mean([history.history[metric]])
@@ -224,7 +224,7 @@ def validate(self, col_name, round_num, input_tensor_dict, **kwargs):
self.rebuild_model(round_num, input_tensor_dict, validation=True)
param_metrics = kwargs["metrics"]
- vals = self.model.evaluate(self.data_loader.get_valid_loader(batch_size), verbose=1)
+ vals = self.model.evaluate(self.data_loader.get_valid_loader(batch_size), verbose=2)
model_metrics_names = self.model.metrics_names
if type(vals) is not list:
vals = [vals]
diff --git a/openfl/utilities/logs.py b/openfl/utilities/logs.py
index a17f0742fc..ce64b5f7fb 100644
--- a/openfl/utilities/logs.py
+++ b/openfl/utilities/logs.py
@@ -4,6 +4,7 @@
"""Logs utilities."""
+import json
import logging
import os
@@ -92,3 +93,19 @@ def get_memory_usage() -> dict:
},
}
return memory_usage
+
+
+def write_memory_usage_to_file(memory_usage_dict, file_name):
+ """
+ Write memory usage details to a file.
+
+ Args:
+ memory_usage_dict (dict): The memory usage details to write.
+ file_name (str): The name of the file to write to.
+
+ Returns:
+ None
+ """
+ file_path = os.path.join("logs", file_name)
+ with open(file_path, "w") as f:
+ json.dump(memory_usage_dict, f, indent=4)
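
A minimal usage sketch of the helper pair above (assuming the process runs from a directory where logs/ exists, since the helper does not create it):

    import os

    from openfl.utilities.logs import get_memory_usage, write_memory_usage_to_file

    os.makedirs("logs", exist_ok=True)  # write_memory_usage_to_file assumes logs/ exists
    snapshot = get_memory_usage()       # one dict per sampling point
    write_memory_usage_to_file([snapshot], "example_memory_usage.json")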
diff --git a/setup.py b/setup.py
index 1151bb0956..8e2d1b1c68 100644
--- a/setup.py
+++ b/setup.py
@@ -66,7 +66,15 @@ def run(self):
long_description=open("README.md", encoding="utf-8").read(),
long_description_content_type='text/markdown',
url='https://github.com/securefederatedai/openfl',
- packages=find_packages(include=("openfl", "openfl.*", "openfl-docker", "openfl-workspace")),
+ packages=find_packages(
+ include=(
+ "openfl",
+ "openfl.*",
+ "openfl-docker",
+ "openfl-workspace",
+ "openfl-tutorials",
+ )
+ ),
include_package_data=True,
setup_requires=['grpcio-tools>=1.56.2,<1.66.0'], # ensure it is in-sync with `install_requires`
install_requires=[
@@ -87,7 +95,7 @@ def run(self):
'protobuf>=4.22,<6.0.0',
'grpcio>=1.56.2,<1.66.0',
],
- python_requires='>=3.8, <3.12',
+ python_requires='>=3.9, <3.12',
project_urls={
'Bug Tracker': 'https://github.com/securefederatedai/openfl/issues',
'Documentation': 'https://openfl.readthedocs.io/en/stable/',
@@ -101,7 +109,6 @@ def run(self):
'Topic :: System :: Distributed Computing',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.11',
diff --git a/test-requirements.txt b/test-requirements.txt
index 19bf081db1..78f26429f2 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,4 +1,4 @@
lxml==5.3.0
-pytest==8.3.3
+pytest==8.3.4
pytest-asyncio==0.24.0
pytest-mock==3.14.0
diff --git a/tests/end_to_end/conftest.py b/tests/end_to_end/conftest.py
index 22eed9d101..098d1fbe61 100644
--- a/tests/end_to_end/conftest.py
+++ b/tests/end_to_end/conftest.py
@@ -17,7 +17,7 @@
# Define a named tuple to store the objects for model owner, aggregator, and collaborators
federation_fixture = collections.namedtuple(
"federation_fixture",
- "model_owner, aggregator, collaborators, model_name, require_client_auth, use_tls, workspace_path, results_dir, num_rounds",
+ "model_owner, aggregator, collaborators, workspace_path",
)
def pytest_addoption(parser):
@@ -180,6 +180,24 @@ def pytest_sessionfinish(session, exitstatus):
log.debug(f"Cleared .pytest_cache directory at {cache_dir}")
+def pytest_configure(config):
+ """
+ Configure the pytest plugin.
+
+    Args:
+        config: pytest config object
+    """
+    # Parse the command line arguments once and store them on the pytest
+    # config object so tests and fixtures can read them via request.config
+    args = parse_arguments()
+ config.model_name = args.model_name
+ config.num_collaborators = args.num_collaborators
+ config.num_rounds = args.num_rounds
+ config.require_client_auth = not args.disable_client_auth
+ config.use_tls = not args.disable_tls
+ config.log_memory_usage = args.log_memory_usage
+ config.results_dir = config.getini("results_dir")
+
+
@pytest.fixture(scope="function")
def fx_federation(request, pytestconfig):
"""
@@ -196,38 +214,26 @@ def fx_federation(request, pytestconfig):
"""
collaborators = []
agg_domain_name = "localhost"
-
- # Parse the command line arguments
- args = parse_arguments()
- # Use the model name from the test case name if not provided as a command line argument
- model_name = args.model_name if args.model_name else request.node.name.split("test_")[1]
- results_dir = pytestconfig.getini("results_dir")
- num_collaborators = args.num_collaborators
- num_rounds = args.num_rounds
- require_client_auth = not args.disable_client_auth
- use_tls = not args.disable_tls
- log_memory_usage = args.log_memory_usage
-
log.info(
f"Running federation setup using Task Runner API on single machine with below configurations:\n"
- f"\tNumber of collaborators: {num_collaborators}\n"
- f"\tNumber of rounds: {num_rounds}\n"
- f"\tModel name: {model_name}\n"
- f"\tClient authentication: {require_client_auth}\n"
- f"\tTLS: {use_tls}\n"
- f"\tMemory Logs: {log_memory_usage}"
+ f"\tNumber of collaborators: {request.config.num_collaborators}\n"
+ f"\tNumber of rounds: {request.config.num_rounds}\n"
+ f"\tModel name: {request.config.model_name}\n"
+ f"\tClient authentication: {request.config.require_client_auth}\n"
+ f"\tTLS: {request.config.use_tls}\n"
+ f"\tMemory Logs: {request.config.log_memory_usage}"
)
# Validate the model name and create the workspace name
- if not model_name.upper() in constants.ModelName._member_names_:
- raise ValueError(f"Invalid model name: {model_name}")
+    if request.config.model_name.upper() not in constants.ModelName._member_names_:
+ raise ValueError(f"Invalid model name: {request.config.model_name}")
- workspace_name = f"workspace_{model_name}"
+ workspace_name = request.config.model_name
# Create model owner object and the workspace for the model
- model_owner = participants.ModelOwner(workspace_name, model_name, log_memory_usage)
+    model_owner = participants.ModelOwner(
+        workspace_name, request.config.model_name, request.config.log_memory_usage
+    )
try:
- workspace_path = model_owner.create_workspace(results_dir=results_dir)
+ workspace_path = model_owner.create_workspace(results_dir=request.config.results_dir)
except Exception as e:
log.error(f"Failed to create the workspace: {e}")
raise e
@@ -235,19 +241,19 @@ def fx_federation(request, pytestconfig):
# Modify the plan
try:
model_owner.modify_plan(
- new_rounds=num_rounds,
- num_collaborators=num_collaborators,
- require_client_auth=require_client_auth,
- use_tls=use_tls,
+ new_rounds=request.config.num_rounds,
+ num_collaborators=request.config.num_collaborators,
+ require_client_auth=request.config.require_client_auth,
+ use_tls=request.config.use_tls,
)
except Exception as e:
log.error(f"Failed to modify the plan: {e}")
raise e
- if not use_tls:
+ if not request.config.use_tls:
log.info("Disabling TLS for communication")
try:
- model_owner.register_collaborators(num_collaborators)
+ model_owner.register_collaborators(request.config.num_collaborators)
except Exception as e:
log.error(f"Failed to register the collaborators: {e}")
raise e
@@ -271,7 +277,7 @@ def fx_federation(request, pytestconfig):
agg_domain_name=agg_domain_name, workspace_path=workspace_path
)
- for i in range(num_collaborators):
+ for i in range(request.config.num_collaborators):
collaborator = participants.Collaborator(
collaborator_name=f"collaborator{i+1}",
data_directory_path=i + 1,
@@ -285,10 +291,5 @@ def fx_federation(request, pytestconfig):
model_owner=model_owner,
aggregator=aggregator,
collaborators=collaborators,
- model_name=model_name,
- require_client_auth=require_client_auth,
- use_tls=use_tls,
workspace_path=workspace_path,
- results_dir=results_dir,
- num_rounds=num_rounds,
)
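
With pytest_configure stashing the parsed options on the config object, any test or fixture can now read them through request.config instead of re-parsing the command line. A hypothetical test using this pattern:

    # Requires this conftest; names mirror the attributes set in pytest_configure.
    def test_reads_shared_config(request):
        assert request.config.num_rounds > 0
        if request.config.use_tls:
            pass  # PKI-enabled path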
diff --git a/tests/end_to_end/test_suites/memory_logs_tests.py b/tests/end_to_end/test_suites/memory_logs_tests.py
new file mode 100644
index 0000000000..d763c419d0
--- /dev/null
+++ b/tests/end_to_end/test_suites/memory_logs_tests.py
@@ -0,0 +1,73 @@
+# Copyright 2020-2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import json
+import logging
+import os
+
+import pytest
+
+from tests.end_to_end.utils import federation_helper as fed_helper
+
+log = logging.getLogger(__name__)
+
+
+@pytest.mark.log_memory_usage
+def test_log_memory_usage(request, fx_federation):
+ """
+    Test the memory usage logging functionality in a federated learning setup.
+
+    Args:
+        request: The pytest request object containing configuration options.
+        fx_federation: The fixture representing the federated learning setup.
+
+    Steps:
+        1. Skip the test if memory usage logging is disabled.
+        2. Set up PKI for trusted communication if TLS is enabled.
+        3. Start the federation and verify its completion.
+        4. Verify the existence of memory usage logs for the aggregator.
+        5. Verify the memory usage details for each round.
+        6. Verify the existence and details of memory usage logs for each collaborator.
+        7. Log the availability of memory usage details for all participants.
+ """
+    # Skip the test if memory usage logging is disabled
+ if not request.config.log_memory_usage:
+ pytest.skip("Memory usage logging is disabled")
+
+ # Setup PKI for trusted communication within the federation
+ if request.config.use_tls:
+ assert fed_helper.setup_pki(fx_federation), "Failed to setup PKI for trusted communication"
+
+ # Start the federation
+ results = fed_helper.run_federation(fx_federation)
+
+ # Verify the completion of the federation run
+    assert fed_helper.verify_federation_run_completion(
+        fx_federation, results, num_rounds=request.config.num_rounds
+    ), "Federation completion failed"
+
+    # Verify the aggregator memory logs
+    aggregator_memory_usage_file = os.path.join(
+        fx_federation.workspace_path, "logs", "aggregator_memory_usage.json"
+    )
+    assert os.path.exists(aggregator_memory_usage_file), \
+        "Aggregator memory usage file is not available"
+
+    # Load the aggregator memory usage details
+    with open(aggregator_memory_usage_file) as f:
+        memory_usage_dict = json.load(f)
+
+    # Check memory usage entries for each round
+    assert len(memory_usage_dict) == request.config.num_rounds, \
+        "Memory usage details are not available for all rounds"
+
+    # Check memory usage entries for each collaborator
+    for collaborator in fx_federation.collaborators:
+        collaborator_memory_usage_file = os.path.join(
+            fx_federation.workspace_path,
+            "logs",
+            f"{collaborator.collaborator_name}_memory_usage.json",
+        )
+
+        assert os.path.exists(collaborator_memory_usage_file), \
+            f"Memory usage file for collaborator {collaborator.collaborator_name} is not available"
+
+        with open(collaborator_memory_usage_file) as f:
+            memory_usage_dict = json.load(f)
+
+ assert len(memory_usage_dict) == request.config.num_rounds, \
+ f"Memory usage details are not available for all rounds for collaborator {collaborator.collaborator_name}"
+
+ log.info("Memory usage details are available for all participants")
diff --git a/tests/end_to_end/test_suites/task_runner_tests.py b/tests/end_to_end/test_suites/task_runner_tests.py
index 43946d61fb..077ed642dd 100644
--- a/tests/end_to_end/test_suites/task_runner_tests.py
+++ b/tests/end_to_end/test_suites/task_runner_tests.py
@@ -10,51 +10,54 @@
@pytest.mark.torch_cnn_mnist
-def test_torch_cnn_mnist(fx_federation):
+def test_torch_cnn_mnist(request, fx_federation):
"""
Test for torch_cnn_mnist model.
"""
log.info("Testing torch_cnn_mnist model")
# Setup PKI for trusted communication within the federation
- if fx_federation.use_tls:
+ if request.config.use_tls:
assert fed_helper.setup_pki(fx_federation), "Failed to setup PKI for trusted communication"
# Start the federation
results = fed_helper.run_federation(fx_federation)
# Verify the completion of the federation run
- assert fed_helper.verify_federation_run_completion(fx_federation, results), "Federation completion failed"
+ assert fed_helper.verify_federation_run_completion(fx_federation, results,
+ num_rounds=request.config.num_rounds), "Federation completion failed"
@pytest.mark.keras_cnn_mnist
-def test_keras_cnn_mnist(fx_federation):
+def test_keras_cnn_mnist(request, fx_federation):
log.info("Testing keras_cnn_mnist model")
# Setup PKI for trusted communication within the federation
- if fx_federation.use_tls:
+ if request.config.use_tls:
assert fed_helper.setup_pki(fx_federation), "Failed to setup PKI for trusted communication"
# Start the federation
results = fed_helper.run_federation(fx_federation)
# Verify the completion of the federation run
- assert fed_helper.verify_federation_run_completion(fx_federation, results), "Federation completion failed"
+ assert fed_helper.verify_federation_run_completion(fx_federation, results,
+ num_rounds=request.config.num_rounds), "Federation completion failed"
@pytest.mark.torch_cnn_histology
-def test_torch_cnn_histology(fx_federation):
+def test_torch_cnn_histology(request, fx_federation):
"""
Test for torch_cnn_histology model
"""
log.info("Testing torch_cnn_histology model")
# Setup PKI for trusted communication within the federation
- if fx_federation.use_tls:
+ if request.config.use_tls:
assert fed_helper.setup_pki(fx_federation), "Failed to setup PKI for trusted communication"
# Start the federation
results = fed_helper.run_federation(fx_federation)
# Verify the completion of the federation run
- assert fed_helper.verify_federation_run_completion(fx_federation, results), "Federation completion failed"
+ assert fed_helper.verify_federation_run_completion(fx_federation, results,
+ num_rounds=request.config.num_rounds), "Federation completion failed"
diff --git a/tests/end_to_end/utils/federation_helper.py b/tests/end_to_end/utils/federation_helper.py
index d87667077e..dc39323fa9 100644
--- a/tests/end_to_end/utils/federation_helper.py
+++ b/tests/end_to_end/utils/federation_helper.py
@@ -4,6 +4,8 @@
import time
import concurrent.futures
import logging
+import json
+import re
from tests.end_to_end.utils.constants import SUCCESS_MARKER
@@ -68,12 +70,13 @@ def run_federation(fed_obj):
return results
-def verify_federation_run_completion(fed_obj, results):
+def verify_federation_run_completion(fed_obj, results, num_rounds):
"""
Verify the completion of the process for all the participants
Args:
fed_obj (object): Federation fixture object
results (list): List of results
+ num_rounds (int): Number of rounds
Returns:
list: List of response (True or False) for all the participants
"""
@@ -85,7 +88,7 @@ def verify_federation_run_completion(fed_obj, results):
executor.submit(
_verify_completion_for_participant,
participant,
- fed_obj.num_rounds,
+ num_rounds,
results[i]
)
for i, participant in enumerate(fed_obj.collaborators + [fed_obj.aggregator])
@@ -133,3 +136,53 @@ def _verify_completion_for_participant(participant, num_rounds, result_file, tim
else:
log.info(f"Process completed for {participant.name} in {time.time() - start_time} seconds")
return True
+
+
+def extract_memory_usage(log_file):
+ """
+    Extracts memory usage data from a log file.
+
+    This function reads the content of the specified log file, searches for
+    memory usage data using a regular expression pattern, and returns the
+    extracted data parsed from JSON.
+
+    Args:
+        log_file (str): The path to the log file from which to extract memory usage data.
+
+    Returns:
+        list: The per-round memory usage records parsed from the log.
+
+    Raises:
+        json.JSONDecodeError: If there is an error decoding the JSON data.
+        Exception: If memory usage data is not found in the log file.
+ """
+ try:
+ with open(log_file, 'r') as file:
+ content = file.read()
+
+ pattern = r"Publish memory usage: (\[.*?\])"
+ match = re.search(pattern, content, re.DOTALL)
+
+ if match:
+ memory_usage_data = match.group(1)
+ memory_usage_data = re.sub(r'\S+\.py:\d+', '', memory_usage_data)
+ memory_usage_data = memory_usage_data.replace('\n', '').replace(' ', '')
+ memory_usage_data = memory_usage_data.replace("'", '"')
+ memory_usage_dict = json.loads(memory_usage_data)
+ return memory_usage_dict
+ else:
+ log.error("Memory usage data not found in the log file")
+ raise Exception("Memory usage data not found in the log file")
+ except Exception as e:
+ log.error(f"An error occurred while extracting memory usage: {e}")
+ raise e
+
+
+def write_memory_usage_to_file(memory_usage_dict, output_file):
+ """
+    Writes memory usage data to a file.
+
+    This function writes the specified memory usage data to the specified output file as JSON.
+
+    Args:
+        memory_usage_dict (dict): A dictionary containing the memory usage data.
+        output_file (str): The path to the output file to which to write the memory usage data.
+ """
+ try:
+ with open(output_file, 'w') as file:
+ json.dump(memory_usage_dict, file, indent=4)
+ except Exception as e:
+ log.error(f"An error occurred while writing memory usage data to file: {e}")
+ raise e
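
A quick illustrative run of the extraction pattern in extract_memory_usage against a fabricated log line (the real log format is only assumed here):

    import json
    import re

    content = "INFO Publish memory usage: [{'round_number': 0, 'process_memory': 512}]"
    match = re.search(r"Publish memory usage: (\[.*?\])", content, re.DOTALL)
    data = match.group(1).replace("'", '"')  # single- to double-quoted JSON
    print(json.loads(data))  # [{'round_number': 0, 'process_memory': 512}]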
diff --git a/tests/github/pki_wrong_cn.py b/tests/github/pki_wrong_cn.py
index 7ce669d707..33eaeb0a9d 100644
--- a/tests/github/pki_wrong_cn.py
+++ b/tests/github/pki_wrong_cn.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2020-2023 Intel Corporation
+# Copyright (C) 2020-2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import grpc
import subprocess
@@ -75,7 +75,7 @@ def start_aggregator():
subprocess.check_call([
'fx', 'workspace', 'create',
'--prefix', prefix,
- '--template', 'keras_cnn_mnist'
+ '--template', 'torch_cnn_mnist'
])
os.chdir(prefix)
fqdn = getfqdn_env()