From c23d7e8979fcdc3373f8e37efbdda329ea8af51d Mon Sep 17 00:00:00 2001 From: "Shah, Karan" Date: Thu, 7 Nov 2024 16:14:34 +0530 Subject: [PATCH 01/62] Download src only for packages that exist on pip Signed-off-by: Shah, Karan --- openfl-docker/licenses.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/openfl-docker/licenses.sh b/openfl-docker/licenses.sh index 005a8bccfa..4eba1e8c97 100755 --- a/openfl-docker/licenses.sh +++ b/openfl-docker/licenses.sh @@ -45,6 +45,6 @@ if [ "$INSTALL_SOURCES" = "yes" ]; then # Append dependency list to all_dependencies.txt pip-licenses | awk '{for(i=1;i<=NF;i++) if(i!=2) printf $i" "; print ""}' | tee -a all_dependencies.txt - # Download source packages for Python packages with specific licenses - pip-licenses | grep -E 'GPL|MPL|EPL' | awk '{OFS="=="} {print $1,$2}' | xargs pip download --no-binary :all: + # Download source packages for Python packages (if exists) with specific licenses + pip-licenses | grep -E 'GPL|MPL|EPL' | awk '{OFS="=="} {print $1,$2}' | xargs -I {} sh -c 'pip download --no-binary :all: {} || true' fi From ec25130faf7aae4198951e692930c3d28517c1ec Mon Sep 17 00:00:00 2001 From: "Shah, Karan" Date: Fri, 8 Nov 2024 12:45:10 +0530 Subject: [PATCH 02/62] Install Gramine in base image Signed-off-by: Shah, Karan --- openfl-docker/Dockerfile.base | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/openfl-docker/Dockerfile.base b/openfl-docker/Dockerfile.base index 0b5d746aca..c85f5ca619 100644 --- a/openfl-docker/Dockerfile.base +++ b/openfl-docker/Dockerfile.base @@ -15,6 +15,7 @@ RUN --mount=type=cache,id=apt-dev,target=/var/cache/apt \ apt-get update && \ apt-get install -y \ git \ + curl \ python3-pip \ python3.10-dev \ ca-certificates \ @@ -23,6 +24,18 @@ RUN --mount=type=cache,id=apt-dev,target=/var/cache/apt \ apt-get purge -y linux-libc-dev && \ rm -rf /var/lib/apt/lists/* +# Install Gramine +RUN --mount=type=cache,id=apt-dev,target=/var/cache/apt \ + curl -fsSLo 
/usr/share/keyrings/gramine-keyring.gpg https://packages.gramineproject.io/gramine-keyring.gpg && \ + echo "deb [arch=amd64 signed-by=/usr/share/keyrings/gramine-keyring.gpg] https://packages.gramineproject.io/ jammy main" \ + | tee /etc/apt/sources.list.d/gramine.list && \ + curl -fsSLo /usr/share/keyrings/intel-sgx-deb.asc https://download.01.org/intel-sgx/sgx_repo/ubuntu/intel-sgx-deb.key && \ + echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-sgx-deb.asc] https://download.01.org/intel-sgx/sgx_repo/ubuntu jammy main" \ + | tee /etc/apt/sources.list.d/intel-sgx.list && \ + apt-get update && \ + apt-get install -y gramine --no-install-recommends && \ + rm -rf /var/lib/apt/lists/* + # Create an unprivileged user. RUN groupadd -g 1001 default && \ useradd -m -u 1001 -g default user From 39aacfc00635f2af4f1e51e51a4f212407ad8ead Mon Sep 17 00:00:00 2001 From: Teodor Parvanov Date: Fri, 8 Nov 2024 09:05:34 +0100 Subject: [PATCH 03/62] Wiring flake8 with the PR pipeline Signed-off-by: Teodor Parvanov --- .github/workflows/lint.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 1c7f9fe4aa..808142965a 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -26,4 +26,7 @@ jobs: python -m pip install --upgrade pip pip install -r linters-requirements.txt - name: Lint using built-in script - run: bash shell/lint.sh \ No newline at end of file + run: bash shell/lint.sh + - name: Lint with flake8 + run: | + flake8 --show-source \ No newline at end of file From e554695917cdf008c6091715e434d5f077f90068 Mon Sep 17 00:00:00 2001 From: Teodor Parvanov Date: Fri, 8 Nov 2024 09:06:05 +0100 Subject: [PATCH 04/62] Fixing code formatting issues reported by flake8 Signed-off-by: Teodor Parvanov --- openfl-workspace/keras_cnn_mnist/src/tfmnist_inmemory.py | 6 +++--- .../keras_cnn_with_compression/src/tfmnist_inmemory.py | 6 +++--- 
openfl-workspace/torch_cnn_histology/src/dataloader.py | 6 +++--- openfl-workspace/torch_cnn_mnist/src/dataloader.py | 6 +++--- .../src/ptmnist_inmemory.py | 6 +++--- .../torch_cnn_mnist_fed_eval/src/ptmnist_inmemory.py | 6 +++--- .../torch_cnn_mnist_straggler_check/src/ptmnist_inmemory.py | 6 +++--- openfl-workspace/torch_unet_kvasir/src/data_loader.py | 6 +++--- 8 files changed, 24 insertions(+), 24 deletions(-) diff --git a/openfl-workspace/keras_cnn_mnist/src/tfmnist_inmemory.py b/openfl-workspace/keras_cnn_mnist/src/tfmnist_inmemory.py index 51f4ccf739..bbad002681 100644 --- a/openfl-workspace/keras_cnn_mnist/src/tfmnist_inmemory.py +++ b/openfl-workspace/keras_cnn_mnist/src/tfmnist_inmemory.py @@ -28,10 +28,10 @@ def __init__(self, data_path, batch_size, **kwargs): # collaborator list. try: int(data_path) - except: + except ValueError: raise ValueError( - "Expected `%s` to be representable as `int`, as it refers to the data shard " + - "number used by the collaborator.", + "Expected `%s` to be representable as `int`, as it refers to the data shard " + + "number used by the collaborator.", data_path ) diff --git a/openfl-workspace/keras_cnn_with_compression/src/tfmnist_inmemory.py b/openfl-workspace/keras_cnn_with_compression/src/tfmnist_inmemory.py index 80b913e5f5..f932502e31 100644 --- a/openfl-workspace/keras_cnn_with_compression/src/tfmnist_inmemory.py +++ b/openfl-workspace/keras_cnn_with_compression/src/tfmnist_inmemory.py @@ -28,10 +28,10 @@ def __init__(self, data_path, batch_size, **kwargs): # collaborator list. 
try: int(data_path) - except: + except ValueError: raise ValueError( - "Expected `%s` to be representable as `int`, as it refers to the data shard " + - "number used by the collaborator.", + "Expected `%s` to be representable as `int`, as it refers to the data shard " + + "number used by the collaborator.", data_path ) diff --git a/openfl-workspace/torch_cnn_histology/src/dataloader.py b/openfl-workspace/torch_cnn_histology/src/dataloader.py index fa4ae86778..b0655cf5f0 100644 --- a/openfl-workspace/torch_cnn_histology/src/dataloader.py +++ b/openfl-workspace/torch_cnn_histology/src/dataloader.py @@ -39,10 +39,10 @@ def __init__(self, data_path, batch_size, **kwargs): try: int(data_path) - except: + except ValueError: raise ValueError( - "Expected `%s` to be representable as `int`, as it refers to the data shard " + - "number used by the collaborator.", + "Expected `%s` to be representable as `int`, as it refers to the data shard " + + "number used by the collaborator.", data_path ) diff --git a/openfl-workspace/torch_cnn_mnist/src/dataloader.py b/openfl-workspace/torch_cnn_mnist/src/dataloader.py index 3f3eeeb0bb..0557e81af4 100644 --- a/openfl-workspace/torch_cnn_mnist/src/dataloader.py +++ b/openfl-workspace/torch_cnn_mnist/src/dataloader.py @@ -28,10 +28,10 @@ def __init__(self, data_path, batch_size, **kwargs): try: int(data_path) - except: + except ValueError: raise ValueError( - "Expected `%s` to be representable as `int`, as it refers to the data shard " + - "number used by the collaborator.", + "Expected `%s` to be representable as `int`, as it refers to the data shard " + + "number used by the collaborator.", data_path ) diff --git a/openfl-workspace/torch_cnn_mnist_eden_compression/src/ptmnist_inmemory.py b/openfl-workspace/torch_cnn_mnist_eden_compression/src/ptmnist_inmemory.py index 74c8ec5d03..ecd9f777e4 100644 --- a/openfl-workspace/torch_cnn_mnist_eden_compression/src/ptmnist_inmemory.py +++ 
b/openfl-workspace/torch_cnn_mnist_eden_compression/src/ptmnist_inmemory.py @@ -29,10 +29,10 @@ def __init__(self, data_path, batch_size, **kwargs): try: int(data_path) - except: + except ValueError: raise ValueError( - "Expected `%s` to be representable as `int`, as it refers to the data shard " + - "number used by the collaborator.", + "Expected `%s` to be representable as `int`, as it refers to the data shard " + + "number used by the collaborator.", data_path ) diff --git a/openfl-workspace/torch_cnn_mnist_fed_eval/src/ptmnist_inmemory.py b/openfl-workspace/torch_cnn_mnist_fed_eval/src/ptmnist_inmemory.py index 324545a763..0438e5d812 100644 --- a/openfl-workspace/torch_cnn_mnist_fed_eval/src/ptmnist_inmemory.py +++ b/openfl-workspace/torch_cnn_mnist_fed_eval/src/ptmnist_inmemory.py @@ -29,10 +29,10 @@ def __init__(self, data_path, batch_size, **kwargs): try: int(data_path) - except: + except ValueError: raise ValueError( - "Expected `%s` to be representable as `int`, as it refers to the data shard " + - "number used by the collaborator.", + "Expected `%s` to be representable as `int`, as it refers to the data shard " + + "number used by the collaborator.", data_path ) diff --git a/openfl-workspace/torch_cnn_mnist_straggler_check/src/ptmnist_inmemory.py b/openfl-workspace/torch_cnn_mnist_straggler_check/src/ptmnist_inmemory.py index ccf234239c..508bea12d0 100644 --- a/openfl-workspace/torch_cnn_mnist_straggler_check/src/ptmnist_inmemory.py +++ b/openfl-workspace/torch_cnn_mnist_straggler_check/src/ptmnist_inmemory.py @@ -28,10 +28,10 @@ def __init__(self, data_path, batch_size, **kwargs): # of collaborator list. 
try: int(data_path) - except: + except ValueError: raise ValueError( - "Expected `%s` to be representable as `int`, as it refers to the data shard " + - "number used by the collaborator.", + "Expected `%s` to be representable as `int`, as it refers to the data shard " + + "number used by the collaborator.", data_path ) diff --git a/openfl-workspace/torch_unet_kvasir/src/data_loader.py b/openfl-workspace/torch_unet_kvasir/src/data_loader.py index 0f968808e4..9d454b28af 100644 --- a/openfl-workspace/torch_unet_kvasir/src/data_loader.py +++ b/openfl-workspace/torch_unet_kvasir/src/data_loader.py @@ -124,10 +124,10 @@ def __init__(self, data_path, batch_size, **kwargs): try: int(data_path) - except: + except ValueError: raise ValueError( - "Expected `%s` to be representable as `int`, as it refers to the data shard " + - "number used by the collaborator.", + "Expected `%s` to be representable as `int`, as it refers to the data shard " + + "number used by the collaborator.", data_path ) From 1fe5d4ae8c837bc2ae1453124f6b9e2834d44136 Mon Sep 17 00:00:00 2001 From: Teodor Parvanov Date: Fri, 8 Nov 2024 10:31:27 +0100 Subject: [PATCH 05/62] Update ubuntu.yml to use custom lint configs as in lint.yml Signed-off-by: Teodor Parvanov --- .github/workflows/lint.yml | 5 +---- .github/workflows/ubuntu.yml | 8 +++----- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 808142965a..3bf1b32dac 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -25,8 +25,5 @@ jobs: run: | python -m pip install --upgrade pip pip install -r linters-requirements.txt - - name: Lint using built-in script + - name: Lint with OpenFL-specific rules run: bash shell/lint.sh - - name: Lint with flake8 - run: | - flake8 --show-source \ No newline at end of file diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index 1c1dfe49c7..3f26c797fc 100644 --- a/.github/workflows/ubuntu.yml +++ 
b/.github/workflows/ubuntu.yml @@ -16,14 +16,12 @@ jobs: uses: actions/setup-python@v3 with: python-version: "3.8" - - name: Install dependencies + - name: Install linters run: | python -m pip install --upgrade pip pip install -r linters-requirements.txt - pip install . - - name: Lint with flake8 - run: | - flake8 --show-source + - name: Lint with OpenFL-specific rules + run: bash shell/lint.sh pytest-coverage: # from pytest_coverage.yml needs: lint From 7e2c9cd16ae6ead598a110d2fb6b78e6050e8514 Mon Sep 17 00:00:00 2001 From: noopur Date: Fri, 8 Nov 2024 12:04:25 +0000 Subject: [PATCH 06/62] Pytest framework implementation with Task Runner GitHub Workflow Signed-off-by: noopur --- .github/workflows/task_runner_e2e.yml | 104 +++++ test-requirements.txt | 1 + tests/openfl_e2e/README.md | 55 +++ tests/openfl_e2e/__init__.py | 3 + tests/openfl_e2e/conftest.py | 280 +++++++++++++ tests/openfl_e2e/models/participants.py | 380 ++++++++++++++++++ tests/openfl_e2e/pytest.ini | 12 + tests/openfl_e2e/requirements.txt | 7 + tests/openfl_e2e/test_suites/sample_tests.py | 22 + .../test_suites/task_runner_tests.py | 83 ++++ tests/openfl_e2e/utils/conftest_helper.py | 35 ++ tests/openfl_e2e/utils/constants.py | 23 ++ tests/openfl_e2e/utils/federation_helper.py | 130 ++++++ tests/openfl_e2e/utils/logger.py | 38 ++ tests/openfl_e2e/utils/subprocess_helper.py | 126 ++++++ tests/openfl_e2e/utils/xml_helper.py | 75 ++++ 16 files changed, 1374 insertions(+) create mode 100644 .github/workflows/task_runner_e2e.yml create mode 100644 tests/openfl_e2e/README.md create mode 100644 tests/openfl_e2e/__init__.py create mode 100644 tests/openfl_e2e/conftest.py create mode 100644 tests/openfl_e2e/models/participants.py create mode 100644 tests/openfl_e2e/pytest.ini create mode 100644 tests/openfl_e2e/requirements.txt create mode 100644 tests/openfl_e2e/test_suites/sample_tests.py create mode 100644 tests/openfl_e2e/test_suites/task_runner_tests.py create mode 100644 
tests/openfl_e2e/utils/conftest_helper.py create mode 100644 tests/openfl_e2e/utils/constants.py create mode 100644 tests/openfl_e2e/utils/federation_helper.py create mode 100644 tests/openfl_e2e/utils/logger.py create mode 100644 tests/openfl_e2e/utils/subprocess_helper.py create mode 100644 tests/openfl_e2e/utils/xml_helper.py diff --git a/.github/workflows/task_runner_e2e.yml b/.github/workflows/task_runner_e2e.yml new file mode 100644 index 0000000000..69e5bb3dc0 --- /dev/null +++ b/.github/workflows/task_runner_e2e.yml @@ -0,0 +1,104 @@ +#--------------------------------------------------------------------------- +# Workflow to run Task Runner E2E tests +# Authors - Noopur, Payal Chaurasiya +#--------------------------------------------------------------------------- +name: Task Runner E2E + +on: + schedule: + - cron: '0 0 * * *' # Run every day at midnight + workflow_dispatch: + inputs: + num_rounds: + description: 'Number of rounds to train' + required: false + default: "5" + type: string + num_collaborators: + description: 'Number of collaborators' + required: false + default: "2" + type: string + +permissions: + contents: read + +# Environment variables common for all the jobs +env: + NUM_ROUNDS: ${{ inputs.num_rounds || '5' }} + NUM_COLLABORATORS: ${{ inputs.num_collaborators || '2' }} + +jobs: + test_run: + name: test + runs-on: ubuntu-22.04 + + strategy: + matrix: + # There are open issues for some of the models, so excluding them for now: + # 1. https://github.com/securefederatedai/openfl/issues/1126 + # 2. 
https://github.com/securefederatedai/openfl/issues/1127 + # model_name: [ "torch_cnn_mnist", "keras_cnn_mnist", "torch_cnn_histology", "tf_2dunet", "tf_cnn_histology" ] + model_name: [ "torch_cnn_mnist", "keras_cnn_mnist" ] + python_version: [ "3.8", "3.9", "3.10" ] + fail-fast: false # do not immediately fail if one of the combinations fail + + env: + MODEL_NAME: ${{ matrix.model_name }} + PYTHON_VERSION: ${{ matrix.python_version }} + + steps: + - name: Checkout OpenFL repository + id: checkout_openfl + uses: actions/checkout@v4.1.1 + with: + fetch-depth: 2 # needed for detecting changes + submodules: "true" + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Set up Python + id: setup_python + uses: actions/setup-python@v3 + with: + python-version: ${{ env.PYTHON_VERSION }} + + - name: Install dependencies + id: install_dependencies + run: | + python -m pip install --upgrade pip + pip install . + pip install -r tests/openfl_e2e/requirements.txt + + - name: Add runner IP to /etc/hosts + id: add_runner_ip + run: | + sudo echo "127.0.0.1 aggregator" | sudo tee -a /etc/hosts + echo "Added runner IP to /etc/hosts" + + - name: Run Task Runner E2E tests + id: run_task_runner_tests + run: | + pytest -v tests/openfl_e2e/test_suites/task_runner_tests.py -m ${{ env.MODEL_NAME }} -s --num_rounds $NUM_ROUNDS --num_collaborators $NUM_COLLABORATORS --model_name ${{ env.MODEL_NAME }} + echo "Task runner regression test run completed" + env: + NO_PROXY: localhost,127.0.0.1,aggregator + + - name: Print test summary # Print the test summary only if the tests were run + id: print_test_summary + if: steps.run_task_runner_tests.outcome == 'success' || steps.run_task_runner_tests.outcome == 'failure' + run: | + python tests/openfl_e2e/utils/xml_helper.py + echo "Test summary printed" + + - name: Tar files # Tar the test results only if the tests were run + id: tar_files + if: steps.run_task_runner_tests.outcome == 'success' || steps.run_task_runner_tests.outcome == 'failure' + run: tar 
-cvf result.tar results + + - name: Upload Artifacts # Upload the test results only if the tar was created + id: upload_artifacts + uses: actions/upload-artifact@v4 + if: steps.tar_files.outcome == 'success' + with: + name: task_runner_${{ env.MODEL_NAME }}_python${{ env.PYTHON_VERSION }}_${{ github.run_id }} + path: result.tar diff --git a/test-requirements.txt b/test-requirements.txt index 80ed75cde5..535b6d5d52 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,3 +1,4 @@ pytest==8.3.3 pytest-asyncio==0.24.0 pytest-mock==3.14.0 +ruamel.yaml \ No newline at end of file diff --git a/tests/openfl_e2e/README.md b/tests/openfl_e2e/README.md new file mode 100644 index 0000000000..167252d47e --- /dev/null +++ b/tests/openfl_e2e/README.md @@ -0,0 +1,55 @@ +# Project Title + +This project is a machine learning workspace that includes various models and test suites. It is structured to facilitate the development, testing, and deployment of machine learning models. + +## Project Structure + +openfl_e2e +├── models # Central location for all model-related code for testing purpose +├── test_suites # Folder containing test files +├── utils # Folder containing helper files +├── __init__.py # To mark test directory as a Python package +├── conftest.py # Pytest framework configuration file +├── pytest.ini # Pytest initialisation file +├── README.md # Readme file +└── requirements.txt # Pytest specific requirements file + +## Pre-requisites + +Setup virtual environment and install OpenFL using [online documentation](https://openfl.readthedocs.io/en/latest/get_started/installation.html). 
+ +## Installation + +To install the required dependencies on above virtual environment, run: + +```sh +pip install -r requirements.txt +``` + +## Usage + +### Running Tests + +To run all the test cases under test_suites, use the following command: + +```python -m pytest -s``` + +To run a specific test case, use below command: + +```python -m pytest test_suites/ -k -s``` + +** -s will ensure all the logs are printed on screen. Ignore, if not required. + +### Output Structure + +openfl_e2e +├── results + ├── # Based on the workspace name provided during test run. + ├── results.xml # Output file in JUNIT. + ├── deployment.log # Log file containing step by step test progress. + +## Contribution +Please ensure that you have tested your changes thoroughly before submitting a pull request. + +## License +This project is licensed under [Apache License Version 2.0](LICENSE). By contributing to the project, you agree to the license and copyright terms therein and release your contribution under these terms. 
diff --git a/tests/openfl_e2e/__init__.py b/tests/openfl_e2e/__init__.py new file mode 100644 index 0000000000..c057ed1f6b --- /dev/null +++ b/tests/openfl_e2e/__init__.py @@ -0,0 +1,3 @@ +# Copyright (C) 2024-2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +"""Tests package.""" diff --git a/tests/openfl_e2e/conftest.py b/tests/openfl_e2e/conftest.py new file mode 100644 index 0000000000..48752c0910 --- /dev/null +++ b/tests/openfl_e2e/conftest.py @@ -0,0 +1,280 @@ +# Copyright 2024-2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest +import collections +import os +import shutil +import xml.etree.ElementTree as ET +import logging + +from tests.openfl_e2e.utils.logger import configure_logging +from tests.openfl_e2e.utils.logger import logger as log +from tests.openfl_e2e.utils.conftest_helper import parse_arguments +import tests.openfl_e2e.utils.constants as constants +import tests.openfl_e2e.models.participants as participants + +# Define a named tuple to store the objects for model owner, aggregator, and collaborators +federation_fixture = collections.namedtuple( + "federation_fixture", + "model_owner, aggregator, collaborators, model_name, workspace_path, results_dir", +) + + +def pytest_addoption(parser): + """ + Add custom command line options to the pytest parser. 
+ Args: + parser: pytest parser object + """ + parser.addini("results_dir", "Directory to store test results", default="results") + parser.addini("log_level", "Logging level", default="DEBUG") + parser.addoption( + "--results_dir", action="store", type=str, default="results", help="Results directory" + ) + parser.addoption( + "--num_collaborators", + action="store", + type=int, + default=constants.NUM_COLLABORATORS, + help="Number of collaborators", + ) + parser.addoption( + "--num_rounds", + action="store", + type=int, + default=constants.NUM_ROUNDS, + help="Number of rounds to train", + ) + parser.addoption( + "--model_name", + action="store", + type=str, + default=constants.DEFAULT_MODEL_NAME, + help="Model name", + ) + + +@pytest.fixture(scope="session", autouse=True) +def setup_logging(pytestconfig): + """ + Setup logging for the test session. + Args: + pytestconfig: pytest config object + Returns: + logger: logger object + """ + results_dir = pytestconfig.getini("results_dir") + log_level = pytestconfig.getini("log_level") + + if not os.path.exists(results_dir): + os.makedirs(results_dir) + + # Setup a global logger to ensure logging works before any test-specific logs are set + configure_logging(os.path.join(results_dir, "deployment.log"), log_level) + return logging.getLogger() + + +@pytest.hookimpl(tryfirst=True, hookwrapper=True) +def pytest_runtest_makereport(item, call): + """ + Hook to capture the result of setup, call, and teardown phases. + This avoids duplicate entries for Pass/Fail in the XML report. 
+ """ + outcome = yield + report = outcome.get_result() + + # Retrieve the custom test_id marker if it exists + test_id_marker = item.get_closest_marker("test_id") + outcome_mapping = {"passed": "Pass", "failed": "Fail"} + report_when_mapping = {"setup": "Setup", "call": "Test", "teardown": "Teardown"} + final_outcome = outcome_mapping.get(report.outcome, report.outcome) + report_phase = report_when_mapping.get(report.when, report.when) + + # Modify nodeid if test_id is provided and append outcome and phase + if test_id_marker: + test_id = test_id_marker.args[0] + report.nodeid = ( + f"{report.nodeid} [{test_id}] [outcome: {final_outcome}] [phase: {report_phase}]" + ) + + # Initialize XML structure if not already initialized + if not hasattr(item.config, "_xml_report"): + item.config._xml_report = ET.Element( + "testsuite", + { + "name": "pytest", + "errors": "0", + "failures": "0", + "skipped": "0", + "tests": "0", + "time": "0", + "timestamp": "", + "hostname": "", + }, + ) + + # Store the result of each phase (setup/call/teardown) + if not hasattr(item, "_results"): + item._results = {} + + # Save the outcome and other details per phase + item._results[report.when] = { + "outcome": final_outcome, + "longrepr": report.longrepr, + "duration": report.duration, + } + # Log failures + if report.when == "call" and report.failed: + logger = logging.getLogger() + logger.error(f"Test {report.nodeid} failed: {call.excinfo.value}") + + # Only create the XML element after the teardown phase + if report.when == "teardown" and not hasattr(item, "_xml_created"): + item._xml_created = True # Ensure XML creation happens only once + + # Determine final outcome based on the worst phase result + if "call" in item._results: + final_outcome = item._results["call"]["outcome"] + elif "setup" in item._results: + final_outcome = item._results["setup"]["outcome"] + else: + final_outcome = "skipped" + + # Create the XML element + testcase = ET.SubElement( + item.config._xml_report, + 
"testcase", + { + "classname": item.module.__name__, + "name": item.name, + "time": str(sum(result["duration"] for result in item._results.values())), + }, + ) + + # Add or tags based on the final outcome + if final_outcome == "Fail": + failure_message = item._results.get("call", {}).get( + "longrepr", item._results.get("setup", {}).get("longrepr", "Unknown Error") + ) + failure = ET.SubElement( + testcase, + "error", + { + "message": str(failure_message), + }, + ) + failure.text = str(failure_message) + elif final_outcome == "skipped": + skipped_message = item._results.get("setup", {}).get("longrepr", "Skipped") + skipped = ET.SubElement( + testcase, + "skipped", + { + "message": str(skipped_message), + }, + ) + skipped.text = str(skipped_message) + + # Update the testsuite summary statistics + tests = int(item.config._xml_report.attrib["tests"]) + 1 + item.config._xml_report.attrib["tests"] = str(tests) + if final_outcome == "Fail": + failures = int(item.config._xml_report.attrib["failures"]) + 1 + item.config._xml_report.attrib["failures"] = str(failures) + elif final_outcome == "skipped": + skipped = int(item.config._xml_report.attrib["skipped"]) + 1 + item.config._xml_report.attrib["skipped"] = str(skipped) + + +def pytest_sessionfinish(session, exitstatus): + """ + Operations to be performed after the test session is finished. + More functionalities to be added in this function in future. + """ + cache_dir = os.path.join(session.config.rootdir, ".pytest_cache") + log.debug(f"\nClearing .pytest_cache directory at {cache_dir}") + if os.path.exists(cache_dir): + shutil.rmtree(cache_dir, ignore_errors=False) + log.debug(f"Cleared .pytest_cache directory at {cache_dir}") + + +@pytest.fixture(scope="module") +def fx_federation(request, pytestconfig): + """ + Fixture for federation. This fixture is used to create the model owner, aggregator, and collaborators. + It also creates workspace. + Args: + request: pytest request object. 
Model name is passed as a parameter to the fixture from test cases. + pytestconfig: pytest config object + Returns: + federation_fixture: Named tuple containing the objects for model owner, aggregator, and collaborators + """ + log.info("Fixture for federation setup") + collaborators = [] + # Default name for bare metal approach, modify as required. + agg_domain_name = "aggregator" + + # Parse the command line arguments + args = parse_arguments() + model_name = args.model_name + results_dir = args.results_dir or pytestconfig.getini("results_dir") + num_collaborators = args.num_collaborators + num_rounds = args.num_rounds + + # Validate the model name and create the workspace name + if not model_name.upper() in constants.ModelName._member_names_: + raise ValueError(f"Invalid model name: {model_name}") + + workspace_name = f"workspace_{model_name}" + + # Create model owner object and the workspace for the model + model_owner = participants.ModelOwner(workspace_name, model_name) + try: + workspace_path = model_owner.create_workspace(results_dir=results_dir) + except Exception as e: + log.error(f"Failed to create the workspace: {e}") + raise e + + # Modify and initialize the plan + try: + model_owner.modify_plan(new_rounds=num_rounds, num_collaborators=num_collaborators) + except Exception as e: + log.error(f"Failed to modify the plan: {e}") + raise e + + try: + model_owner.initialize_plan(agg_domain_name=agg_domain_name) + except Exception as e: + log.error(f"Failed to initialize the plan: {e}") + raise e + + # Modify and initialize the plan + try: + model_owner.certify_workspace() + except Exception as e: + log.error(f"Failed to certify the workspace: {e}") + raise e + + # Create the objects for aggregator and collaborators + aggregator = participants.Aggregator( + agg_domain_name=agg_domain_name, workspace_path=workspace_path + ) + + for i in range(num_collaborators): + collaborator = participants.Collaborator( + collaborator_name=f"collaborator{i+1}", + 
data_directory_path=i + 1, + workspace_path=workspace_path, + ) + collaborators.append(collaborator) + + # Return the federation fixture + return federation_fixture( + model_owner=model_owner, + aggregator=aggregator, + collaborators=collaborators, + model_name=model_name, + workspace_path=workspace_path, + results_dir=results_dir, + ) diff --git a/tests/openfl_e2e/models/participants.py b/tests/openfl_e2e/models/participants.py new file mode 100644 index 0000000000..99cd22df6c --- /dev/null +++ b/tests/openfl_e2e/models/participants.py @@ -0,0 +1,380 @@ +# Copyright 2024-2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import os +import ruamel.yaml +from datetime import datetime + +import tests.openfl_e2e.utils.constants as constants +from tests.openfl_e2e.utils.logger import logger as log +import tests.openfl_e2e.utils.subprocess_helper as sh + +yaml = ruamel.yaml.YAML() +yaml.preserve_quotes = True + + +# Define the ModelOwner class +class ModelOwner: + """ + ModelOwner class to handle the model related operations. 
+ """ + + def __init__(self, workspace_name, model_name): + """ + Initialize the ModelOwner class + """ + self.workspace_name = workspace_name + self.model_name = model_name + self.aggregator = None + self.collaborators = [] + self.workspace_path = None + self.plan_path = None + self.num_collaborators = constants.NUM_COLLABORATORS + self.rounds_to_train = constants.NUM_ROUNDS + + def create_workspace(self, results_dir=None): + """ + Create the workspace for the model + Args: + results_dir (str): Results directory path + Returns: + str: Path to the workspace + """ + try: + results_dir = results_dir if results_dir else os.getcwd() + sh.run_command( + f"fx workspace create --prefix {self.workspace_name} --template {self.model_name}", + work_dir=results_dir, + ) + log.info(f"Created the workspace {self.workspace_name} for the {self.model_name} model") + self.workspace_path = os.path.join(results_dir, self.workspace_name) + log.info(f"Workspace path: {self.workspace_path}") + except Exception as e: + log.error(f"Failed to create the workspace: {e}") + raise e + return self.workspace_path + + def get_workspace_path(self, results_dir, workspace_name): + """ + Get the workspace path + Args: + results_dir (str): Results directory path + workspace_name (str): Workspace name + Returns: + str: Path to the workspace + """ + workspace_path = os.path.join(results_dir, workspace_name) + log.info(f"Workspace path: {workspace_path}") + if os.path.exists(workspace_path): + self.workspace_path = workspace_path + log.info(f"Workspace path: {self.workspace_path}") + else: + log.error(f"Workspace {workspace_name} does not exist in {results_dir}") + raise FileNotFoundError(f"Workspace {workspace_name} does not exist in {results_dir}") + return self.workspace_path + + def modify_plan(self, new_rounds=None, num_collaborators=None): + """ + Modify the plan to train the model + Args: + new_rounds (int): Number of rounds to train + num_collaborators (int): Number of collaborators + Returns: + 
bool: True if successful, else False + """ + self.plan_path = os.path.join(self.workspace_path, "plan", "plan.yaml") + log.info(f"Modifying the plan at {self.plan_path}") + # Open the file and modify the entries + self.rounds_to_train = new_rounds if new_rounds else self.rounds_to_train + self.num_collaborators = num_collaborators if num_collaborators else self.num_collaborators + + with open(self.plan_path) as fp: + data = yaml.load(fp) + + data["aggregator"]["settings"]["rounds_to_train"] = int(self.rounds_to_train) + data["data_loader"]["settings"]["collaborator_count"] = int(self.num_collaborators) + + with open(self.plan_path, "w+") as write_file: + yaml.dump(data, write_file) + + log.info( + f"Modified the plan to train the model for collaborators {self.num_collaborators} and {self.rounds_to_train} rounds" + ) + return True + + def initialize_plan(self, agg_domain_name): + """ + Initialize the plan + Args: + agg_domain_name (str): Aggregator domain name + Returns: + bool: True if successful, else False + """ + try: + log.info("Initializing the plan. 
It will take some time to complete..") + sh.run_command(f"fx plan initialize -a {agg_domain_name}", work_dir=self.workspace_path) + log.info(f"Initialized the plan for the workspace {self.workspace_name}") + except Exception as e: + log.error(f"Failed to initialize the plan: {e}") + raise e + return True + + def certify_workspace(self): + """ + Certify the workspace + Returns: + bool: True if successful, else False + """ + try: + sh.run_command("fx workspace certify", work_dir=self.workspace_path) + log.info(f"Certified the workspace {self.workspace_name}") + except Exception as e: + log.error(f"Failed to certify the workspace: {e}") + raise e + return True + + def export_workspace(self): + """ + Export the workspace + Returns: + bool: True if successful, else False + """ + try: + sh.run_command("fx workspace export", work_dir=self.workspace_path) + log.info(f"Exported the workspace") + except Exception as e: + log.error(f"Failed to export the workspace: {e}") + raise e + return True + + def import_workspace(self, workspace_zip): + """ + Import the workspace + Args: + workspace_zip (str): Path to the workspace zip file + Returns: + bool: True if successful, else False + """ + try: + sh.run_command( + f"fx workspace import --archive {workspace_zip}", work_dir=self.workspace_path + ) + log.info(f"Imported the workspace") + except Exception as e: + log.error(f"Failed to import the workspace: {e}") + raise e + return True + + +# Define the Aggregator class +class Aggregator: + """ + Aggregator class to handle the aggregator operations. 
+ """ + + def __init__(self, agg_domain_name=None, workspace_path=None): + """ + Initialize the Aggregator class + """ + self.name = "aggregator" + self.agg_domain_name = agg_domain_name + self.workspace_path = workspace_path + + def generate_sign_request(self): + """ + Generate a sign request for the aggregator + Returns: + bool: True if successful, else False + """ + try: + sh.run_command( + f"fx aggregator generate-cert-request --fqdn {self.agg_domain_name}", + work_dir=self.workspace_path, + ) + log.info(f"Generated a sign request for {self.name}") + except Exception as e: + log.error(f"Failed to generate the sign request: {e}") + raise e + return True + + def certify_request(self): + """ + Certify the aggregator request + Returns: + bool: True if successful, else False + """ + log.info(f"CA should sign the aggregator {self.name} request") + try: + sh.run_command( + f"fx aggregator certify --silent --fqdn {self.agg_domain_name}", + work_dir=self.workspace_path, + ) + log.info(f"CA signed the request from {self.name}") + except Exception as e: + log.error(f"Failed to certify the aggregator request : {e}") + raise e + return True + + def sign_collaborator_csr(self, collaborator_name): + """ + Sign the CSR for the collaborator + Args: + collaborator_name (str): Name of the collaborator + Returns: + bool: True if successful, else False + """ + try: + zip_name = f"col_{collaborator_name}_to_agg_cert_request.zip" + col_zip = os.path.join(os.getcwd(), self.workspace_path, zip_name) + return_code, output, error = sh.run_command( + f"fx collaborator certify --request-pkg {col_zip} -s", work_dir=self.workspace_path + ) + msg_received = [line for line in output if constants.SUCCESS_MARKER in line] + log.info(f"Message received: {msg_received}") + if return_code == 0 and len(msg_received): + log.info( + f"Successfully signed the CSR for the collaborator {collaborator_name} with zip path {col_zip}" + ) + else: + log.error(f"Failed to sign the CSR for collaborator 
{collaborator_name}: {error}") + + except Exception as e: + log.error(f"Failed to sign the CSR: {e}") + raise e + return True + + def start(self): + """ + Start the aggregator + Returns: + str: Path to the log file + """ + try: + log.info(f"Starting {self.name}") + curr_time = datetime.now().strftime("%Y%m%d_%H%M%S") + filename = f"{self.name}_{curr_time}.log" + res_file = os.path.join(os.getcwd(), self.workspace_path, filename) + bg_file = open(res_file, "w", buffering=1) + + sh.run_command_background( + "fx aggregator start", + work_dir=self.workspace_path, + redirect_to_file=bg_file, + check_sleep=60, + ) + log.info( + f"Started {self.name} and tracking the logs at {os.path.join(self.workspace_path, filename)}" + ) + except Exception as e: + log.error(f"Failed to start the aggregator: {e}") + res_file.close() + raise e + return res_file + + +# Define the Collaborator class +class Collaborator: + """ + Collaborator class to handle the collaborator operations. + """ + + def __init__(self, collaborator_name=None, data_directory_path=None, workspace_path=None): + """ + Initialize the Collaborator class + """ + self.name = collaborator_name + self.collaborator_name = collaborator_name + self.data_directory_path = data_directory_path + self.workspace_path = workspace_path + + def generate_sign_request(self): + """ + Generate a sign request for the collaborator + Returns: + bool: True if successful, else False + """ + try: + sh.run_command( + f"fx collaborator generate-cert-request -n {self.collaborator_name}", + work_dir=self.workspace_path, + ) + log.info(f"Generated a sign request for {self.collaborator_name}") + except Exception as e: + log.error(f"Failed to generate the sign request: {e}") + raise e + return True + + def create_collaborator(self): + """ + Create the collaborator + Returns: + bool: True if successful, else False + """ + try: + sh.run_command( + f"fx collaborator create -n {self.collaborator_name} -d {self.data_directory_path}", + 
work_dir=self.workspace_path, + ) + log.info( + f"Created {self.collaborator_name} with the data directory {self.data_directory_path}" + ) + except Exception as e: + log.error(f"Failed to create the collaborator: {e}") + raise e + return True + + def import_certify_csr(self): + """ + Import and certify the CSR for the collaborator + Returns: + bool: True if successful, else False + """ + try: + zip_name = f"agg_to_col_{self.collaborator_name}_signed_cert.zip" + col_zip = os.path.join(os.getcwd(), self.workspace_path, zip_name) + return_code, output, error = sh.run_command( + f"fx collaborator certify --import {col_zip}", work_dir=self.workspace_path + ) + msg_received = [line for line in output if constants.SUCCESS_MARKER in line] + log.info(f"Message received: {msg_received}") + if return_code == 0 and len(msg_received): + log.info( + f"Successfully imported and certified the CSR for {self.collaborator_name} with zip path {col_zip}" + ) + else: + log.error( + f"Failed to import and certify the CSR for {self.collaborator_name}: {error}" + ) + + except Exception as e: + log.error(f"Failed to import and certify the CSR: {e}") + raise e + return True + + def start(self): + """ + Start the collaborator + Returns: + str: Path to the log file + """ + try: + log.info(f"Starting {self.collaborator_name}") + curr_time = datetime.now().strftime("%Y%m%d_%H%M%S") + filename = f"{self.collaborator_name}_{curr_time}.log" + res_file = os.path.join(os.getcwd(), self.workspace_path, filename) + bg_file = open(res_file, "w", buffering=1) + + sh.run_command_background( + f"fx collaborator start -n {self.collaborator_name}", + work_dir=self.workspace_path, + redirect_to_file=bg_file, + check_sleep=60, + ) + log.info( + f"Started {self.collaborator_name} and tracking the logs at {os.path.join(self.workspace_path, filename)}" + ) + except Exception as e: + log.error(f"Failed to start the collaborator: {e}") + res_file.close() + raise e + return res_file diff --git 
a/tests/openfl_e2e/pytest.ini b/tests/openfl_e2e/pytest.ini new file mode 100644 index 0000000000..9f23293c5a --- /dev/null +++ b/tests/openfl_e2e/pytest.ini @@ -0,0 +1,12 @@ +[pytest] +addopts = -ra -q -s --junitxml=results/results.xml +testpaths = test_suites +junit_family = xunit2 +results_dir = results +log_level = INFO +markers = + torch_cnn_mnist: mark a test as a torch CNN MNIST test. + keras_cnn_mnist: mark a test as a Keras CNN MNIST test. + torch_cnn_histology: mark a test as a torch CNN histology test. + tf_2dunet: mark a test as a tf 2D U-Net test. + tf_cnn_histology: mark a test as a tf CNN histology test. diff --git a/tests/openfl_e2e/requirements.txt b/tests/openfl_e2e/requirements.txt new file mode 100644 index 0000000000..e9190362e7 --- /dev/null +++ b/tests/openfl_e2e/requirements.txt @@ -0,0 +1,7 @@ +lxml +pytest +pytest-html +pytest-ordering +pytest-xdist +pyyaml +ruamel.yaml diff --git a/tests/openfl_e2e/test_suites/sample_tests.py b/tests/openfl_e2e/test_suites/sample_tests.py new file mode 100644 index 0000000000..b12f28833c --- /dev/null +++ b/tests/openfl_e2e/test_suites/sample_tests.py @@ -0,0 +1,22 @@ +# Copyright 2024-2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from tests.openfl_e2e.utils.logger import logger as log + + +# This file contains sample test functions to be run by pytest + +# Function to be tested +def add(a, b): + return a + b + + +# Test function +def test_add(): + log.info("Running test_add") + assert add(1, 2) == 3 + assert add(-1, 1) == 0 + assert add(0, 0) == 0 + log.info("test_add passed") diff --git a/tests/openfl_e2e/test_suites/task_runner_tests.py b/tests/openfl_e2e/test_suites/task_runner_tests.py new file mode 100644 index 0000000000..595916bc36 --- /dev/null +++ b/tests/openfl_e2e/test_suites/task_runner_tests.py @@ -0,0 +1,83 @@ +# Copyright 2024-2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from tests.openfl_e2e.utils.logger import 
logger as log +from tests.openfl_e2e.utils import federation_helper as fed_helper + + +@pytest.mark.torch_cnn_mnist +def test_torch_cnn_mnist(fx_federation): + """ + Test for torch_cnn_mnist model. + """ + log.info(f"Test for torch_cnn_mnist with fx_federation: {fx_federation}") + + # Perform CSR operations like generating sign request, certifying request, etc. + assert fed_helper.perform_csr_operations(fx_federation), "Failed to perform CSR operations" + + # Start the federation + results = fed_helper.run_federation(fx_federation) + + # Verify the completion of the federation run + assert fed_helper.verify_federation_run_completion(fx_federation, results), "Federation completion failed" + + +@pytest.mark.keras_cnn_mnist +def test_keras_cnn_mnist(fx_federation): + log.info(f"Test for keras_cnn_mnist with fx_federation: {fx_federation}") + + # Perform CSR operations like generating sign request, certifying request, etc. + assert fed_helper.perform_csr_operations(fx_federation), "Failed to perform CSR operations" + + # Start the federation + results = fed_helper.run_federation(fx_federation) + + # Verify the completion of the federation run + assert fed_helper.verify_federation_run_completion(fx_federation, results), "Federation completion failed" + + +@pytest.mark.torch_cnn_histology +def test_torch_cnn_histology(fx_federation): + """ + Test for torch_cnn_histology model + """ + log.info(f"Test for torch_cnn_histology with fx_federation: {fx_federation}") + + # Perform CSR operations like generating sign request, certifying request, etc. 
+ assert fed_helper.perform_csr_operations(fx_federation), "Failed to perform CSR operations" + + # Start the federation + results = fed_helper.run_federation(fx_federation) + + # Verify the completion of the federation run + assert fed_helper.verify_federation_run_completion(fx_federation, results), "Federation completion failed" + + +@pytest.mark.tf_2dunet +def test_tf_2dunet(fx_federation): + log.info(f"Test for tf_2dunet with fx_federation: {fx_federation}") + + # Perform CSR operations like generating sign request, certifying request, etc. + assert fed_helper.perform_csr_operations(fx_federation), "Failed to perform CSR operations" + + # Start the federation + results = fed_helper.run_federation(fx_federation) + + # Verify the completion of the federation run + assert fed_helper.verify_federation_run_completion(fx_federation, results), "Federation completion failed" + + +@pytest.mark.tf_cnn_histology +def test_tf_cnn_histology(fx_federation): + log.info(f"Test for tf_cnn_histology with fx_federation: {fx_federation}") + + # Perform CSR operations like generating sign request, certifying request, etc. + assert fed_helper.perform_csr_operations(fx_federation), "Failed to perform CSR operations" + + # Start the federation + results = fed_helper.run_federation(fx_federation) + + # Verify the completion of the federation run + assert fed_helper.verify_federation_run_completion(fx_federation, results), "Federation completion failed" diff --git a/tests/openfl_e2e/utils/conftest_helper.py b/tests/openfl_e2e/utils/conftest_helper.py new file mode 100644 index 0000000000..563d232751 --- /dev/null +++ b/tests/openfl_e2e/utils/conftest_helper.py @@ -0,0 +1,35 @@ +# Copyright 2024-2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import argparse +import sys + +from tests.openfl_e2e.utils.logger import logger as log + + +def parse_arguments(): + """ + Parse command line arguments to provide the required parameters for running the tests. 
+ + Returns: + argparse.Namespace: Parsed command line arguments with the following attributes: + - results_dir (str, optional): Directory to store the results + - num_collaborators (int, default=2): Number of collaborators + - num_rounds (int, default=5): Number of rounds to train + - model_name (str, default="torch_cnn_mnist"): Model name + + Raises: + SystemExit: If the required arguments are not provided or if any argument parsing error occurs. + """ + try: + parser = argparse.ArgumentParser(description="Provide the required arguments to run the tests") + parser.add_argument("--results_dir", type=str, required=False, help="Directory to store the results") + parser.add_argument("--num_collaborators", type=int, default=2, help="Number of collaborators") + parser.add_argument("--num_rounds", type=int, default=5, help="Number of rounds to train") + parser.add_argument("--model_name", type=str, default="torch_cnn_mnist", help="Model name") + args = parser.parse_known_args()[0] + return args + + except Exception as e: + log.error(f"Failed to parse arguments: {e}") + sys.exit(1) diff --git a/tests/openfl_e2e/utils/constants.py b/tests/openfl_e2e/utils/constants.py new file mode 100644 index 0000000000..c0312e1ea8 --- /dev/null +++ b/tests/openfl_e2e/utils/constants.py @@ -0,0 +1,23 @@ +# Copyright 2024-2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from enum import Enum + +# Define the model names. This is a non exhaustive list of models that can be used in the tests +class ModelName(Enum): + """ + Enum class to define the model names. + """ + # IMP - The model name must be same (and in uppercase) as the model value. + # This is used to identify the model in the tests. 
+ TORCH_CNN_MNIST = "torch_cnn_mnist" + KERAS_CNN_MNIST = "keras_cnn_mnist" + TORCH_CNN_HISTOLOGY = "torch_cnn_histology" + TF_2DUNET = "tf_2dunet" + TF_CNN_HISTOLOGY = "tf_cnn_histology" + +NUM_COLLABORATORS = 2 +NUM_ROUNDS = 5 +WORKSPACE_NAME = "my_federation" +DEFAULT_MODEL_NAME = "torch_cnn_mnist" +SUCCESS_MARKER = "✔️ OK" diff --git a/tests/openfl_e2e/utils/federation_helper.py b/tests/openfl_e2e/utils/federation_helper.py new file mode 100644 index 0000000000..c5a7cefd34 --- /dev/null +++ b/tests/openfl_e2e/utils/federation_helper.py @@ -0,0 +1,130 @@ +# Copyright 2024-2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import time +import concurrent.futures + +from tests.openfl_e2e.utils.logger import logger as log + + +def perform_csr_operations(fed_obj): + """ + Perform CSR operations like generating sign request, certifying request, etc. + Args: + fed_obj (object): Federation fixture object + Returns: + bool: True if successful, else False + """ + success = False + # Aggregator operations + try: + log.info(f"Performing operations for {fed_obj.aggregator.name}") + fed_obj.aggregator.generate_sign_request() + fed_obj.aggregator.certify_request() + except Exception as e: + log.error(f"Failed to perform aggregator operations: {e}") + raise e + + # Collaborator operations + for collaborator in fed_obj.collaborators: + try: + log.info(f"Performing operations for {collaborator.collaborator_name}") + collaborator.create_collaborator() + collaborator.generate_sign_request() + # Below step will add collaborator entries in cols.yaml file. 
+ fed_obj.aggregator.sign_collaborator_csr(collaborator.collaborator_name) + collaborator.import_certify_csr() + except Exception as e: + log.error(f"Failed to perform collaborator operations: {e}") + raise e + success = True + + log.info("CSR operations completed successfully for all participants") + return success + + +def run_federation(fed_obj): + """ + Start the federation + Args: + fed_obj (object): Federation fixture object + Returns: + list: List of response files for all the participants + """ + executor = concurrent.futures.ThreadPoolExecutor() + # As the collaborators will wait for aggregator to start, we need to start them in parallel. + futures = [ + executor.submit( + participant.start + ) + for participant in fed_obj.collaborators + [fed_obj.aggregator] + ] + + # Result will contain response files for all the participants. + results = [f.result() for f in futures] + return results + + +def verify_federation_run_completion(fed_obj, results): + """ + Verify the completion of the process for all the participants + Args: + fed_obj (object): Federation fixture object + results (list): List of results + Returns: + list: List of response (True or False) for all the participants + """ + log.info("Verifying the completion of the process for all the participants") + # Start the collaborators and aggregator + executor = concurrent.futures.ThreadPoolExecutor() + # As the collaborators will wait for aggregator to start, we need to start them in parallel. + futures = [ + executor.submit( + _verify_completion_for_participant, + participant, + results[i] + ) + for i, participant in enumerate(fed_obj.collaborators + [fed_obj.aggregator]) + ] + + # Result will contain a list of tuple of replica and operator objects. 
+ results = [f.result() for f in futures] + log.info(f"Results: {results}") + + # If any of the participant failed, return False, else return True + return all(results) + + +def _verify_completion_for_participant(participant, result_file): + """ + Verify the completion of the process for the participant + Args: + participant (object): Participant object + result_file (str): Result file + Returns: + bool: True if successful, else False + """ + # Wait for the successful output message to appear in the log till timeout + timeout = 100000 # in seconds + log.info(f"Printing the last line of the log file for {participant.name} to track the progress") + with open(result_file, 'r') as file: + content = file.read() + start_time = time.time() + while ( + "OK" not in content and time.time() - start_time < timeout + ): + with open(result_file, 'r') as file: + content = file.read() + # Print last 2 lines of the log file on screen to track the progress + log.info(f"{participant.name}: {content.splitlines()[-1:]}") + if "OK" in content: + break + log.info(f"Process is yet to complete for {participant.name}") + time.sleep(45) + + if "OK" not in content: + log.error(f"Process failed/is incomplete for {participant.name} after timeout of {timeout} seconds") + return False + else: + log.info(f"Process completed for {participant.name} in {time.time() - start_time} seconds") + return True diff --git a/tests/openfl_e2e/utils/logger.py b/tests/openfl_e2e/utils/logger.py new file mode 100644 index 0000000000..b42d4c9754 --- /dev/null +++ b/tests/openfl_e2e/utils/logger.py @@ -0,0 +1,38 @@ +# Copyright 2024-2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import logging + +# Get the logger instance configured in conftest.py +logger = logging.getLogger() + + +def configure_logging(log_file, log_level): + """ + Configures logging for the application. + + This function sets up logging to a specified file and the console with the given log level. 
+ It formats the log messages to include the timestamp, logger name, log level, filename, + function name, and the actual log message. + + Args: + log_file (str): Path to the log file. + log_level (int): Logging level (e.g., logging.DEBUG, logging.INFO). + + Raises: + OSError: If there is an issue with creating the log file handler. + """ + formatter = logging.Formatter( + "\n%(asctime)s - %(levelname)s: [%(filename)s - %(funcName)s]: %(message)s", datefmt="%Y-%m-%d %H:%M:%S" + ) + handler = logging.FileHandler(log_file) + handler.setFormatter(formatter) + handler.setLevel(log_level) + + console_handler = logging.StreamHandler() + console_handler.setFormatter(formatter) + console_handler.setLevel(log_level) + logger = logging.getLogger() + logger.setLevel(log_level) + logger.addHandler(handler) + logger.addHandler(console_handler) diff --git a/tests/openfl_e2e/utils/subprocess_helper.py b/tests/openfl_e2e/utils/subprocess_helper.py new file mode 100644 index 0000000000..f9f272d726 --- /dev/null +++ b/tests/openfl_e2e/utils/subprocess_helper.py @@ -0,0 +1,126 @@ +# Copyright 2024-2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import subprocess +import time +import traceback + +from tests.openfl_e2e.utils.logger import logger as log + + +def run_command_background( + cmd, return_error=False, print_stdout=False, work_dir=None, redirect_to_file=None, check_sleep=1 +): + """Execute a command and let it run in background. + + Args: + cmd (Union[str, list]): Command to execute. + Can be a shell type string or a list of command and args. + e.g. ['ps', '-ef'], ['/bin/bash/', script.sh], './script.sh' + return_error: Whether to return error message. This has no effect. + print_stdout: If True and the process completes immediately, print the stdout. + This is obsolete. Will always print debug output and errors. + Output will be truncated to 10 lines. + work_dir: Directory from which to run the command. Current directory if None. 
+ redirect_to_file: The file descriptor to which the STDERR and STDOUT will be written. + check_sleep: Time in seconds to sleep before polling to make sure + the background process is still running. + + Returns: + Popen object of the subprocess. None, if the command completed immediately. + """ + if isinstance(cmd, list): + shell = False + else: + shell = True + + if redirect_to_file: + output_redirect = redirect_to_file + error_redirect = subprocess.STDOUT + else: + output_redirect = subprocess.PIPE + error_redirect = subprocess.PIPE + process = subprocess.Popen( + cmd, stdout=output_redirect, stderr=error_redirect, shell=shell, text=True, cwd=work_dir + ) + time.sleep(check_sleep) + return_code = process.poll() + if return_code is None: + return process + elif return_code != 0: + if redirect_to_file: + log.info( + "The background process has been writing STDERR and STDOUT to a file passed in as 'redirect_to_file' arg" + ) + else: + error = process.stderr.read().rstrip("\n") + log.warning(f"Error is: {error}") + log.error(f"Error Traceback: {traceback.print_exc()}") + raise subprocess.CalledProcessError(returncode=return_code, cmd=cmd) + else: + log.warning("Process for Command completed instantly.") + if redirect_to_file: + log.info( + "The background process has been writing STDERR and STDOUT to a file passed in as 'redirect_to_file' arg" + ) + else: + output = process.stdout.read().rstrip("\n").split("\n") + if print_stdout and output is not None: + log.info(f"Command to run - {cmd} output - {output}") + return None + + +def run_command( + cmd, return_error=True, print_stdout=False, work_dir=None, timeout=None, check=True +): + """ + Execute the command using subprocess and log the output to logger. + + Args: + cmd (str or list): The command to run. + return_error (bool): Whether to return errors or raise them. + print_stdout (bool): Whether to print the standard output. + work_dir (str): The working directory for the command. 
+ timeout (int): The timeout in seconds for the command to complete. + check (bool): Whether to check for errors after command execution. + + Returns: + tuple: (return_code, output, error) + """ + if isinstance(cmd, list): + shell = False + else: + shell = True + + try: + result = subprocess.run( + cmd, capture_output=True, shell=shell, text=True, cwd=work_dir, check=check, timeout=timeout + ) + except subprocess.CalledProcessError as e: + log.error(f"Command '{cmd}' failed with return code {e.returncode}") + log.error(f"Error output: {e.stderr}") + if not return_error: + raise + return e.returncode, [], [e.stderr] + except Exception as e: + log.error(f"Failed to execute command '{cmd}': {str(e)}") + log.error(f"Error Traceback: {traceback.format_exc()}") + if not return_error: + raise + return -1, [], [str(e)] + + output = result.stdout.splitlines() + error = result.stderr.splitlines() + + if result.returncode == 0: + log.info(f"Successfully ran command: {cmd}") + if print_stdout: + log.info(f"Command output: {result.stdout}") + else: + log.error(f"Subprocess command '{cmd}' returned non-zero return_code [{result.returncode}]:") + log.error(f"stderr: {result.stderr}") + log.error(f"stdout: {result.stdout}") + if not return_error: + raise subprocess.CalledProcessError(returncode=result.returncode, cmd=cmd, stderr=result.stderr) + + return result.returncode, output, error diff --git a/tests/openfl_e2e/utils/xml_helper.py b/tests/openfl_e2e/utils/xml_helper.py new file mode 100644 index 0000000000..56ced867db --- /dev/null +++ b/tests/openfl_e2e/utils/xml_helper.py @@ -0,0 +1,75 @@ +# Copyright 2024-2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import xml.etree.ElementTree as ET +from lxml import etree +import os + +# Initialize the XML parser +parser = etree.XMLParser(recover=True, encoding='utf-8') +tree = ET.parse("results/results.xml", parser=parser) + +# Get the root element +testsuites = tree.getroot() + + +def get_test_status(result): + """ 
+ Get the test status/verdict + Args + result: the result object to check` + Returns + status of the test status + """ + status = "FAILED" + if "failure" in result.tag or "error" in result.tag: + # If the result has a tag "failure", set status as "FAIL" + status = "FAILED" + elif "skipped" in result.tag: + # If the result has a tag "skipped", set status as "SKIPPED" + status = "SKIPPED" + else: + status = "PASSED" + return status + + +def get_testcase_result(): + """ + Get the test case results from the XML file + """ + database_list = [] + status = None + # Iterate over each testsuite in testsuites + for testsuite in testsuites: + # Populate testcase details in a dictionary + for testcase in testsuite: + database_dict = {} + if testcase.attrib.get("name"): + database_dict["name"] = testcase.attrib.get("name") + database_dict["time"] = testcase.attrib.get("time") + + # Successful test won't have any result/subtag + if len(testcase) == 0: + database_dict["result"] = "PASSED" + + # Iterate over each result in testsuite + for result in testcase: + status = get_test_status(result) + database_dict["result"] = status + + # Append the dictionary to database_list + database_list.append(database_dict) + status = None + + print(f"Database list = {database_list}") + return database_list + + +result = get_testcase_result() + +# Write the results to GitHub step summary +with open(os.getenv('GITHUB_STEP_SUMMARY'), 'a') as fh: + print("| Name | Time (in seconds) | Result |", file=fh) + print("| ------------- | ------------- | ------------- |", file=fh) + for item in result: + print(f"| {item['name']} | {item['time']} | {item['result']} |", file=fh) From 63ee840e25e4f155003dc83df95b3b6f7af14eb2 Mon Sep 17 00:00:00 2001 From: noopur Date: Fri, 8 Nov 2024 12:07:25 +0000 Subject: [PATCH 07/62] Logging fix Signed-off-by: noopur --- .github/workflows/task_runner_e2e.yml | 2 +- tests/openfl_e2e/utils/xml_helper.py | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git 
a/.github/workflows/task_runner_e2e.yml b/.github/workflows/task_runner_e2e.yml index 69e5bb3dc0..ba5c5c06c7 100644 --- a/.github/workflows/task_runner_e2e.yml +++ b/.github/workflows/task_runner_e2e.yml @@ -79,7 +79,7 @@ jobs: id: run_task_runner_tests run: | pytest -v tests/openfl_e2e/test_suites/task_runner_tests.py -m ${{ env.MODEL_NAME }} -s --num_rounds $NUM_ROUNDS --num_collaborators $NUM_COLLABORATORS --model_name ${{ env.MODEL_NAME }} - echo "Task runner regression test run completed" + echo "Task runner e2e test run completed" env: NO_PROXY: localhost,127.0.0.1,aggregator diff --git a/tests/openfl_e2e/utils/xml_helper.py b/tests/openfl_e2e/utils/xml_helper.py index 56ced867db..7fa753ea65 100644 --- a/tests/openfl_e2e/utils/xml_helper.py +++ b/tests/openfl_e2e/utils/xml_helper.py @@ -5,6 +5,8 @@ from lxml import etree import os +from tests.openfl_e2e.utils.logger import logger as log + # Initialize the XML parser parser = etree.XMLParser(recover=True, encoding='utf-8') tree = ET.parse("results/results.xml", parser=parser) @@ -61,7 +63,7 @@ def get_testcase_result(): database_list.append(database_dict) status = None - print(f"Database list = {database_list}") + log.info(f"Database list = {database_list}") return database_list @@ -69,6 +71,7 @@ def get_testcase_result(): # Write the results to GitHub step summary with open(os.getenv('GITHUB_STEP_SUMMARY'), 'a') as fh: + # DO NOT change the print statements print("| Name | Time (in seconds) | Result |", file=fh) print("| ------------- | ------------- | ------------- |", file=fh) for item in result: From 80c93931cbbf274add07de3f0a72fceff7ed2ea0 Mon Sep 17 00:00:00 2001 From: gbikkiintel Date: Fri, 8 Nov 2024 12:35:51 -0600 Subject: [PATCH 08/62] Updated pull request types and disabled triggering PRs on drafts --- .github/workflows/docker-bench-security.yml | 2 ++ .github/workflows/dockerization.yml | 2 ++ .github/workflows/double_ws_export.yml | 2 ++ .github/workflows/experimental_workflow_tests.yml | 3 ++- 
.github/workflows/gandlf.yml | 3 ++- .github/workflows/hadolint.yml | 3 ++- .github/workflows/interactive-kvasir.yml | 3 ++- .github/workflows/interactive-tensorflow.yml | 2 ++ .github/workflows/lint.yml | 2 ++ .github/workflows/pki.yml | 3 ++- .github/workflows/pytest_coverage.yml | 3 ++- .github/workflows/straggler-handling.yml | 2 ++ .github/workflows/taskrunner.yml | 2 ++ .github/workflows/taskrunner_eden_pipeline.yml | 5 +++-- .github/workflows/trivy.yml | 3 +++ .github/workflows/workflow_interface_101_mnist.yml | 2 ++ 16 files changed, 34 insertions(+), 8 deletions(-) diff --git a/.github/workflows/docker-bench-security.yml b/.github/workflows/docker-bench-security.yml index 3b5211f668..588c454187 100644 --- a/.github/workflows/docker-bench-security.yml +++ b/.github/workflows/docker-bench-security.yml @@ -3,12 +3,14 @@ name: Docker Bench for Security on: pull_request: branches: [ develop ] + types: [opened, synchronize, reopened, ready_for_review] permissions: contents: read jobs: build: + if: github.event.pull_request.draft == false runs-on: ubuntu-latest timeout-minutes: 10 diff --git a/.github/workflows/dockerization.yml b/.github/workflows/dockerization.yml index 07e41c95b7..81d29f5f2a 100644 --- a/.github/workflows/dockerization.yml +++ b/.github/workflows/dockerization.yml @@ -4,12 +4,14 @@ name: Dockerization on: pull_request: branches: [ develop ] + types: [opened, synchronize, reopened, ready_for_review] permissions: contents: read jobs: build: + if: github.event.pull_request.draft == false runs-on: ubuntu-latest timeout-minutes: 10 diff --git a/.github/workflows/double_ws_export.yml b/.github/workflows/double_ws_export.yml index 5f614cb720..bf9fe50965 100644 --- a/.github/workflows/double_ws_export.yml +++ b/.github/workflows/double_ws_export.yml @@ -6,6 +6,7 @@ name: Double workspace export on: pull_request: branches: [ develop ] + types: [opened, synchronize, reopened, ready_for_review] permissions: contents: read @@ -16,6 +17,7 @@ env: jobs: 
build: + if: github.event.pull_request.draft == false runs-on: 'ubuntu-latest' steps: diff --git a/.github/workflows/experimental_workflow_tests.yml b/.github/workflows/experimental_workflow_tests.yml index ab217cc6a5..ab039e5e52 100644 --- a/.github/workflows/experimental_workflow_tests.yml +++ b/.github/workflows/experimental_workflow_tests.yml @@ -5,13 +5,14 @@ on: branches: [ develop ] pull_request: branches: [ develop ] + types: [opened, synchronize, reopened, ready_for_review] permissions: contents: read jobs: build: - if: contains(github.event.pull_request.labels.*.name, 'workflow_interface') + if: (github.event.pull_request.draft == false && contains(github.event.pull_request.labels.*.name, 'workflow_interface')) runs-on: ubuntu-latest steps: diff --git a/.github/workflows/gandlf.yml b/.github/workflows/gandlf.yml index ff90a39c63..8a63492aa9 100644 --- a/.github/workflows/gandlf.yml +++ b/.github/workflows/gandlf.yml @@ -6,6 +6,7 @@ name: GaNDLF TaskRunner on: pull_request: branches: [ develop ] + types: [opened, synchronize, reopened, ready_for_review] permissions: contents: read @@ -16,7 +17,7 @@ env: jobs: build: - + if: github.event.pull_request.draft == false runs-on: ubuntu-latest steps: diff --git a/.github/workflows/hadolint.yml b/.github/workflows/hadolint.yml index aebc58fcca..265c457c43 100644 --- a/.github/workflows/hadolint.yml +++ b/.github/workflows/hadolint.yml @@ -6,13 +6,14 @@ name: Hadolint Security Scan on: pull_request: branches: [ develop ] + types: [opened, synchronize, reopened, ready_for_review] permissions: contents: read jobs: build: - + if: github.event.pull_request.draft == false runs-on: ubuntu-latest steps: diff --git a/.github/workflows/interactive-kvasir.yml b/.github/workflows/interactive-kvasir.yml index 830da7abc5..93ff09126c 100644 --- a/.github/workflows/interactive-kvasir.yml +++ b/.github/workflows/interactive-kvasir.yml @@ -6,13 +6,14 @@ name: Interactive API - Pytorch Kvasir UNet on: pull_request: branches: [ 
develop ] + types: [opened, synchronize, reopened, ready_for_review] permissions: contents: read jobs: build: - + if: github.event.pull_request.draft == false strategy: matrix: os: ['ubuntu-latest', 'windows-latest'] diff --git a/.github/workflows/interactive-tensorflow.yml b/.github/workflows/interactive-tensorflow.yml index 4958ca139b..110ee1b175 100644 --- a/.github/workflows/interactive-tensorflow.yml +++ b/.github/workflows/interactive-tensorflow.yml @@ -6,12 +6,14 @@ name: Interactive API - Tensorflow MNIST on: pull_request: branches: [ develop ] + types: [opened, synchronize, reopened, ready_for_review] permissions: contents: read jobs: build: + if: github.event.pull_request.draft == false runs-on: ubuntu-latest # Add Windows support after https://github.com/keras-team/keras/issues/16308 is merged steps: diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 1c7f9fe4aa..57e33ab9d2 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -6,12 +6,14 @@ name: Check code format on: pull_request: branches: [ develop ] + types: [opened, synchronize, reopened, ready_for_review] permissions: contents: read jobs: build: + if: github.event.pull_request.draft == false runs-on: ubuntu-latest diff --git a/.github/workflows/pki.yml b/.github/workflows/pki.yml index 20471ef650..7d4f90df5c 100644 --- a/.github/workflows/pki.yml +++ b/.github/workflows/pki.yml @@ -6,6 +6,7 @@ name: Private Key Infrastructure on: pull_request: branches: [ develop ] + types: [opened, synchronize, reopened, ready_for_review] permissions: contents: read @@ -16,7 +17,7 @@ env: jobs: test_insecure_client: - + if: github.event.pull_request.draft == false runs-on: ubuntu-latest steps: diff --git a/.github/workflows/pytest_coverage.yml b/.github/workflows/pytest_coverage.yml index 6d50288ca8..f0543b62d1 100644 --- a/.github/workflows/pytest_coverage.yml +++ b/.github/workflows/pytest_coverage.yml @@ -6,6 +6,7 @@ name: Pytest and code coverage on: pull_request: 
branches: [ develop ] + types: [opened, synchronize, reopened, ready_for_review] workflow_dispatch: permissions: @@ -17,7 +18,7 @@ env: jobs: build: - + if: github.event.pull_request.draft == false runs-on: ubuntu-latest steps: diff --git a/.github/workflows/straggler-handling.yml b/.github/workflows/straggler-handling.yml index 9eead301db..dfb463104e 100644 --- a/.github/workflows/straggler-handling.yml +++ b/.github/workflows/straggler-handling.yml @@ -6,6 +6,7 @@ name: Straggler Handling Test on: pull_request: branches: [ develop ] + types: [opened, synchronize, reopened, ready_for_review] permissions: contents: read @@ -16,6 +17,7 @@ env: jobs: build: + if: github.event.pull_request.draft == false strategy: matrix: os: ['ubuntu-latest', 'windows-latest'] diff --git a/.github/workflows/taskrunner.yml b/.github/workflows/taskrunner.yml index ea172e0732..1ae8a5af8b 100644 --- a/.github/workflows/taskrunner.yml +++ b/.github/workflows/taskrunner.yml @@ -6,6 +6,7 @@ name: TaskRunner on: pull_request: branches: [ develop ] + types: [opened, synchronize, reopened, ready_for_review] permissions: contents: read @@ -16,6 +17,7 @@ env: jobs: build: + if: github.event.pull_request.draft == false strategy: matrix: os: ['ubuntu-latest', 'windows-latest'] diff --git a/.github/workflows/taskrunner_eden_pipeline.yml b/.github/workflows/taskrunner_eden_pipeline.yml index cc35747ca5..e103fcf2aa 100644 --- a/.github/workflows/taskrunner_eden_pipeline.yml +++ b/.github/workflows/taskrunner_eden_pipeline.yml @@ -5,14 +5,15 @@ name: TaskRunner (Eden Compression) on: pull_request: - branches: [ develop ] + branches: [ develop ] + types: [opened, synchronize, reopened, ready_for_review] permissions: contents: read jobs: build: - if: contains(github.event.pull_request.labels.*.name, 'eden_compression') + if: (github.event.pull_request.draft == false && contains(github.event.pull_request.labels.*.name, 'eden_compression')) strategy: matrix: os: ['ubuntu-latest', 'windows-latest'] diff 
--git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index 40e3cafbfd..0ef7e20e8c 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -3,8 +3,11 @@ on: push: branches: [ develop ] pull_request: + branches: [ develop ] + types: [opened, synchronize, reopened, ready_for_review] jobs: build: + if: github.event.pull_request.draft == false permissions: contents: read # for actions/checkout to fetch code security-events: write # for github/codeql-action/upload-sarif to upload SARIF results diff --git a/.github/workflows/workflow_interface_101_mnist.yml b/.github/workflows/workflow_interface_101_mnist.yml index 1f2c75c95a..a980d1088d 100644 --- a/.github/workflows/workflow_interface_101_mnist.yml +++ b/.github/workflows/workflow_interface_101_mnist.yml @@ -7,6 +7,7 @@ name: Workflow Interface 101 MNIST Notebook on: pull_request: branches: [ develop ] + types: [opened, synchronize, reopened, ready_for_review] workflow_dispatch: @@ -15,6 +16,7 @@ permissions: jobs: run_notebook: + if: github.event.pull_request.draft == false runs-on: ubuntu-22.04 steps: - name: Checkout OpenFL repository From c90426d773e9dc640a84321500e3abb910d3b7c1 Mon Sep 17 00:00:00 2001 From: noopur Date: Sat, 9 Nov 2024 08:47:36 +0000 Subject: [PATCH 09/62] Fix code format issue Signed-off-by: noopur --- test-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test-requirements.txt b/test-requirements.txt index 535b6d5d52..758a396935 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,4 +1,4 @@ pytest==8.3.3 pytest-asyncio==0.24.0 pytest-mock==3.14.0 -ruamel.yaml \ No newline at end of file +ruamel.yaml From eda91a5f7bbbb7fcf74549656e5123c9ead617b0 Mon Sep 17 00:00:00 2001 From: noopur Date: Mon, 11 Nov 2024 04:38:44 +0000 Subject: [PATCH 10/62] Modified openfl_e2e to end_to_end along with other occurrences Signed-off-by: noopur --- .github/workflows/task_runner_e2e.yml | 10 ++++----- tests/{openfl_e2e => 
end_to_end}/README.md | 9 ++++---- tests/{openfl_e2e => end_to_end}/__init__.py | 0 tests/{openfl_e2e => end_to_end}/conftest.py | 10 ++++----- .../models/participants.py | 6 ++--- tests/{openfl_e2e => end_to_end}/pytest.ini | 0 .../test_suites/task_runner_tests.py | 4 ++-- .../utils/conftest_helper.py | 2 +- .../utils/constants.py | 0 .../utils/federation_helper.py | 2 +- .../utils/logger.py | 0 .../utils/subprocess_helper.py | 2 +- .../utils/xml_helper.py | 2 +- tests/openfl_e2e/requirements.txt | 7 ------ tests/openfl_e2e/test_suites/sample_tests.py | 22 ------------------- 15 files changed, 23 insertions(+), 53 deletions(-) rename tests/{openfl_e2e => end_to_end}/README.md (91%) rename tests/{openfl_e2e => end_to_end}/__init__.py (100%) rename tests/{openfl_e2e => end_to_end}/conftest.py (97%) rename tests/{openfl_e2e => end_to_end}/models/participants.py (98%) rename tests/{openfl_e2e => end_to_end}/pytest.ini (100%) rename tests/{openfl_e2e => end_to_end}/test_suites/task_runner_tests.py (96%) rename tests/{openfl_e2e => end_to_end}/utils/conftest_helper.py (96%) rename tests/{openfl_e2e => end_to_end}/utils/constants.py (100%) rename tests/{openfl_e2e => end_to_end}/utils/federation_helper.py (98%) rename tests/{openfl_e2e => end_to_end}/utils/logger.py (100%) rename tests/{openfl_e2e => end_to_end}/utils/subprocess_helper.py (98%) rename tests/{openfl_e2e => end_to_end}/utils/xml_helper.py (97%) delete mode 100644 tests/openfl_e2e/requirements.txt delete mode 100644 tests/openfl_e2e/test_suites/sample_tests.py diff --git a/.github/workflows/task_runner_e2e.yml b/.github/workflows/task_runner_e2e.yml index ba5c5c06c7..5a12ae3e57 100644 --- a/.github/workflows/task_runner_e2e.yml +++ b/.github/workflows/task_runner_e2e.yml @@ -1,5 +1,5 @@ #--------------------------------------------------------------------------- -# Workflow to run Task Runner E2E tests +# Workflow to run Task Runner end to end tests # Authors - Noopur, Payal Chaurasiya 
#--------------------------------------------------------------------------- name: Task Runner E2E @@ -67,7 +67,7 @@ jobs: run: | python -m pip install --upgrade pip pip install . - pip install -r tests/openfl_e2e/requirements.txt + pip install -r test-requirements.txt - name: Add runner IP to /etc/hosts id: add_runner_ip @@ -78,8 +78,8 @@ jobs: - name: Run Task Runner E2E tests id: run_task_runner_tests run: | - pytest -v tests/openfl_e2e/test_suites/task_runner_tests.py -m ${{ env.MODEL_NAME }} -s --num_rounds $NUM_ROUNDS --num_collaborators $NUM_COLLABORATORS --model_name ${{ env.MODEL_NAME }} - echo "Task runner e2e test run completed" + pytest -v tests/end_to_end/test_suites/task_runner_tests.py -m ${{ env.MODEL_NAME }} -s --num_rounds $NUM_ROUNDS --num_collaborators $NUM_COLLABORATORS --model_name ${{ env.MODEL_NAME }} + echo "Task runner end to end test run completed" env: NO_PROXY: localhost,127.0.0.1,aggregator @@ -87,7 +87,7 @@ jobs: id: print_test_summary if: steps.run_task_runner_tests.outcome == 'success' || steps.run_task_runner_tests.outcome == 'failure' run: | - python tests/openfl_e2e/utils/xml_helper.py + python tests/end_to_end/utils/xml_helper.py echo "Test summary printed" - name: Tar files # Tar the test results only if the tests were run diff --git a/tests/openfl_e2e/README.md b/tests/end_to_end/README.md similarity index 91% rename from tests/openfl_e2e/README.md rename to tests/end_to_end/README.md index 167252d47e..32d46d74a8 100644 --- a/tests/openfl_e2e/README.md +++ b/tests/end_to_end/README.md @@ -4,15 +4,14 @@ This project is a machine learning workspace that includes various models and te ## Project Structure -openfl_e2e +end_to_end ├── models # Central location for all model-related code for testing purpose ├── test_suites # Folder containing test files ├── utils # Folder containing helper files ├── __init__.py # To mark test directory as a Python package ├── conftest.py # Pytest framework configuration file ├── pytest.ini # Pytest 
initialisation file -├── README.md # Readme file -└── requirements.txt # Pytest specific requirements file +└── README.md # Readme file ## Pre-requisites @@ -23,7 +22,7 @@ Setup virtual environment and install OpenFL using [online documentation](https: To install the required dependencies on above virtual environment, run: ```sh -pip install -r requirements.txt +pip install -r test-requirements.txt ``` ## Usage @@ -42,7 +41,7 @@ To run a specific test case, use below command: ### Output Structure -openfl_e2e +end_to_end ├── results ├── # Based on the workspace name provided during test run. ├── results.xml # Output file in JUNIT. diff --git a/tests/openfl_e2e/__init__.py b/tests/end_to_end/__init__.py similarity index 100% rename from tests/openfl_e2e/__init__.py rename to tests/end_to_end/__init__.py diff --git a/tests/openfl_e2e/conftest.py b/tests/end_to_end/conftest.py similarity index 97% rename from tests/openfl_e2e/conftest.py rename to tests/end_to_end/conftest.py index 48752c0910..28bfd0551a 100644 --- a/tests/openfl_e2e/conftest.py +++ b/tests/end_to_end/conftest.py @@ -8,11 +8,11 @@ import xml.etree.ElementTree as ET import logging -from tests.openfl_e2e.utils.logger import configure_logging -from tests.openfl_e2e.utils.logger import logger as log -from tests.openfl_e2e.utils.conftest_helper import parse_arguments -import tests.openfl_e2e.utils.constants as constants -import tests.openfl_e2e.models.participants as participants +from tests.end_to_end.utils.logger import configure_logging +from tests.end_to_end.utils.logger import logger as log +from tests.end_to_end.utils.conftest_helper import parse_arguments +import tests.end_to_end.utils.constants as constants +import tests.end_to_end.models.participants as participants # Define a named tuple to store the objects for model owner, aggregator, and collaborators federation_fixture = collections.namedtuple( diff --git a/tests/openfl_e2e/models/participants.py b/tests/end_to_end/models/participants.py 
similarity index 98% rename from tests/openfl_e2e/models/participants.py rename to tests/end_to_end/models/participants.py index 99cd22df6c..845216b647 100644 --- a/tests/openfl_e2e/models/participants.py +++ b/tests/end_to_end/models/participants.py @@ -5,9 +5,9 @@ import ruamel.yaml from datetime import datetime -import tests.openfl_e2e.utils.constants as constants -from tests.openfl_e2e.utils.logger import logger as log -import tests.openfl_e2e.utils.subprocess_helper as sh +import tests.end_to_end.utils.constants as constants +from tests.end_to_end.utils.logger import logger as log +import tests.end_to_end.utils.subprocess_helper as sh yaml = ruamel.yaml.YAML() yaml.preserve_quotes = True diff --git a/tests/openfl_e2e/pytest.ini b/tests/end_to_end/pytest.ini similarity index 100% rename from tests/openfl_e2e/pytest.ini rename to tests/end_to_end/pytest.ini diff --git a/tests/openfl_e2e/test_suites/task_runner_tests.py b/tests/end_to_end/test_suites/task_runner_tests.py similarity index 96% rename from tests/openfl_e2e/test_suites/task_runner_tests.py rename to tests/end_to_end/test_suites/task_runner_tests.py index 595916bc36..1e31421dcc 100644 --- a/tests/openfl_e2e/test_suites/task_runner_tests.py +++ b/tests/end_to_end/test_suites/task_runner_tests.py @@ -3,8 +3,8 @@ import pytest -from tests.openfl_e2e.utils.logger import logger as log -from tests.openfl_e2e.utils import federation_helper as fed_helper +from tests.end_to_end.utils.logger import logger as log +from tests.end_to_end.utils import federation_helper as fed_helper @pytest.mark.torch_cnn_mnist diff --git a/tests/openfl_e2e/utils/conftest_helper.py b/tests/end_to_end/utils/conftest_helper.py similarity index 96% rename from tests/openfl_e2e/utils/conftest_helper.py rename to tests/end_to_end/utils/conftest_helper.py index 563d232751..d8159ad9a0 100644 --- a/tests/openfl_e2e/utils/conftest_helper.py +++ b/tests/end_to_end/utils/conftest_helper.py @@ -4,7 +4,7 @@ import argparse import sys -from 
tests.openfl_e2e.utils.logger import logger as log +from tests.end_to_end.utils.logger import logger as log def parse_arguments(): diff --git a/tests/openfl_e2e/utils/constants.py b/tests/end_to_end/utils/constants.py similarity index 100% rename from tests/openfl_e2e/utils/constants.py rename to tests/end_to_end/utils/constants.py diff --git a/tests/openfl_e2e/utils/federation_helper.py b/tests/end_to_end/utils/federation_helper.py similarity index 98% rename from tests/openfl_e2e/utils/federation_helper.py rename to tests/end_to_end/utils/federation_helper.py index c5a7cefd34..6eb2843e5e 100644 --- a/tests/openfl_e2e/utils/federation_helper.py +++ b/tests/end_to_end/utils/federation_helper.py @@ -4,7 +4,7 @@ import time import concurrent.futures -from tests.openfl_e2e.utils.logger import logger as log +from tests.end_to_end.utils.logger import logger as log def perform_csr_operations(fed_obj): diff --git a/tests/openfl_e2e/utils/logger.py b/tests/end_to_end/utils/logger.py similarity index 100% rename from tests/openfl_e2e/utils/logger.py rename to tests/end_to_end/utils/logger.py diff --git a/tests/openfl_e2e/utils/subprocess_helper.py b/tests/end_to_end/utils/subprocess_helper.py similarity index 98% rename from tests/openfl_e2e/utils/subprocess_helper.py rename to tests/end_to_end/utils/subprocess_helper.py index f9f272d726..988395f625 100644 --- a/tests/openfl_e2e/utils/subprocess_helper.py +++ b/tests/end_to_end/utils/subprocess_helper.py @@ -5,7 +5,7 @@ import time import traceback -from tests.openfl_e2e.utils.logger import logger as log +from tests.end_to_end.utils.logger import logger as log def run_command_background( diff --git a/tests/openfl_e2e/utils/xml_helper.py b/tests/end_to_end/utils/xml_helper.py similarity index 97% rename from tests/openfl_e2e/utils/xml_helper.py rename to tests/end_to_end/utils/xml_helper.py index 7fa753ea65..dcd0dda222 100644 --- a/tests/openfl_e2e/utils/xml_helper.py +++ b/tests/end_to_end/utils/xml_helper.py @@ -5,7 +5,7 
@@ from lxml import etree import os -from tests.openfl_e2e.utils.logger import logger as log +from tests.end_to_end.utils.logger import logger as log # Initialize the XML parser parser = etree.XMLParser(recover=True, encoding='utf-8') diff --git a/tests/openfl_e2e/requirements.txt b/tests/openfl_e2e/requirements.txt deleted file mode 100644 index e9190362e7..0000000000 --- a/tests/openfl_e2e/requirements.txt +++ /dev/null @@ -1,7 +0,0 @@ -lxml -pytest -pytest-html -pytest-ordering -pytest-xdist -pyyaml -ruamel.yaml diff --git a/tests/openfl_e2e/test_suites/sample_tests.py b/tests/openfl_e2e/test_suites/sample_tests.py deleted file mode 100644 index b12f28833c..0000000000 --- a/tests/openfl_e2e/test_suites/sample_tests.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2024-2025 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -from tests.openfl_e2e.utils.logger import logger as log - - -# This file contains sample test functions to be run by pytest - -# Function to be tested -def add(a, b): - return a + b - - -# Test function -def test_add(): - log.info("Running test_add") - assert add(1, 2) == 3 - assert add(-1, 1) == 0 - assert add(0, 0) == 0 - log.info("test_add passed") From 790c98eed4be77f43ba309fc39afe3aef7b54e82 Mon Sep 17 00:00:00 2001 From: noopur Date: Mon, 11 Nov 2024 07:16:42 +0000 Subject: [PATCH 11/62] Error handling in participants file Signed-off-by: noopur --- tests/end_to_end/models/participants.py | 57 ++++++++++++++++++++----- 1 file changed, 46 insertions(+), 11 deletions(-) diff --git a/tests/end_to_end/models/participants.py b/tests/end_to_end/models/participants.py index 845216b647..73a5b2e032 100644 --- a/tests/end_to_end/models/participants.py +++ b/tests/end_to_end/models/participants.py @@ -42,10 +42,14 @@ def create_workspace(self, results_dir=None): """ try: results_dir = results_dir if results_dir else os.getcwd() - sh.run_command( + return_code, _, error = sh.run_command( f"fx workspace create --prefix 
{self.workspace_name} --template {self.model_name}", work_dir=results_dir, ) + if return_code != 0: + log.error(f"Failed to create the workspace: {error}") + raise Exception(f"Failed to create the workspace: {error}") + log.info(f"Created the workspace {self.workspace_name} for the {self.model_name} model") self.workspace_path = os.path.join(results_dir, self.workspace_name) log.info(f"Workspace path: {self.workspace_path}") @@ -112,8 +116,12 @@ def initialize_plan(self, agg_domain_name): """ try: log.info("Initializing the plan. It will take some time to complete..") - sh.run_command(f"fx plan initialize -a {agg_domain_name}", work_dir=self.workspace_path) - log.info(f"Initialized the plan for the workspace {self.workspace_name}") + return_code, _, error = sh.run_command(f"fx plan initialize -a {agg_domain_name}", work_dir=self.workspace_path) + if return_code != 0: + log.error(f"Failed to initialize the plan: {error}") + raise Exception(f"Failed to initialize the plan: {error}") + + log.info(f"Initialized the plan for the workspace {self.workspace_name}") except Exception as e: log.error(f"Failed to initialize the plan: {e}") raise e @@ -126,8 +134,12 @@ def certify_workspace(self): bool: True if successful, else False """ try: - sh.run_command("fx workspace certify", work_dir=self.workspace_path) - log.info(f"Certified the workspace {self.workspace_name}") + return_code, _, error = sh.run_command("fx workspace certify", work_dir=self.workspace_path) + if return_code != 0: + log.error(f"Failed to certify the workspace: {error}") + raise Exception(f"Failed to certify the workspace: {error}") + + log.info(f"Certified the workspace {self.workspace_name}") except Exception as e: log.error(f"Failed to certify the workspace: {e}") raise e @@ -140,7 +152,11 @@ def export_workspace(self): bool: True if successful, else False """ try: - sh.run_command("fx workspace export", work_dir=self.workspace_path) + return_code, _, error = sh.run_command("fx workspace export", 
work_dir=self.workspace_path) + if return_code != 0: + log.error(f"Failed to export the workspace: {error}") + raise Exception(f"Failed to export the workspace: {error}") + log.info(f"Exported the workspace") except Exception as e: log.error(f"Failed to export the workspace: {e}") @@ -156,9 +172,13 @@ def import_workspace(self, workspace_zip): bool: True if successful, else False """ try: - sh.run_command( + return_code, _, error = sh.run_command( f"fx workspace import --archive {workspace_zip}", work_dir=self.workspace_path ) + if return_code != 0: + log.error(f"Failed to import the workspace: {error}") + raise Exception(f"Failed to import the workspace: {error}") + log.info(f"Imported the workspace") except Exception as e: log.error(f"Failed to import the workspace: {e}") @@ -187,10 +207,14 @@ def generate_sign_request(self): bool: True if successful, else False """ try: - sh.run_command( + return_code, _, error = sh.run_command( f"fx aggregator generate-cert-request --fqdn {self.agg_domain_name}", work_dir=self.workspace_path, ) + if return_code != 0: + log.error(f"Failed to generate the sign request: {error}") + raise Exception(f"Failed to generate the sign request: {error}") + log.info(f"Generated a sign request for {self.name}") except Exception as e: log.error(f"Failed to generate the sign request: {e}") @@ -205,10 +229,14 @@ def certify_request(self): """ log.info(f"CA should sign the aggregator {self.name} request") try: - sh.run_command( + return_code, _, error = sh.run_command( f"fx aggregator certify --silent --fqdn {self.agg_domain_name}", work_dir=self.workspace_path, ) + if return_code != 0: + log.error(f"Failed to certify the aggregator request: {error}") + raise Exception(f"Failed to certify the aggregator request: {error}") + log.info(f"CA signed the request from {self.name}") except Exception as e: log.error(f"Failed to certify the aggregator request : {e}") @@ -294,10 +322,14 @@ def generate_sign_request(self): bool: True if successful, else 
False """ try: - sh.run_command( + return_code, _, error = sh.run_command( f"fx collaborator generate-cert-request -n {self.collaborator_name}", work_dir=self.workspace_path, ) + if return_code != 0: + log.error(f"Failed to generate the sign request: {error}") + raise Exception(f"Failed to generate the sign request: {error}") + log.info(f"Generated a sign request for {self.collaborator_name}") except Exception as e: log.error(f"Failed to generate the sign request: {e}") @@ -311,10 +343,13 @@ def create_collaborator(self): bool: True if successful, else False """ try: - sh.run_command( + return_code, _, error = sh.run_command( f"fx collaborator create -n {self.collaborator_name} -d {self.data_directory_path}", work_dir=self.workspace_path, ) + if return_code != 0: + log.error(f"Failed to create the collaborator: {error}") + raise Exception(f"Failed to create the collaborator: {error}") log.info( f"Created {self.collaborator_name} with the data directory {self.data_directory_path}" ) From 1dccbba4b7a8d3dace5af2a2dd4f7389b82cb477 Mon Sep 17 00:00:00 2001 From: Teodor Parvanov Date: Mon, 11 Nov 2024 08:37:44 +0100 Subject: [PATCH 12/62] Revert "Fixing code formatting issues reported by flake8" This reverts commit e554695917cdf008c6091715e434d5f077f90068. 
Signed-off-by: Teodor Parvanov --- openfl-workspace/keras_cnn_mnist/src/tfmnist_inmemory.py | 6 +++--- .../keras_cnn_with_compression/src/tfmnist_inmemory.py | 6 +++--- openfl-workspace/torch_cnn_histology/src/dataloader.py | 6 +++--- openfl-workspace/torch_cnn_mnist/src/dataloader.py | 6 +++--- .../src/ptmnist_inmemory.py | 6 +++--- .../torch_cnn_mnist_fed_eval/src/ptmnist_inmemory.py | 6 +++--- .../torch_cnn_mnist_straggler_check/src/ptmnist_inmemory.py | 6 +++--- openfl-workspace/torch_unet_kvasir/src/data_loader.py | 6 +++--- 8 files changed, 24 insertions(+), 24 deletions(-) diff --git a/openfl-workspace/keras_cnn_mnist/src/tfmnist_inmemory.py b/openfl-workspace/keras_cnn_mnist/src/tfmnist_inmemory.py index bbad002681..51f4ccf739 100644 --- a/openfl-workspace/keras_cnn_mnist/src/tfmnist_inmemory.py +++ b/openfl-workspace/keras_cnn_mnist/src/tfmnist_inmemory.py @@ -28,10 +28,10 @@ def __init__(self, data_path, batch_size, **kwargs): # collaborator list. try: int(data_path) - except ValueError: + except: raise ValueError( - "Expected `%s` to be representable as `int`, as it refers to the data shard " - + "number used by the collaborator.", + "Expected `%s` to be representable as `int`, as it refers to the data shard " + + "number used by the collaborator.", data_path ) diff --git a/openfl-workspace/keras_cnn_with_compression/src/tfmnist_inmemory.py b/openfl-workspace/keras_cnn_with_compression/src/tfmnist_inmemory.py index f932502e31..80b913e5f5 100644 --- a/openfl-workspace/keras_cnn_with_compression/src/tfmnist_inmemory.py +++ b/openfl-workspace/keras_cnn_with_compression/src/tfmnist_inmemory.py @@ -28,10 +28,10 @@ def __init__(self, data_path, batch_size, **kwargs): # collaborator list. 
try: int(data_path) - except ValueError: + except: raise ValueError( - "Expected `%s` to be representable as `int`, as it refers to the data shard " - + "number used by the collaborator.", + "Expected `%s` to be representable as `int`, as it refers to the data shard " + + "number used by the collaborator.", data_path ) diff --git a/openfl-workspace/torch_cnn_histology/src/dataloader.py b/openfl-workspace/torch_cnn_histology/src/dataloader.py index b0655cf5f0..fa4ae86778 100644 --- a/openfl-workspace/torch_cnn_histology/src/dataloader.py +++ b/openfl-workspace/torch_cnn_histology/src/dataloader.py @@ -39,10 +39,10 @@ def __init__(self, data_path, batch_size, **kwargs): try: int(data_path) - except ValueError: + except: raise ValueError( - "Expected `%s` to be representable as `int`, as it refers to the data shard " - + "number used by the collaborator.", + "Expected `%s` to be representable as `int`, as it refers to the data shard " + + "number used by the collaborator.", data_path ) diff --git a/openfl-workspace/torch_cnn_mnist/src/dataloader.py b/openfl-workspace/torch_cnn_mnist/src/dataloader.py index 0557e81af4..3f3eeeb0bb 100644 --- a/openfl-workspace/torch_cnn_mnist/src/dataloader.py +++ b/openfl-workspace/torch_cnn_mnist/src/dataloader.py @@ -28,10 +28,10 @@ def __init__(self, data_path, batch_size, **kwargs): try: int(data_path) - except ValueError: + except: raise ValueError( - "Expected `%s` to be representable as `int`, as it refers to the data shard " - + "number used by the collaborator.", + "Expected `%s` to be representable as `int`, as it refers to the data shard " + + "number used by the collaborator.", data_path ) diff --git a/openfl-workspace/torch_cnn_mnist_eden_compression/src/ptmnist_inmemory.py b/openfl-workspace/torch_cnn_mnist_eden_compression/src/ptmnist_inmemory.py index ecd9f777e4..74c8ec5d03 100644 --- a/openfl-workspace/torch_cnn_mnist_eden_compression/src/ptmnist_inmemory.py +++ 
b/openfl-workspace/torch_cnn_mnist_eden_compression/src/ptmnist_inmemory.py @@ -29,10 +29,10 @@ def __init__(self, data_path, batch_size, **kwargs): try: int(data_path) - except ValueError: + except: raise ValueError( - "Expected `%s` to be representable as `int`, as it refers to the data shard " - + "number used by the collaborator.", + "Expected `%s` to be representable as `int`, as it refers to the data shard " + + "number used by the collaborator.", data_path ) diff --git a/openfl-workspace/torch_cnn_mnist_fed_eval/src/ptmnist_inmemory.py b/openfl-workspace/torch_cnn_mnist_fed_eval/src/ptmnist_inmemory.py index 0438e5d812..324545a763 100644 --- a/openfl-workspace/torch_cnn_mnist_fed_eval/src/ptmnist_inmemory.py +++ b/openfl-workspace/torch_cnn_mnist_fed_eval/src/ptmnist_inmemory.py @@ -29,10 +29,10 @@ def __init__(self, data_path, batch_size, **kwargs): try: int(data_path) - except ValueError: + except: raise ValueError( - "Expected `%s` to be representable as `int`, as it refers to the data shard " - + "number used by the collaborator.", + "Expected `%s` to be representable as `int`, as it refers to the data shard " + + "number used by the collaborator.", data_path ) diff --git a/openfl-workspace/torch_cnn_mnist_straggler_check/src/ptmnist_inmemory.py b/openfl-workspace/torch_cnn_mnist_straggler_check/src/ptmnist_inmemory.py index 508bea12d0..ccf234239c 100644 --- a/openfl-workspace/torch_cnn_mnist_straggler_check/src/ptmnist_inmemory.py +++ b/openfl-workspace/torch_cnn_mnist_straggler_check/src/ptmnist_inmemory.py @@ -28,10 +28,10 @@ def __init__(self, data_path, batch_size, **kwargs): # of collaborator list. 
try: int(data_path) - except ValueError: + except: raise ValueError( - "Expected `%s` to be representable as `int`, as it refers to the data shard " - + "number used by the collaborator.", + "Expected `%s` to be representable as `int`, as it refers to the data shard " + + "number used by the collaborator.", data_path ) diff --git a/openfl-workspace/torch_unet_kvasir/src/data_loader.py b/openfl-workspace/torch_unet_kvasir/src/data_loader.py index 9d454b28af..0f968808e4 100644 --- a/openfl-workspace/torch_unet_kvasir/src/data_loader.py +++ b/openfl-workspace/torch_unet_kvasir/src/data_loader.py @@ -124,10 +124,10 @@ def __init__(self, data_path, batch_size, **kwargs): try: int(data_path) - except ValueError: + except: raise ValueError( - "Expected `%s` to be representable as `int`, as it refers to the data shard " - + "number used by the collaborator.", + "Expected `%s` to be representable as `int`, as it refers to the data shard " + + "number used by the collaborator.", data_path ) From 296f0cf1f6686c5aca41aacddbf3308ac539a6c2 Mon Sep 17 00:00:00 2001 From: noopur Date: Mon, 11 Nov 2024 08:50:43 +0000 Subject: [PATCH 13/62] README.md update Signed-off-by: noopur --- tests/end_to_end/README.md | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/tests/end_to_end/README.md b/tests/end_to_end/README.md index 32d46d74a8..e0633f6186 100644 --- a/tests/end_to_end/README.md +++ b/tests/end_to_end/README.md @@ -4,6 +4,7 @@ This project is a machine learning workspace that includes various models and te ## Project Structure +``` end_to_end ├── models # Central location for all model-related code for testing purpose ├── test_suites # Folder containing test files @@ -12,6 +13,7 @@ end_to_end ├── conftest.py # Pytest framework configuration file ├── pytest.ini # Pytest initialisation file └── README.md # Readme file +``` ## Pre-requisites @@ -31,21 +33,27 @@ pip install -r test-requirements.txt To run all the test cases under test_suites, use the 
following command: -```python -m pytest -s``` +```sh +python -m pytest -s +``` To run a specific test case, use below command: -```python -m pytest test_suites/ -k -s``` +```sh +python -m pytest test_suites/ -k -s +``` ** -s will ensure all the logs are printed on screen. Ignore, if not required. ### Output Structure +``` end_to_end ├── results ├── # Based on the workspace name provided during test run. ├── results.xml # Output file in JUNIT. ├── deployment.log # Log file containing step by step test progress. +``` ## Contribution Please ensure that you have tested your changes thoroughly before submitting a pull request. From b70e4e1dd880d4b233f2a6ef43e5ad89594c7760 Mon Sep 17 00:00:00 2001 From: "Shah, Karan" Date: Fri, 8 Nov 2024 12:45:10 +0530 Subject: [PATCH 14/62] Install Gramine in base image Signed-off-by: Shah, Karan --- openfl-docker/Dockerfile.base | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openfl-docker/Dockerfile.base b/openfl-docker/Dockerfile.base index c85f5ca619..139767b544 100644 --- a/openfl-docker/Dockerfile.base +++ b/openfl-docker/Dockerfile.base @@ -1,7 +1,7 @@ # Copyright (C) 2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # ------------------------------------ -# OpenFL Base Image +# OpenFL Base Image w/ Gramine support # $> docker build . 
-t openfl -f Dockerfile.base [--build-arg OPENFL_REVISION=GIT_URL@COMMIT_ID] # ------------------------------------ FROM ubuntu:22.04 AS base From 69f7ba36eb0d4b9133815251f44372a40d0e73cf Mon Sep 17 00:00:00 2001 From: noopur Date: Mon, 11 Nov 2024 10:27:32 +0000 Subject: [PATCH 15/62] Incorporated review comments Signed-off-by: noopur --- .github/workflows/task_runner_e2e.yml | 6 +- tests/end_to_end/README.md | 29 +++-- tests/end_to_end/__init__.py | 2 +- tests/end_to_end/conftest.py | 5 +- tests/end_to_end/models/participants.py | 105 +++++++++--------- tests/end_to_end/pytest.ini | 2 - tests/end_to_end/test_suites/sample_tests.py | 34 ++++++ .../test_suites/task_runner_tests.py | 36 +----- tests/end_to_end/utils/conftest_helper.py | 2 +- tests/end_to_end/utils/constants.py | 4 +- tests/end_to_end/utils/federation_helper.py | 20 ++-- tests/end_to_end/utils/logger.py | 2 +- tests/end_to_end/utils/subprocess_helper.py | 2 +- tests/end_to_end/utils/xml_helper.py | 2 +- 14 files changed, 129 insertions(+), 122 deletions(-) create mode 100644 tests/end_to_end/test_suites/sample_tests.py diff --git a/.github/workflows/task_runner_e2e.yml b/.github/workflows/task_runner_e2e.yml index 5a12ae3e57..7bb9baa311 100644 --- a/.github/workflows/task_runner_e2e.yml +++ b/.github/workflows/task_runner_e2e.yml @@ -32,13 +32,11 @@ jobs: test_run: name: test runs-on: ubuntu-22.04 - + timeout-minutes: 120 # 2 hours strategy: matrix: # There are open issues for some of the models, so excluding them for now: - # 1. https://github.com/securefederatedai/openfl/issues/1126 - # 2. 
https://github.com/securefederatedai/openfl/issues/1127 - # model_name: [ "torch_cnn_mnist", "keras_cnn_mnist", "torch_cnn_histology", "tf_2dunet", "tf_cnn_histology" ] + # model_name: [ "torch_cnn_mnist", "keras_cnn_mnist", "torch_cnn_histology" ] model_name: [ "torch_cnn_mnist", "keras_cnn_mnist" ] python_version: [ "3.8", "3.9", "3.10" ] fail-fast: false # do not immediately fail if one of the combinations fail diff --git a/tests/end_to_end/README.md b/tests/end_to_end/README.md index e0633f6186..4db236cd81 100644 --- a/tests/end_to_end/README.md +++ b/tests/end_to_end/README.md @@ -1,11 +1,11 @@ -# Project Title +# End-to-end Pytest Framework -This project is a machine learning workspace that includes various models and test suites. It is structured to facilitate the development, testing, and deployment of machine learning models. +This project aims at integration testing of ```openfl-workspace``` using pytest framework. -## Project Structure +## Test Structure ``` -end_to_end +tests/end_to_end ├── models # Central location for all model-related code for testing purpose ├── test_suites # Folder containing test files ├── utils # Folder containing helper files @@ -17,7 +17,8 @@ end_to_end ## Pre-requisites -Setup virtual environment and install OpenFL using [online documentation](https://openfl.readthedocs.io/en/latest/get_started/installation.html). +1. Setup virtual environment and install OpenFL using [online documentation](https://openfl.readthedocs.io/en/latest/get_started/installation.html). +2. Ensure that the OpenFL workspace (inside openfl-workspace) is present for the model being tested. If not, create it first. 
## Installation @@ -31,28 +32,26 @@ pip install -r test-requirements.txt ### Running Tests -To run all the test cases under test_suites, use the following command: - -```sh -python -m pytest -s -``` - To run a specific test case, use below command: ```sh -python -m pytest test_suites/ -k -s +python -m pytest tests/end_to_end/test_suites/ -k -s ``` ** -s will ensure all the logs are printed on screen. Ignore, if not required. +To modify the number of collaborators, rounds to train and/or model name, use below parameters: +1. --num_collaborators +2. --num_rounds +3. --model_name + ### Output Structure ``` -end_to_end -├── results +results ├── # Based on the workspace name provided during test run. ├── results.xml # Output file in JUNIT. - ├── deployment.log # Log file containing step by step test progress. + └── deployment.log # Log file containing step by step test progress. ``` ## Contribution diff --git a/tests/end_to_end/__init__.py b/tests/end_to_end/__init__.py index c057ed1f6b..de233d6a7a 100644 --- a/tests/end_to_end/__init__.py +++ b/tests/end_to_end/__init__.py @@ -1,3 +1,3 @@ -# Copyright (C) 2024-2025 Intel Corporation +# Copyright (C) 2020-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """Tests package.""" diff --git a/tests/end_to_end/conftest.py b/tests/end_to_end/conftest.py index 28bfd0551a..b6a904bf47 100644 --- a/tests/end_to_end/conftest.py +++ b/tests/end_to_end/conftest.py @@ -1,4 +1,4 @@ -# Copyright 2024-2025 Intel Corporation +# Copyright 2020-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -204,11 +204,14 @@ def fx_federation(request, pytestconfig): """ Fixture for federation. This fixture is used to create the model owner, aggregator, and collaborators. It also creates workspace. + Assumption: OpenFL workspace is present for the model being tested. Args: request: pytest request object. Model name is passed as a parameter to the fixture from test cases. 
pytestconfig: pytest config object Returns: federation_fixture: Named tuple containing the objects for model owner, aggregator, and collaborators + + Note: As this is a module level fixture, thus no import is required at test level. """ log.info("Fixture for federation setup") collaborators = [] diff --git a/tests/end_to_end/models/participants.py b/tests/end_to_end/models/participants.py index 73a5b2e032..c262801381 100644 --- a/tests/end_to_end/models/participants.py +++ b/tests/end_to_end/models/participants.py @@ -1,4 +1,4 @@ -# Copyright 2024-2025 Intel Corporation +# Copyright 2020-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import os @@ -22,6 +22,9 @@ class ModelOwner: def __init__(self, workspace_name, model_name): """ Initialize the ModelOwner class + Args: + workspace_name (str): Workspace name + model_name (str): Model name """ self.workspace_name = workspace_name self.model_name = model_name @@ -77,6 +80,34 @@ def get_workspace_path(self, results_dir, workspace_name): raise FileNotFoundError(f"Workspace {workspace_name} does not exist in {results_dir}") return self.workspace_path + def sign_collaborator_csr(self, collaborator_name): + """ + Sign the CSR for the collaborator + Args: + collaborator_name (str): Name of the collaborator + Returns: + bool: True if successful, else False + """ + try: + zip_name = f"col_{collaborator_name}_to_agg_cert_request.zip" + col_zip = os.path.join(os.getcwd(), self.workspace_path, zip_name) + return_code, output, error = sh.run_command( + f"fx collaborator certify --request-pkg {col_zip} -s", work_dir=self.workspace_path + ) + msg_received = [line for line in output if constants.SUCCESS_MARKER in line] + log.info(f"Message received: {msg_received}") + if return_code == 0 and len(msg_received): + log.info( + f"Successfully signed the CSR for the collaborator {collaborator_name} with zip path {col_zip}" + ) + else: + log.error(f"Failed to sign the CSR for collaborator {collaborator_name}: {error}") + + 
except Exception as e: + log.error(f"Failed to sign the CSR: {e}") + raise e + return True + def modify_plan(self, new_rounds=None, num_collaborators=None): """ Modify the plan to train the model @@ -145,6 +176,28 @@ def certify_workspace(self): raise e return True + def certify_request(self): + """ + Certify the aggregator request + Returns: + bool: True if successful, else False + """ + log.info(f"CA should sign the aggregator {self.name} request") + try: + return_code, _, error = sh.run_command( + f"fx aggregator certify --silent --fqdn {self.agg_domain_name}", + work_dir=self.workspace_path, + ) + if return_code != 0: + log.error(f"Failed to certify the aggregator request: {error}") + raise Exception(f"Failed to certify the aggregator request: {error}") + + log.info(f"CA signed the request from {self.name}") + except Exception as e: + log.error(f"Failed to certify the aggregator request : {e}") + raise e + return True + def export_workspace(self): """ Export the workspace @@ -221,56 +274,6 @@ def generate_sign_request(self): raise e return True - def certify_request(self): - """ - Certify the aggregator request - Returns: - bool: True if successful, else False - """ - log.info(f"CA should sign the aggregator {self.name} request") - try: - return_code, _, error = sh.run_command( - f"fx aggregator certify --silent --fqdn {self.agg_domain_name}", - work_dir=self.workspace_path, - ) - if return_code != 0: - log.error(f"Failed to certify the aggregator request: {error}") - raise Exception(f"Failed to certify the aggregator request: {error}") - - log.info(f"CA signed the request from {self.name}") - except Exception as e: - log.error(f"Failed to certify the aggregator request : {e}") - raise e - return True - - def sign_collaborator_csr(self, collaborator_name): - """ - Sign the CSR for the collaborator - Args: - collaborator_name (str): Name of the collaborator - Returns: - bool: True if successful, else False - """ - try: - zip_name = 
f"col_{collaborator_name}_to_agg_cert_request.zip" - col_zip = os.path.join(os.getcwd(), self.workspace_path, zip_name) - return_code, output, error = sh.run_command( - f"fx collaborator certify --request-pkg {col_zip} -s", work_dir=self.workspace_path - ) - msg_received = [line for line in output if constants.SUCCESS_MARKER in line] - log.info(f"Message received: {msg_received}") - if return_code == 0 and len(msg_received): - log.info( - f"Successfully signed the CSR for the collaborator {collaborator_name} with zip path {col_zip}" - ) - else: - log.error(f"Failed to sign the CSR for collaborator {collaborator_name}: {error}") - - except Exception as e: - log.error(f"Failed to sign the CSR: {e}") - raise e - return True - def start(self): """ Start the aggregator diff --git a/tests/end_to_end/pytest.ini b/tests/end_to_end/pytest.ini index 9f23293c5a..bfa1f84164 100644 --- a/tests/end_to_end/pytest.ini +++ b/tests/end_to_end/pytest.ini @@ -8,5 +8,3 @@ markers = torch_cnn_mnist: mark a test as a torch CNN MNIST test. keras_cnn_mnist: mark a test as a Keras CNN MNIST test. torch_cnn_histology: mark a test as a torch CNN histology test. - tf_2dunet: mark a test as a tf 2D U-Net test. - tf_cnn_histology: mark a test as a tf CNN histology test. diff --git a/tests/end_to_end/test_suites/sample_tests.py b/tests/end_to_end/test_suites/sample_tests.py new file mode 100644 index 0000000000..d6c1ee0f4d --- /dev/null +++ b/tests/end_to_end/test_suites/sample_tests.py @@ -0,0 +1,34 @@ +# Copyright 2020-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from tests.end_to_end.utils.logger import logger as log +from tests.end_to_end.utils import federation_helper as fed_helper + + +# ** IMPORTANT **: This is just an example on how to add a test with below pre-requisites. +# Task Runner API Test function for federation run using sample_model +# 1. 
Create OpenFL workspace, if not present for the model and add relevant dataset and its path in plan/data.yaml +# 2. Append the model name to ModelName enum in tests/end_to_end/utils/constants.py +# 3. Add the model name to tests/end_to_end/pytest.ini marker, if not present +# 4. Use fx_federation fixture in the test function - it will provide the federation object. +# 5. Fixture will contain - model_owner, aggregator, collaborators, model_name, workspace_path, results_dir +# 6. Setup PKI for trusted communication within the federation +# 7. Start the federation using aggregator and given no of collaborators. +# 8. Verify the completion of the federation run. + +@pytest.mark.sample_model +def test_sample_model(fx_federation): + """ + Add a proper docstring here. + """ + log.info(f"Running sample model test {fx_federation.model_name}") + # Setup PKI for trusted communication within the federation + assert fed_helper.setup_pki(fx_federation), "Failed to setup PKI" + + # Start the federation + results = fed_helper.run_federation(fx_federation) + + # Verify the completion of the federation run + assert fed_helper.verify_federation_run_completion(fx_federation, results), "Federation completion failed" diff --git a/tests/end_to_end/test_suites/task_runner_tests.py b/tests/end_to_end/test_suites/task_runner_tests.py index 1e31421dcc..32dd1a2eb0 100644 --- a/tests/end_to_end/test_suites/task_runner_tests.py +++ b/tests/end_to_end/test_suites/task_runner_tests.py @@ -1,4 +1,4 @@ -# Copyright 2024-2025 Intel Corporation +# Copyright 2020-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest @@ -15,7 +15,7 @@ def test_torch_cnn_mnist(fx_federation): log.info(f"Test for torch_cnn_mnist with fx_federation: {fx_federation}") # Perform CSR operations like generating sign request, certifying request, etc. 
- assert fed_helper.perform_csr_operations(fx_federation), "Failed to perform CSR operations" + assert fed_helper.setup_pki(fx_federation), "Failed to perform CSR operations" # Start the federation results = fed_helper.run_federation(fx_federation) @@ -29,7 +29,7 @@ def test_keras_cnn_mnist(fx_federation): log.info(f"Test for keras_cnn_mnist with fx_federation: {fx_federation}") # Perform CSR operations like generating sign request, certifying request, etc. - assert fed_helper.perform_csr_operations(fx_federation), "Failed to perform CSR operations" + assert fed_helper.setup_pki(fx_federation), "Failed to perform CSR operations" # Start the federation results = fed_helper.run_federation(fx_federation) @@ -46,35 +46,7 @@ def test_torch_cnn_histology(fx_federation): log.info(f"Test for torch_cnn_histology with fx_federation: {fx_federation}") # Perform CSR operations like generating sign request, certifying request, etc. - assert fed_helper.perform_csr_operations(fx_federation), "Failed to perform CSR operations" - - # Start the federation - results = fed_helper.run_federation(fx_federation) - - # Verify the completion of the federation run - assert fed_helper.verify_federation_run_completion(fx_federation, results), "Federation completion failed" - - -@pytest.mark.tf_2dunet -def test_tf_2dunet(fx_federation): - log.info(f"Test for tf_2dunet with fx_federation: {fx_federation}") - - # Perform CSR operations like generating sign request, certifying request, etc. 
- assert fed_helper.perform_csr_operations(fx_federation), "Failed to perform CSR operations" - - # Start the federation - results = fed_helper.run_federation(fx_federation) - - # Verify the completion of the federation run - assert fed_helper.verify_federation_run_completion(fx_federation, results), "Federation completion failed" - - -@pytest.mark.tf_cnn_histology -def test_tf_cnn_histology(fx_federation): - log.info(f"Test for tf_cnn_histology with fx_federation: {fx_federation}") - - # Perform CSR operations like generating sign request, certifying request, etc. - assert fed_helper.perform_csr_operations(fx_federation), "Failed to perform CSR operations" + assert fed_helper.setup_pki(fx_federation), "Failed to perform CSR operations" # Start the federation results = fed_helper.run_federation(fx_federation) diff --git a/tests/end_to_end/utils/conftest_helper.py b/tests/end_to_end/utils/conftest_helper.py index d8159ad9a0..352e25ba4b 100644 --- a/tests/end_to_end/utils/conftest_helper.py +++ b/tests/end_to_end/utils/conftest_helper.py @@ -1,4 +1,4 @@ -# Copyright 2024-2025 Intel Corporation +# Copyright 2020-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import argparse diff --git a/tests/end_to_end/utils/constants.py b/tests/end_to_end/utils/constants.py index c0312e1ea8..0b724c7ced 100644 --- a/tests/end_to_end/utils/constants.py +++ b/tests/end_to_end/utils/constants.py @@ -1,4 +1,4 @@ -# Copyright 2024-2025 Intel Corporation +# Copyright 2020-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from enum import Enum @@ -13,8 +13,6 @@ class ModelName(Enum): TORCH_CNN_MNIST = "torch_cnn_mnist" KERAS_CNN_MNIST = "keras_cnn_mnist" TORCH_CNN_HISTOLOGY = "torch_cnn_histology" - TF_2DUNET = "tf_2dunet" - TF_CNN_HISTOLOGY = "tf_cnn_histology" NUM_COLLABORATORS = 2 NUM_ROUNDS = 5 diff --git a/tests/end_to_end/utils/federation_helper.py b/tests/end_to_end/utils/federation_helper.py index 6eb2843e5e..f77a29c2b6 100644 --- 
a/tests/end_to_end/utils/federation_helper.py +++ b/tests/end_to_end/utils/federation_helper.py @@ -1,15 +1,17 @@ -# Copyright 2024-2025 Intel Corporation +# Copyright 2020-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import time import concurrent.futures +from tests.end_to_end.utils.constants import SUCCESS_MARKER from tests.end_to_end.utils.logger import logger as log -def perform_csr_operations(fed_obj): +def setup_pki(fed_obj): """ - Perform CSR operations like generating sign request, certifying request, etc. + Setup PKI for trusted communication within the federation + Args: fed_obj (object): Federation fixture object Returns: @@ -20,7 +22,7 @@ def perform_csr_operations(fed_obj): try: log.info(f"Performing operations for {fed_obj.aggregator.name}") fed_obj.aggregator.generate_sign_request() - fed_obj.aggregator.certify_request() + fed_obj.model_owner.certify_request() except Exception as e: log.error(f"Failed to perform aggregator operations: {e}") raise e @@ -32,7 +34,7 @@ def perform_csr_operations(fed_obj): collaborator.create_collaborator() collaborator.generate_sign_request() # Below step will add collaborator entries in cols.yaml file. 
- fed_obj.aggregator.sign_collaborator_csr(collaborator.collaborator_name) + fed_obj.model_owner.sign_collaborator_csr(collaborator.collaborator_name) collaborator.import_certify_csr() except Exception as e: log.error(f"Failed to perform collaborator operations: {e}") @@ -105,24 +107,24 @@ def _verify_completion_for_participant(participant, result_file): bool: True if successful, else False """ # Wait for the successful output message to appear in the log till timeout - timeout = 100000 # in seconds + timeout = 900 # in seconds log.info(f"Printing the last line of the log file for {participant.name} to track the progress") with open(result_file, 'r') as file: content = file.read() start_time = time.time() while ( - "OK" not in content and time.time() - start_time < timeout + SUCCESS_MARKER not in content and time.time() - start_time < timeout ): with open(result_file, 'r') as file: content = file.read() # Print last 2 lines of the log file on screen to track the progress log.info(f"{participant.name}: {content.splitlines()[-1:]}") - if "OK" in content: + if SUCCESS_MARKER in content: break log.info(f"Process is yet to complete for {participant.name}") time.sleep(45) - if "OK" not in content: + if SUCCESS_MARKER not in content: log.error(f"Process failed/is incomplete for {participant.name} after timeout of {timeout} seconds") return False else: diff --git a/tests/end_to_end/utils/logger.py b/tests/end_to_end/utils/logger.py index b42d4c9754..b3e9d95311 100644 --- a/tests/end_to_end/utils/logger.py +++ b/tests/end_to_end/utils/logger.py @@ -1,4 +1,4 @@ -# Copyright 2024-2025 Intel Corporation +# Copyright 2020-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import logging diff --git a/tests/end_to_end/utils/subprocess_helper.py b/tests/end_to_end/utils/subprocess_helper.py index 988395f625..d11abc2f2c 100644 --- a/tests/end_to_end/utils/subprocess_helper.py +++ b/tests/end_to_end/utils/subprocess_helper.py @@ -1,4 +1,4 @@ -# Copyright 2024-2025 Intel 
Corporation +# Copyright 2020-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import subprocess diff --git a/tests/end_to_end/utils/xml_helper.py b/tests/end_to_end/utils/xml_helper.py index dcd0dda222..489641dd46 100644 --- a/tests/end_to_end/utils/xml_helper.py +++ b/tests/end_to_end/utils/xml_helper.py @@ -1,4 +1,4 @@ -# Copyright 2024-2025 Intel Corporation +# Copyright 2020-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import xml.etree.ElementTree as ET From d463717063f9cefe949d8e6f4831c86c0c3ab0f3 Mon Sep 17 00:00:00 2001 From: noopur Date: Mon, 11 Nov 2024 11:04:51 +0000 Subject: [PATCH 16/62] Final changes after testing Signed-off-by: noopur --- test-requirements.txt | 1 - tests/end_to_end/conftest.py | 2 +- tests/end_to_end/models/participants.py | 41 ++++++++++++------- tests/end_to_end/test_suites/sample_tests.py | 2 +- .../test_suites/task_runner_tests.py | 18 ++++---- tests/end_to_end/utils/federation_helper.py | 10 ++--- 6 files changed, 43 insertions(+), 31 deletions(-) diff --git a/test-requirements.txt b/test-requirements.txt index 758a396935..80ed75cde5 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,4 +1,3 @@ pytest==8.3.3 pytest-asyncio==0.24.0 pytest-mock==3.14.0 -ruamel.yaml diff --git a/tests/end_to_end/conftest.py b/tests/end_to_end/conftest.py index b6a904bf47..337c465d98 100644 --- a/tests/end_to_end/conftest.py +++ b/tests/end_to_end/conftest.py @@ -210,7 +210,7 @@ def fx_federation(request, pytestconfig): pytestconfig: pytest config object Returns: federation_fixture: Named tuple containing the objects for model owner, aggregator, and collaborators - + Note: As this is a module level fixture, thus no import is required at test level. 
""" log.info("Fixture for federation setup") diff --git a/tests/end_to_end/models/participants.py b/tests/end_to_end/models/participants.py index c262801381..7d85aba5b2 100644 --- a/tests/end_to_end/models/participants.py +++ b/tests/end_to_end/models/participants.py @@ -2,21 +2,24 @@ # SPDX-License-Identifier: Apache-2.0 import os -import ruamel.yaml from datetime import datetime +import yaml import tests.end_to_end.utils.constants as constants from tests.end_to_end.utils.logger import logger as log import tests.end_to_end.utils.subprocess_helper as sh -yaml = ruamel.yaml.YAML() -yaml.preserve_quotes = True - # Define the ModelOwner class class ModelOwner: """ ModelOwner class to handle the model related operations. + Note: Aggregator can also act as a model owner. + This includes (non-exhaustive list): + 1. Creating the workspace - to create a workspace using given workspace and model names. + 2. Modifying based on input params provided and initializing the plan. + 3. Certifying the workspace and setting up the PKI. + 4. Importing and exporting the workspace etc. 
""" def __init__(self, workspace_name, model_name): @@ -80,7 +83,7 @@ def get_workspace_path(self, results_dir, workspace_name): raise FileNotFoundError(f"Workspace {workspace_name} does not exist in {results_dir}") return self.workspace_path - def sign_collaborator_csr(self, collaborator_name): + def setup_pki(self, collaborator_name): """ Sign the CSR for the collaborator Args: @@ -124,7 +127,7 @@ def modify_plan(self, new_rounds=None, num_collaborators=None): self.num_collaborators = num_collaborators if num_collaborators else self.num_collaborators with open(self.plan_path) as fp: - data = yaml.load(fp) + data = yaml.load(fp, Loader=yaml.FullLoader) data["aggregator"]["settings"]["rounds_to_train"] = int(self.rounds_to_train) data["data_loader"]["settings"]["collaborator_count"] = int(self.num_collaborators) @@ -152,7 +155,7 @@ def initialize_plan(self, agg_domain_name): log.error(f"Failed to initialize the plan: {error}") raise Exception(f"Failed to initialize the plan: {error}") - log.info(f"Initialized the plan for the workspace {self.workspace_name}") + log.info(f"Initialized the plan for the workspace {self.workspace_name}") except Exception as e: log.error(f"Failed to initialize the plan: {e}") raise e @@ -169,30 +172,32 @@ def certify_workspace(self): if return_code != 0: log.error(f"Failed to certify the workspace: {error}") raise Exception(f"Failed to certify the workspace: {error}") - - log.info(f"Certified the workspace {self.workspace_name}") + + log.info(f"Certified the workspace {self.workspace_name}") except Exception as e: log.error(f"Failed to certify the workspace: {e}") raise e return True - def certify_request(self): + def certify_agg_request(self, agg_domain_name): """ Certify the aggregator request + Args: + agg_domain_name (str): Aggregator domain name Returns: bool: True if successful, else False """ - log.info(f"CA should sign the aggregator {self.name} request") + log.info(f"CA should sign the aggregator request") try: return_code, _, 
error = sh.run_command( - f"fx aggregator certify --silent --fqdn {self.agg_domain_name}", + f"fx aggregator certify --silent --fqdn {agg_domain_name}", work_dir=self.workspace_path, ) if return_code != 0: log.error(f"Failed to certify the aggregator request: {error}") raise Exception(f"Failed to certify the aggregator request: {error}") - log.info(f"CA signed the request from {self.name}") + log.info(f"CA signed the request from aggregator") except Exception as e: log.error(f"Failed to certify the aggregator request : {e}") raise e @@ -243,6 +248,9 @@ def import_workspace(self, workspace_zip): class Aggregator: """ Aggregator class to handle the aggregator operations. + This includes (non-exhaustive list): + 1. Generating the sign request + 2. Starting the aggregator """ def __init__(self, agg_domain_name=None, workspace_path=None): @@ -307,6 +315,11 @@ def start(self): class Collaborator: """ Collaborator class to handle the collaborator operations. + This includes (non-exhaustive list): + 1. Generating the sign request + 2. Creating the collaborator + 3. Importing and certifying the CSR + 4. Starting the collaborator """ def __init__(self, collaborator_name=None, data_directory_path=None, workspace_path=None): @@ -361,7 +374,7 @@ def create_collaborator(self): raise e return True - def import_certify_csr(self): + def import_pki(self): """ Import and certify the CSR for the collaborator Returns: diff --git a/tests/end_to_end/test_suites/sample_tests.py b/tests/end_to_end/test_suites/sample_tests.py index d6c1ee0f4d..3009eb8324 100644 --- a/tests/end_to_end/test_suites/sample_tests.py +++ b/tests/end_to_end/test_suites/sample_tests.py @@ -7,7 +7,7 @@ from tests.end_to_end.utils import federation_helper as fed_helper -# ** IMPORTANT **: This is just an example on how to add a test with below pre-requisites. +# ** IMPORTANT **: This is just an example on how to add a test with below pre-requisites. 
# Task Runner API Test function for federation run using sample_model # 1. Create OpenFL workspace, if not present for the model and add relevant dataset and its path in plan/data.yaml # 2. Append the model name to ModelName enum in tests/end_to_end/utils/constants.py diff --git a/tests/end_to_end/test_suites/task_runner_tests.py b/tests/end_to_end/test_suites/task_runner_tests.py index 32dd1a2eb0..8fa05d3611 100644 --- a/tests/end_to_end/test_suites/task_runner_tests.py +++ b/tests/end_to_end/test_suites/task_runner_tests.py @@ -12,10 +12,10 @@ def test_torch_cnn_mnist(fx_federation): """ Test for torch_cnn_mnist model. """ - log.info(f"Test for torch_cnn_mnist with fx_federation: {fx_federation}") + log.info("Testing torch_cnn_mnist model") - # Perform CSR operations like generating sign request, certifying request, etc. - assert fed_helper.setup_pki(fx_federation), "Failed to perform CSR operations" + # Setup PKI for trusted communication within the federation + assert fed_helper.setup_pki(fx_federation), "Failed to setup PKI for trusted communication" # Start the federation results = fed_helper.run_federation(fx_federation) @@ -26,10 +26,10 @@ def test_torch_cnn_mnist(fx_federation): @pytest.mark.keras_cnn_mnist def test_keras_cnn_mnist(fx_federation): - log.info(f"Test for keras_cnn_mnist with fx_federation: {fx_federation}") + log.info("Testing keras_cnn_mnist model") - # Perform CSR operations like generating sign request, certifying request, etc. 
- assert fed_helper.setup_pki(fx_federation), "Failed to perform CSR operations" + # Setup PKI for trusted communication within the federation + assert fed_helper.setup_pki(fx_federation), "Failed to setup PKI for trusted communication" # Start the federation results = fed_helper.run_federation(fx_federation) @@ -43,10 +43,10 @@ def test_torch_cnn_histology(fx_federation): """ Test for torch_cnn_histology model """ - log.info(f"Test for torch_cnn_histology with fx_federation: {fx_federation}") + log.info("Testing torch_cnn_histology model") - # Perform CSR operations like generating sign request, certifying request, etc. - assert fed_helper.setup_pki(fx_federation), "Failed to perform CSR operations" + # Setup PKI for trusted communication within the federation + assert fed_helper.setup_pki(fx_federation), "Failed to setup PKI for trusted communication" # Start the federation results = fed_helper.run_federation(fx_federation) diff --git a/tests/end_to_end/utils/federation_helper.py b/tests/end_to_end/utils/federation_helper.py index f77a29c2b6..b8c593b692 100644 --- a/tests/end_to_end/utils/federation_helper.py +++ b/tests/end_to_end/utils/federation_helper.py @@ -18,24 +18,24 @@ def setup_pki(fed_obj): bool: True if successful, else False """ success = False - # Aggregator operations + # Aggregator and model owner operations try: log.info(f"Performing operations for {fed_obj.aggregator.name}") fed_obj.aggregator.generate_sign_request() - fed_obj.model_owner.certify_request() + fed_obj.model_owner.certify_agg_request(fed_obj.aggregator.agg_domain_name) except Exception as e: log.error(f"Failed to perform aggregator operations: {e}") raise e - # Collaborator operations + # Collaborator and model owner operations for collaborator in fed_obj.collaborators: try: log.info(f"Performing operations for {collaborator.collaborator_name}") collaborator.create_collaborator() collaborator.generate_sign_request() # Below step will add collaborator entries in cols.yaml file. 
- fed_obj.model_owner.sign_collaborator_csr(collaborator.collaborator_name) - collaborator.import_certify_csr() + fed_obj.model_owner.setup_pki(collaborator.collaborator_name) + collaborator.import_pki() except Exception as e: log.error(f"Failed to perform collaborator operations: {e}") raise e From f76dd12804b337cee2a5f3ab11954abf789c956a Mon Sep 17 00:00:00 2001 From: noopur Date: Mon, 11 Nov 2024 12:25:03 +0000 Subject: [PATCH 17/62] Sample tests file mentioned in README.md Signed-off-by: noopur --- tests/end_to_end/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/end_to_end/README.md b/tests/end_to_end/README.md index 4db236cd81..055873d1eb 100644 --- a/tests/end_to_end/README.md +++ b/tests/end_to_end/README.md @@ -15,6 +15,8 @@ tests/end_to_end └── README.md # Readme file ``` +** File `sample_tests.py` provided under `test_suites` acts as a reference on how to add a new test case. + ## Pre-requisites 1. Setup virtual environment and install OpenFL using [online documentation](https://openfl.readthedocs.io/en/latest/get_started/installation.html). 
From 62005fcdb57ac16ee638a94ad5330c2443904023 Mon Sep 17 00:00:00 2001 From: noopur Date: Mon, 11 Nov 2024 13:09:59 +0000 Subject: [PATCH 18/62] Added lxml to test requirements file Signed-off-by: noopur --- test-requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/test-requirements.txt b/test-requirements.txt index 80ed75cde5..19bf081db1 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,3 +1,4 @@ +lxml==5.3.0 pytest==8.3.3 pytest-asyncio==0.24.0 pytest-mock==3.14.0 From 67eb6cd7e5a77ec908209452c8eb88dbf54a1d52 Mon Sep 17 00:00:00 2001 From: noopur Date: Mon, 11 Nov 2024 13:45:32 +0000 Subject: [PATCH 19/62] Removed __init__.py file and corrected certify functions Signed-off-by: noopur --- .github/workflows/task_runner_e2e.yml | 6 +++++- .gitignore | 1 + tests/end_to_end/README.md | 1 - tests/end_to_end/__init__.py | 3 --- tests/end_to_end/models/participants.py | 4 ++-- tests/end_to_end/utils/federation_helper.py | 4 ++-- 6 files changed, 10 insertions(+), 9 deletions(-) delete mode 100644 tests/end_to_end/__init__.py diff --git a/.github/workflows/task_runner_e2e.yml b/.github/workflows/task_runner_e2e.yml index 7bb9baa311..07ad49440c 100644 --- a/.github/workflows/task_runner_e2e.yml +++ b/.github/workflows/task_runner_e2e.yml @@ -5,6 +5,9 @@ name: Task Runner E2E on: + pull_request: # To be removed once the tests are stable + branches: [ develop ] + types: [opened, synchronize, reopened, ready_for_review] schedule: - cron: '0 0 * * *' # Run every day at midnight workflow_dispatch: @@ -76,7 +79,7 @@ jobs: - name: Run Task Runner E2E tests id: run_task_runner_tests run: | - pytest -v tests/end_to_end/test_suites/task_runner_tests.py -m ${{ env.MODEL_NAME }} -s --num_rounds $NUM_ROUNDS --num_collaborators $NUM_COLLABORATORS --model_name ${{ env.MODEL_NAME }} + python -m pytest -s tests/end_to_end/test_suites/task_runner_tests.py -m ${{ env.MODEL_NAME }} --num_rounds $NUM_ROUNDS --num_collaborators $NUM_COLLABORATORS --model_name 
${{ env.MODEL_NAME }} echo "Task runner end to end test run completed" env: NO_PROXY: localhost,127.0.0.1,aggregator @@ -85,6 +88,7 @@ jobs: id: print_test_summary if: steps.run_task_runner_tests.outcome == 'success' || steps.run_task_runner_tests.outcome == 'failure' run: | + export PYTHONPATH="$PYTHONPATH:." python tests/end_to_end/utils/xml_helper.py echo "Test summary printed" diff --git a/.gitignore b/.gitignore index 578b6ed112..8a106933ef 100644 --- a/.gitignore +++ b/.gitignore @@ -15,3 +15,4 @@ venv/* .eggs eggs/* *.pyi +results/* \ No newline at end of file diff --git a/tests/end_to_end/README.md b/tests/end_to_end/README.md index 055873d1eb..824e349f71 100644 --- a/tests/end_to_end/README.md +++ b/tests/end_to_end/README.md @@ -9,7 +9,6 @@ tests/end_to_end ├── models # Central location for all model-related code for testing purpose ├── test_suites # Folder containing test files ├── utils # Folder containing helper files -├── __init__.py # To mark test directory as a Python package ├── conftest.py # Pytest framework configuration file ├── pytest.ini # Pytest initialisation file └── README.md # Readme file diff --git a/tests/end_to_end/__init__.py b/tests/end_to_end/__init__.py deleted file mode 100644 index de233d6a7a..0000000000 --- a/tests/end_to_end/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2020-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -"""Tests package.""" diff --git a/tests/end_to_end/models/participants.py b/tests/end_to_end/models/participants.py index 7d85aba5b2..4881816f0e 100644 --- a/tests/end_to_end/models/participants.py +++ b/tests/end_to_end/models/participants.py @@ -83,7 +83,7 @@ def get_workspace_path(self, results_dir, workspace_name): raise FileNotFoundError(f"Workspace {workspace_name} does not exist in {results_dir}") return self.workspace_path - def setup_pki(self, collaborator_name): + def certify_collaborator(self, collaborator_name): """ Sign the CSR for the collaborator Args: @@ -179,7 
+179,7 @@ def certify_workspace(self): raise e return True - def certify_agg_request(self, agg_domain_name): + def certify_aggregator(self, agg_domain_name): """ Certify the aggregator request Args: diff --git a/tests/end_to_end/utils/federation_helper.py b/tests/end_to_end/utils/federation_helper.py index b8c593b692..1b9d62ac6c 100644 --- a/tests/end_to_end/utils/federation_helper.py +++ b/tests/end_to_end/utils/federation_helper.py @@ -22,7 +22,7 @@ def setup_pki(fed_obj): try: log.info(f"Performing operations for {fed_obj.aggregator.name}") fed_obj.aggregator.generate_sign_request() - fed_obj.model_owner.certify_agg_request(fed_obj.aggregator.agg_domain_name) + fed_obj.model_owner.certify_aggregator(fed_obj.aggregator.agg_domain_name) except Exception as e: log.error(f"Failed to perform aggregator operations: {e}") raise e @@ -34,7 +34,7 @@ def setup_pki(fed_obj): collaborator.create_collaborator() collaborator.generate_sign_request() # Below step will add collaborator entries in cols.yaml file. 
- fed_obj.model_owner.setup_pki(collaborator.collaborator_name) + fed_obj.model_owner.certify_collaborator(collaborator.collaborator_name) collaborator.import_pki() except Exception as e: log.error(f"Failed to perform collaborator operations: {e}") From 93d2e94d050ab1699c1fd707b66610df2847cddf Mon Sep 17 00:00:00 2001 From: noopur Date: Mon, 11 Nov 2024 13:58:37 +0000 Subject: [PATCH 20/62] Revert pull_request trigger Signed-off-by: noopur --- .github/workflows/task_runner_e2e.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/workflows/task_runner_e2e.yml b/.github/workflows/task_runner_e2e.yml index 07ad49440c..ab7d8ddd53 100644 --- a/.github/workflows/task_runner_e2e.yml +++ b/.github/workflows/task_runner_e2e.yml @@ -5,9 +5,6 @@ name: Task Runner E2E on: - pull_request: # To be removed once the tests are stable - branches: [ develop ] - types: [opened, synchronize, reopened, ready_for_review] schedule: - cron: '0 0 * * *' # Run every day at midnight workflow_dispatch: From a101555ce09d0ad663d2b232bdb89d6dac44ff60 Mon Sep 17 00:00:00 2001 From: noopur Date: Tue, 12 Nov 2024 06:53:26 +0000 Subject: [PATCH 21/62] Review comments addressed Signed-off-by: noopur --- .github/workflows/task_runner_e2e.yml | 17 ++++++++++------- tests/end_to_end/README.md | 4 +--- tests/end_to_end/conftest.py | 5 ++--- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/.github/workflows/task_runner_e2e.yml b/.github/workflows/task_runner_e2e.yml index ab7d8ddd53..56aaf9053e 100644 --- a/.github/workflows/task_runner_e2e.yml +++ b/.github/workflows/task_runner_e2e.yml @@ -5,6 +5,9 @@ name: Task Runner E2E on: + pull_request: + branches: [ develop ] + types: [opened, synchronize, reopened, ready_for_review] schedule: - cron: '0 0 * * *' # Run every day at midnight workflow_dispatch: @@ -67,19 +70,19 @@ jobs: pip install . 
pip install -r test-requirements.txt - - name: Add runner IP to /etc/hosts - id: add_runner_ip - run: | - sudo echo "127.0.0.1 aggregator" | sudo tee -a /etc/hosts - echo "Added runner IP to /etc/hosts" + # - name: Add runner IP to /etc/hosts + # id: add_runner_ip + # run: | + # sudo echo "127.0.0.1 aggregator" | sudo tee -a /etc/hosts + # echo "Added runner IP to /etc/hosts" - name: Run Task Runner E2E tests id: run_task_runner_tests run: | python -m pytest -s tests/end_to_end/test_suites/task_runner_tests.py -m ${{ env.MODEL_NAME }} --num_rounds $NUM_ROUNDS --num_collaborators $NUM_COLLABORATORS --model_name ${{ env.MODEL_NAME }} echo "Task runner end to end test run completed" - env: - NO_PROXY: localhost,127.0.0.1,aggregator + # env: + # NO_PROXY: localhost,127.0.0.1,aggregator - name: Print test summary # Print the test summary only if the tests were run id: print_test_summary diff --git a/tests/end_to_end/README.md b/tests/end_to_end/README.md index 824e349f71..3971b67986 100644 --- a/tests/end_to_end/README.md +++ b/tests/end_to_end/README.md @@ -56,7 +56,5 @@ results ``` ## Contribution -Please ensure that you have tested your changes thoroughly before submitting a pull request. -## License -This project is licensed under [Apache License Version 2.0](LICENSE). By contributing to the project, you agree to the license and copyright terms therein and release your contribution under these terms. +https://github.com/securefederatedai/openfl/blob/develop/CONTRIBUTING.md diff --git a/tests/end_to_end/conftest.py b/tests/end_to_end/conftest.py index 337c465d98..3ccffb0e0f 100644 --- a/tests/end_to_end/conftest.py +++ b/tests/end_to_end/conftest.py @@ -213,10 +213,9 @@ def fx_federation(request, pytestconfig): Note: As this is a module level fixture, thus no import is required at test level. 
""" - log.info("Fixture for federation setup") + log.info("Fixture for federation setup using Task Runner API on single machine.") collaborators = [] - # Default name for bare metal approach, modify as required. - agg_domain_name = "aggregator" + agg_domain_name = "localhost" # Parse the command line arguments args = parse_arguments() From 7783e5d94cfbc9603f8cc9dc6c6fb3b3fc55d7bc Mon Sep 17 00:00:00 2001 From: noopur Date: Tue, 12 Nov 2024 07:04:06 +0000 Subject: [PATCH 22/62] Modified Signed-off-by: noopur --- .github/workflows/task_runner_e2e.yml | 14 ++++---------- tests/end_to_end/pytest.ini | 2 ++ 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/.github/workflows/task_runner_e2e.yml b/.github/workflows/task_runner_e2e.yml index 56aaf9053e..7ea8e090a4 100644 --- a/.github/workflows/task_runner_e2e.yml +++ b/.github/workflows/task_runner_e2e.yml @@ -40,8 +40,10 @@ jobs: matrix: # There are open issues for some of the models, so excluding them for now: # model_name: [ "torch_cnn_mnist", "keras_cnn_mnist", "torch_cnn_histology" ] - model_name: [ "torch_cnn_mnist", "keras_cnn_mnist" ] - python_version: [ "3.8", "3.9", "3.10" ] + # model_name: [ "torch_cnn_mnist", "keras_cnn_mnist" ] + # python_version: [ "3.8", "3.9", "3.10" ] + model_name: [ "torch_cnn_mnist" ] + python_version: [ "3.10" ] fail-fast: false # do not immediately fail if one of the combinations fail env: @@ -70,19 +72,11 @@ jobs: pip install . 
pip install -r test-requirements.txt - # - name: Add runner IP to /etc/hosts - # id: add_runner_ip - # run: | - # sudo echo "127.0.0.1 aggregator" | sudo tee -a /etc/hosts - # echo "Added runner IP to /etc/hosts" - - name: Run Task Runner E2E tests id: run_task_runner_tests run: | python -m pytest -s tests/end_to_end/test_suites/task_runner_tests.py -m ${{ env.MODEL_NAME }} --num_rounds $NUM_ROUNDS --num_collaborators $NUM_COLLABORATORS --model_name ${{ env.MODEL_NAME }} echo "Task runner end to end test run completed" - # env: - # NO_PROXY: localhost,127.0.0.1,aggregator - name: Print test summary # Print the test summary only if the tests were run id: print_test_summary diff --git a/tests/end_to_end/pytest.ini b/tests/end_to_end/pytest.ini index bfa1f84164..8d18441dd6 100644 --- a/tests/end_to_end/pytest.ini +++ b/tests/end_to_end/pytest.ini @@ -8,3 +8,5 @@ markers = torch_cnn_mnist: mark a test as a torch CNN MNIST test. keras_cnn_mnist: mark a test as a Keras CNN MNIST test. torch_cnn_histology: mark a test as a torch CNN histology test. 
+asyncio_mode=auto +asyncio_default_fixture_loop_scope="function" From 3204812690c3f43ad1005f92421dd65919bc3e6c Mon Sep 17 00:00:00 2001 From: noopur Date: Tue, 12 Nov 2024 07:07:01 +0000 Subject: [PATCH 23/62] Revert testing changes Signed-off-by: noopur --- .github/workflows/task_runner_e2e.yml | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/.github/workflows/task_runner_e2e.yml b/.github/workflows/task_runner_e2e.yml index 7ea8e090a4..a1e52df6a3 100644 --- a/.github/workflows/task_runner_e2e.yml +++ b/.github/workflows/task_runner_e2e.yml @@ -5,9 +5,6 @@ name: Task Runner E2E on: - pull_request: - branches: [ develop ] - types: [opened, synchronize, reopened, ready_for_review] schedule: - cron: '0 0 * * *' # Run every day at midnight workflow_dispatch: @@ -40,10 +37,8 @@ jobs: matrix: # There are open issues for some of the models, so excluding them for now: # model_name: [ "torch_cnn_mnist", "keras_cnn_mnist", "torch_cnn_histology" ] - # model_name: [ "torch_cnn_mnist", "keras_cnn_mnist" ] - # python_version: [ "3.8", "3.9", "3.10" ] - model_name: [ "torch_cnn_mnist" ] - python_version: [ "3.10" ] + model_name: [ "torch_cnn_mnist", "keras_cnn_mnist" ] + python_version: [ "3.8", "3.9", "3.10" ] fail-fast: false # do not immediately fail if one of the combinations fail env: From a29ae7b87c9335774a838caa8fab093f8876e462 Mon Sep 17 00:00:00 2001 From: noopur Date: Tue, 12 Nov 2024 07:54:11 +0000 Subject: [PATCH 24/62] Set FQDN as env variable for pytest coverage Signed-off-by: noopur --- .github/workflows/ubuntu.yml | 3 +++ .github/workflows/windows.yml | 3 +++ 2 files changed, 6 insertions(+) diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index 3f26c797fc..d0d8db72c2 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -26,6 +26,9 @@ jobs: pytest-coverage: # from pytest_coverage.yml needs: lint runs-on: ubuntu-latest + env: + # A workaround for long FQDN names provided by GitHub 
actions. + FQDN: "localhost" steps: - uses: actions/checkout@v3 - name: Set up Python 3.8 diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index f7d9ca30b9..d15cc60dba 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -10,6 +10,9 @@ permissions: jobs: pytest-coverage: # from pytest_coverage.yml runs-on: windows-latest + env: + # A workaround for long FQDN names provided by GitHub actions. + FQDN: "localhost" steps: - uses: actions/checkout@v3 - name: Set up Python 3.8 From 978eb6ebb0e09bcd2954581f00cb3cbb13971139 Mon Sep 17 00:00:00 2001 From: noopur Date: Tue, 12 Nov 2024 08:00:15 +0000 Subject: [PATCH 25/62] Moving env variable outside Signed-off-by: noopur --- .github/workflows/ubuntu.yml | 7 ++++--- .github/workflows/windows.yml | 7 ++++--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index d0d8db72c2..bead9ebbdc 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -7,6 +7,10 @@ on: permissions: contents: read +env: + # A workaround for long FQDN names provided by GitHub actions. + FQDN: "localhost" + jobs: lint: # from lint.yml runs-on: ubuntu-latest @@ -26,9 +30,6 @@ jobs: pytest-coverage: # from pytest_coverage.yml needs: lint runs-on: ubuntu-latest - env: - # A workaround for long FQDN names provided by GitHub actions. - FQDN: "localhost" steps: - uses: actions/checkout@v3 - name: Set up Python 3.8 diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index d15cc60dba..a9aac81654 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -7,12 +7,13 @@ on: permissions: contents: read +env: + # A workaround for long FQDN names provided by GitHub actions. + FQDN: "localhost" + jobs: pytest-coverage: # from pytest_coverage.yml runs-on: windows-latest - env: - # A workaround for long FQDN names provided by GitHub actions. 
- FQDN: "localhost" steps: - uses: actions/checkout@v3 - name: Set up Python 3.8 From 4e0f265462b0d1cd2b762cbde329ef8e2718d205 Mon Sep 17 00:00:00 2001 From: noopur Date: Tue, 12 Nov 2024 10:55:15 +0000 Subject: [PATCH 26/62] Use logging directly after conftest initialisation Signed-off-by: noopur --- .github/workflows/task_runner_e2e.yml | 3 +++ tests/end_to_end/models/participants.py | 4 +++- tests/end_to_end/test_suites/sample_tests.py | 3 ++- tests/end_to_end/test_suites/task_runner_tests.py | 4 +++- tests/end_to_end/utils/conftest_helper.py | 3 ++- tests/end_to_end/utils/federation_helper.py | 4 +++- tests/end_to_end/utils/subprocess_helper.py | 3 ++- tests/end_to_end/utils/xml_helper.py | 3 --- 8 files changed, 18 insertions(+), 9 deletions(-) diff --git a/.github/workflows/task_runner_e2e.yml b/.github/workflows/task_runner_e2e.yml index a1e52df6a3..e3d8573fad 100644 --- a/.github/workflows/task_runner_e2e.yml +++ b/.github/workflows/task_runner_e2e.yml @@ -5,6 +5,9 @@ name: Task Runner E2E on: + pull_request: + branches: [ develop ] + types: [opened, synchronize, reopened, ready_for_review] schedule: - cron: '0 0 * * *' # Run every day at midnight workflow_dispatch: diff --git a/tests/end_to_end/models/participants.py b/tests/end_to_end/models/participants.py index 4881816f0e..0469868ea8 100644 --- a/tests/end_to_end/models/participants.py +++ b/tests/end_to_end/models/participants.py @@ -4,11 +4,13 @@ import os from datetime import datetime import yaml +import logging import tests.end_to_end.utils.constants as constants -from tests.end_to_end.utils.logger import logger as log import tests.end_to_end.utils.subprocess_helper as sh +log = logging.getLogger(__name__) + # Define the ModelOwner class class ModelOwner: diff --git a/tests/end_to_end/test_suites/sample_tests.py b/tests/end_to_end/test_suites/sample_tests.py index 3009eb8324..7c528277e8 100644 --- a/tests/end_to_end/test_suites/sample_tests.py +++ b/tests/end_to_end/test_suites/sample_tests.py @@ 
-2,10 +2,11 @@ # SPDX-License-Identifier: Apache-2.0 import pytest +import logging -from tests.end_to_end.utils.logger import logger as log from tests.end_to_end.utils import federation_helper as fed_helper +log = logging.getLogger(__name__) # ** IMPORTANT **: This is just an example on how to add a test with below pre-requisites. # Task Runner API Test function for federation run using sample_model diff --git a/tests/end_to_end/test_suites/task_runner_tests.py b/tests/end_to_end/test_suites/task_runner_tests.py index 8fa05d3611..a80c583acf 100644 --- a/tests/end_to_end/test_suites/task_runner_tests.py +++ b/tests/end_to_end/test_suites/task_runner_tests.py @@ -2,10 +2,12 @@ # SPDX-License-Identifier: Apache-2.0 import pytest +import logging -from tests.end_to_end.utils.logger import logger as log from tests.end_to_end.utils import federation_helper as fed_helper +log = logging.getLogger(__name__) + @pytest.mark.torch_cnn_mnist def test_torch_cnn_mnist(fx_federation): diff --git a/tests/end_to_end/utils/conftest_helper.py b/tests/end_to_end/utils/conftest_helper.py index 352e25ba4b..490a3316db 100644 --- a/tests/end_to_end/utils/conftest_helper.py +++ b/tests/end_to_end/utils/conftest_helper.py @@ -3,8 +3,9 @@ import argparse import sys +import logging -from tests.end_to_end.utils.logger import logger as log +log = logging.getLogger(__name__) def parse_arguments(): diff --git a/tests/end_to_end/utils/federation_helper.py b/tests/end_to_end/utils/federation_helper.py index 1b9d62ac6c..a4addbc49f 100644 --- a/tests/end_to_end/utils/federation_helper.py +++ b/tests/end_to_end/utils/federation_helper.py @@ -3,9 +3,11 @@ import time import concurrent.futures +import logging from tests.end_to_end.utils.constants import SUCCESS_MARKER -from tests.end_to_end.utils.logger import logger as log + +log = logging.getLogger(__name__) def setup_pki(fed_obj): diff --git a/tests/end_to_end/utils/subprocess_helper.py b/tests/end_to_end/utils/subprocess_helper.py index 
d11abc2f2c..ec09412762 100644 --- a/tests/end_to_end/utils/subprocess_helper.py +++ b/tests/end_to_end/utils/subprocess_helper.py @@ -4,8 +4,9 @@ import subprocess import time import traceback +import logging -from tests.end_to_end.utils.logger import logger as log +log = logging.getLogger(__name__) def run_command_background( diff --git a/tests/end_to_end/utils/xml_helper.py b/tests/end_to_end/utils/xml_helper.py index 489641dd46..b3812acc3c 100644 --- a/tests/end_to_end/utils/xml_helper.py +++ b/tests/end_to_end/utils/xml_helper.py @@ -5,8 +5,6 @@ from lxml import etree import os -from tests.end_to_end.utils.logger import logger as log - # Initialize the XML parser parser = etree.XMLParser(recover=True, encoding='utf-8') tree = ET.parse("results/results.xml", parser=parser) @@ -63,7 +61,6 @@ def get_testcase_result(): database_list.append(database_dict) status = None - log.info(f"Database list = {database_list}") return database_list From cfec2e3a65f726006314e6f53cf84fa64c4e16bb Mon Sep 17 00:00:00 2001 From: noopur Date: Tue, 12 Nov 2024 11:02:41 +0000 Subject: [PATCH 27/62] Removed pull_request trigger after testing Signed-off-by: noopur --- .github/workflows/task_runner_e2e.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/workflows/task_runner_e2e.yml b/.github/workflows/task_runner_e2e.yml index e3d8573fad..a1e52df6a3 100644 --- a/.github/workflows/task_runner_e2e.yml +++ b/.github/workflows/task_runner_e2e.yml @@ -5,9 +5,6 @@ name: Task Runner E2E on: - pull_request: - branches: [ develop ] - types: [opened, synchronize, reopened, ready_for_review] schedule: - cron: '0 0 * * *' # Run every day at midnight workflow_dispatch: From 004e99f85b79a1c76ff7873f3dd665dd8e9a4faa Mon Sep 17 00:00:00 2001 From: "Shah, Karan" Date: Tue, 12 Nov 2024 22:00:07 +0530 Subject: [PATCH 28/62] Add enclave template Signed-off-by: Shah, Karan --- openfl-docker/gramine_app/Makefile | 50 +++++++++++++ .../gramine_app/fx.manifest.template | 74 +++++++++++++++++++ 2 
files changed, 124 insertions(+) create mode 100644 openfl-docker/gramine_app/Makefile create mode 100755 openfl-docker/gramine_app/fx.manifest.template diff --git a/openfl-docker/gramine_app/Makefile b/openfl-docker/gramine_app/Makefile new file mode 100644 index 0000000000..5d92ca3666 --- /dev/null +++ b/openfl-docker/gramine_app/Makefile @@ -0,0 +1,50 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +ARCH_LIBDIR ?= /lib/$(shell $(CC) -dumpmachine) + +# This is a signer key on the BUILDING machine +SGX_SIGNER_KEY ?= /key.pem + +ifeq ($(DEBUG),1) +GRAMINE_LOG_LEVEL = debug +else +GRAMINE_LOG_LEVEL = error +endif + +.PHONY: all +all: fx.manifest +ifeq ($(SGX),1) +all: fx.manifest.sgx fx.sig +endif + +fx.manifest: fx.manifest.template + @echo "Making fx.manifest file" +# disable checks temp - until gramine is pegged to proper release + gramine-manifest \ + -Dlog_level=$(GRAMINE_LOG_LEVEL) \ + -Darch_libdir=$(ARCH_LIBDIR) \ + -Dno_proxy=$(no_proxy) \ + -Dhttp_proxy=$(http_proxy) \ + -Dhttps_proxy=$(https_proxy) \ + -Dentrypoint=$(shell which fx) \ + $< >$@ + +fx.manifest.sgx: fx.manifest + @echo "Making fx.manifest.sgx file" + @test -s $(SGX_SIGNER_KEY) || \ + { echo "SGX signer private key was not found, please specify SGX_SIGNER_KEY!"; exit 1; } + @gramine-sgx-sign \ + --key $(SGX_SIGNER_KEY) \ + --manifest $< \ + --output $@ | tail -n 1 | tr -d ' ' | xargs -I {} echo "fx.mr_enclave={}" + +fx.sig: fx.manifest.sgx + +.PHONY: clean +clean: + $(RM) *.manifest *.manifest.sgx *.token *.sig OUTPUT* *.PID TEST_STDOUT TEST_STDERR + $(RM) -r scripts/__pycache__ + +.PHONY: distclean +distclean: clean diff --git a/openfl-docker/gramine_app/fx.manifest.template b/openfl-docker/gramine_app/fx.manifest.template new file mode 100755 index 0000000000..23ba9ce61e --- /dev/null +++ b/openfl-docker/gramine_app/fx.manifest.template @@ -0,0 +1,74 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# 
================================== +# OpenFL Enclave for Gramine-SGX +# ================================== + +libos.entrypoint = "{{ entrypoint }}" +loader.entrypoint.uri = "file:{{ gramine.libos }}" + +loader.log_level = "{{ log_level }}" + +loader.env.OMP_NUM_THREADS = "16" +loader.env.LD_LIBRARY_PATH = "{{ arch_libdir }}:/usr/{{ arch_libdir }}:/lib:/usr/lib" +loader.env.SSL_CERT_DIR = "/etc/ssl/certs" +# loader.env.no_proxy = "{{ no_proxy }}" +# loader.env.https_proxy = "{{ https_proxy }}" +# loader.env.http_proxy = "{{ http_proxy }}" + +loader.insecure__use_cmdline_argv = true +loader.insecure__use_host_env = true + + +# URI - path on host +# PATH - pointer inside gramine +fs.start_dir = "/workspace" +fs.mounts = [ + { uri = "file:{{ gramine.runtimedir() }}", path = "/lib" }, + { uri = "file:{{ arch_libdir }}", path = "{{ arch_libdir }}" }, + { uri = "file:/usr", path = "/usr" }, + { uri = "file:/etc/ssl/certs", path = "/etc/ssl/certs" }, + { uri = "file:/workspace", path = "/workspace" }, + { uri = "file:/keys", path = "/keys" }, + { uri = "file:/host_save_path", path = "/host_save_path" }, + { type = "tmpfs", path = "/tmp" }, +] + +sgx.debug = false +sgx.preheat_enclave = false +sgx.enclave_size = "16G" + +sys.stack.size = "4M" +sys.enable_sigterm_injection = true +sys.enable_extra_runtime_domain_names_conf = true +# sys.brk.max_size = "1M" + +sgx.trusted_files = [ + "file:{{ gramine.libos }}", + "file:{{ entrypoint }}", + "file:{{ gramine.runtimedir() }}/", + "file:{{ arch_libdir }}/", + "file:/usr/{{ arch_libdir }}/", + "file:/etc/ssl/certs/", + "file:{{ python.stdlib }}/", + "file:{{ python.distlib }}/", +{% for path in python.get_sys_path('python') %} + "file:{{ path }}{{ '/' if path.is_dir() else '' }}", +{% endfor %} + "file:/workspace/src/", +] + +sgx.allowed_files = [ + "file:/workspace/save", + "file:/workspace/plan/", + "file:/workspace/logs", + "file:/workspace/cert", + "file:/keys", + "file:/host_save_path", + "file:/workspace/data", + 
"file:/workspace/plan/cols.yaml", + "file:/workspace/plan/data.yaml", + "file:/workspace/plan/plan.yaml", +] +sgx.remote_attestation = "dcap" +sgx.max_threads = 512 From 1965115a397e9dd917d48b6498f78c9d9add6dd6 Mon Sep 17 00:00:00 2001 From: "Shah, Karan" Date: Tue, 12 Nov 2024 22:55:36 +0530 Subject: [PATCH 29/62] SGX option in dockerize Signed-off-by: Shah, Karan --- openfl/interface/workspace.py | 35 ++++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/openfl/interface/workspace.py b/openfl/interface/workspace.py index 260c71eed1..32c264a6a8 100644 --- a/openfl/interface/workspace.py +++ b/openfl/interface/workspace.py @@ -389,6 +389,22 @@ def export_() -> str: default=False, help="If set, rebuilds docker images with `--no-cache` option.", ) +@option( + "--sgx-ready", + is_flag=True, + default=False, + help="If set, builds an SGX-enabled OpenFL enclave.", +) +@option( + "--enclave-key", + "enclave_key", + type=str, + required=False, + help=( + "Path to an enclave signing key. If not provided, a new key will be generated. " + "This option is only valid when `--sgx-ready` is set." + ), +) @option( "--revision", required=False, @@ -401,7 +417,9 @@ def export_() -> str: ), ) @pass_context -def dockerize_(context, save, rebuild, revision): +def dockerize_( + context, save: bool, rebuild: bool, sgx_ready: bool, enclave_key: str, revision: str +): """Package current workspace as a Docker image.""" # Docker build options @@ -430,10 +448,24 @@ def dockerize_(context, save, rebuild, revision): _execute(base_image_build_cmd) # Build workspace image. 
+ options = [] + options.append("--no-cache" if rebuild else "") + options = " ".join(options) + if enclave_key is None: + _execute("openssl genrsa -out key.pem -3 3072") + enclave_key = os.path.abspath("key.pem") + logging.info(f"Generated new enclave key: {enclave_key}") + else: + enclave_key = os.path.abspath(enclave_key) + if not os.path.exists(enclave_key): + raise FileNotFoundError(f"Enclave key `{enclave_key}` does not exist") + logging.info(f"Using enclave key: {enclave_key}") + logging.info("Building workspace image") ws_image_build_cmd = ( "DOCKER_BUILDKIT=1 docker build {options} " "--build-arg WORKSPACE_NAME={workspace_name} " + "--secret id=signer-key,src={enclave_key} " "-t {image_name} " "-f {dockerfile} " "{build_context}" @@ -441,6 +473,7 @@ def dockerize_(context, save, rebuild, revision): options=options, image_name=workspace_name, workspace_name=workspace_name, + enclave_key=enclave_key, dockerfile=os.path.join(SITEPACKS, "openfl-docker", "Dockerfile.workspace"), build_context=".", ) From b8a1f5742807ce76b343f153373867a24d0ac929 Mon Sep 17 00:00:00 2001 From: Kevin Ta <116312994+kta-intel@users.noreply.github.com> Date: Tue, 12 Nov 2024 16:36:59 -0500 Subject: [PATCH 30/62] [FeTS][GaNDLF] skip loading `train.csv` during inference mode (#1141) * remove train.csv path during inference step Signed-off-by: kta-intel * check inference during dataloader Signed-off-by: kta-intel * lint fix Signed-off-by: kta-intel --------- Signed-off-by: kta-intel --- openfl/federated/data/loader_gandlf.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/openfl/federated/data/loader_gandlf.py b/openfl/federated/data/loader_gandlf.py index 6e1a04342a..648ebe2930 100644 --- a/openfl/federated/data/loader_gandlf.py +++ b/openfl/federated/data/loader_gandlf.py @@ -25,7 +25,10 @@ def __init__(self, data_path, feature_shape): data_path (str): The path to the directory containing the data. feature_shape (tuple): The shape of an example feature array. 
""" - self.train_csv = data_path + "/train.csv" + if "inference" in data_path: + self.train_csv = None + else: + self.train_csv = data_path + "/train.csv" self.val_csv = data_path + "/valid.csv" self.train_dataloader = None self.val_dataloader = None From 0314d9712d172f64076d35e38ed1b2de911d5f22 Mon Sep 17 00:00:00 2001 From: noopur Date: Wed, 13 Nov 2024 05:54:57 +0000 Subject: [PATCH 31/62] Logic to print score and training details Signed-off-by: noopur --- tests/end_to_end/models/participants.py | 6 +- tests/end_to_end/utils/federation_helper.py | 3 +- tests/end_to_end/utils/xml_helper.py | 77 ++++++++++++++++++--- 3 files changed, 73 insertions(+), 13 deletions(-) diff --git a/tests/end_to_end/models/participants.py b/tests/end_to_end/models/participants.py index 0469868ea8..8dfe95b4fc 100644 --- a/tests/end_to_end/models/participants.py +++ b/tests/end_to_end/models/participants.py @@ -292,8 +292,7 @@ def start(self): """ try: log.info(f"Starting {self.name}") - curr_time = datetime.now().strftime("%Y%m%d_%H%M%S") - filename = f"{self.name}_{curr_time}.log" + filename = f"{self.name}.log" res_file = os.path.join(os.getcwd(), self.workspace_path, filename) bg_file = open(res_file, "w", buffering=1) @@ -412,8 +411,7 @@ def start(self): """ try: log.info(f"Starting {self.collaborator_name}") - curr_time = datetime.now().strftime("%Y%m%d_%H%M%S") - filename = f"{self.collaborator_name}_{curr_time}.log" + filename = f"{self.collaborator_name}.log" res_file = os.path.join(os.getcwd(), self.workspace_path, filename) bg_file = open(res_file, "w", buffering=1) diff --git a/tests/end_to_end/utils/federation_helper.py b/tests/end_to_end/utils/federation_helper.py index a4addbc49f..3cb091b7ce 100644 --- a/tests/end_to_end/utils/federation_helper.py +++ b/tests/end_to_end/utils/federation_helper.py @@ -91,7 +91,8 @@ def verify_federation_run_completion(fed_obj, results): for i, participant in enumerate(fed_obj.collaborators + [fed_obj.aggregator]) ] - # Result will contain 
a list of tuple of replica and operator objects. + # Result will contain a list of boolean values for all the participants. + # True - successful completion, False - failed/incomplete results = [f.result() for f in futures] log.info(f"Results: {results}") diff --git a/tests/end_to_end/utils/xml_helper.py b/tests/end_to_end/utils/xml_helper.py index b3812acc3c..06715c59f1 100644 --- a/tests/end_to_end/utils/xml_helper.py +++ b/tests/end_to_end/utils/xml_helper.py @@ -13,6 +13,47 @@ testsuites = tree.getroot() +def get_aggregator_logs(model_name): + """ + Get the aggregator logs to fetch the metric values and scores + Args: + model_name: the model name for which the aggregator logs are to be fetched + Returns: + tuple: the locally tuned model validation, train, aggregated model validation and score + """ + lt_mv, train, agg_mv, score = None, None, None, "NA" + + workspace_name = "workspace_" + model_name + agg_log_file = os.path.join("results", workspace_name, "aggregator.log") + + if not os.path.exists(agg_log_file): + print(f"Aggregator log file {agg_log_file} not found.") + else: + with open(agg_log_file, 'r') as f: + for raw_line in f: + # Log file contains aggregator.py: which gets concatenated with the actual log line if not stripped + line = raw_line.strip() if "aggregator.py:" not in raw_line else raw_line.split("aggregator.py:")[0].strip() + # Fetch the metric origin and aggregator details + if "metric_origin" in line and "aggregator" in line: + if "locally_tuned_model_validation" in line: + reqd_line = line.strip() if "}" in line else line.strip() + next(f).strip() + lt_mv = eval(reqd_line.split("METRIC")[1].strip('"')) + if "train" in line: + reqd_line = line.strip() if "}" in line else line.strip() + next(f).strip() + train = eval(reqd_line.split("METRIC")[1].strip('"')) + if "aggregated_model_validation" in line: + reqd_line = line.strip() if "}" in line else line.strip() + next(f).strip() + agg_mv = eval(reqd_line.split("METRIC")[1].strip('"')) + + # 
Fetch the best model details + if "saved the best model" in line: + reqd_line = line.strip() + score_line = reqd_line.split("METRIC")[1].strip('"').strip() + score = score_line.split("score")[1].strip() + + return (lt_mv, train, agg_mv, score) + + def get_test_status(result): """ Get the test status/verdict @@ -64,12 +105,32 @@ def get_testcase_result(): return database_list -result = get_testcase_result() +if __name__ == "__main__": + """ + Main function to get the test case results and aggregator logs + And write the results to GitHub step summary + """ + score = "NA" + result = get_testcase_result() + + if not os.getenv("MODEL_NAME"): + print("MODEL_NAME is not set, cannot find out aggregator logs") + else: + (lt_mv, train, agg_mv, score) = get_aggregator_logs(os.getenv("MODEL_NAME")) + + # Write the results to GitHub step summary + with open(os.getenv('GITHUB_STEP_SUMMARY'), 'a') as fh: + # DO NOT change the print statements + print("| Name | Time (in seconds) | Result | Score (if applicable) |", file=fh) + print("| ------------- | ------------- | ------------- | ------------- |", file=fh) + for item in result: + print(f"| {item['name']} | {item['time']} | {item['result']} | {score} |", file=fh) + print("", file=fh) -# Write the results to GitHub step summary -with open(os.getenv('GITHUB_STEP_SUMMARY'), 'a') as fh: - # DO NOT change the print statements - print("| Name | Time (in seconds) | Result |", file=fh) - print("| ------------- | ------------- | ------------- |", file=fh) - for item in result: - print(f"| {item['name']} | {item['time']} | {item['result']} |", file=fh) + # DO NOT change the print statements + if lt_mv and train and agg_mv: + print("| Task | Metric Name | Metric Value | Round |", file=fh) + print("| ------------- | ------------- | ------------- | ------------- |", file=fh) + print(f"| {lt_mv['task_name']} | {lt_mv['metric_name']} | {lt_mv['metric_value']} | {lt_mv['round']} |", file=fh) + print(f"| {train['task_name']} | 
{train['metric_name']} | {train['metric_value']} | {train['round']} |", file=fh) + print(f"| {agg_mv['task_name']} | {agg_mv['metric_name']} | {agg_mv['metric_value']} | {agg_mv['round']} |", file=fh) From 0ce72f9ff60ef71a38747d0145e3a1c02232fb25 Mon Sep 17 00:00:00 2001 From: noopur Date: Wed, 13 Nov 2024 06:20:46 +0000 Subject: [PATCH 32/62] Add collab and rounds to summary Signed-off-by: noopur --- tests/end_to_end/utils/xml_helper.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/tests/end_to_end/utils/xml_helper.py b/tests/end_to_end/utils/xml_helper.py index 06715c59f1..4b29f00acc 100644 --- a/tests/end_to_end/utils/xml_helper.py +++ b/tests/end_to_end/utils/xml_helper.py @@ -118,19 +118,21 @@ def get_testcase_result(): else: (lt_mv, train, agg_mv, score) = get_aggregator_logs(os.getenv("MODEL_NAME")) + num_cols = os.getenv("NUM_COLLABORATORS") + num_rounds = os.getenv("NUM_ROUNDS") # Write the results to GitHub step summary with open(os.getenv('GITHUB_STEP_SUMMARY'), 'a') as fh: # DO NOT change the print statements - print("| Name | Time (in seconds) | Result | Score (if applicable) |", file=fh) - print("| ------------- | ------------- | ------------- | ------------- |", file=fh) + print("| Name | Time (in seconds) | Result | Score (if applicable) | Collaborators | Rounds to train |", file=fh) + print("| ------------- | ------------- | ------------- | ------------- | ------------- | ------------- |", file=fh) for item in result: - print(f"| {item['name']} | {item['time']} | {item['result']} | {score} |", file=fh) + print(f"| {item['name']} | {item['time']} | {item['result']} | {score} | {num_cols} | {num_rounds} |", file=fh) print("", file=fh) # DO NOT change the print statements if lt_mv and train and agg_mv: print("| Task | Metric Name | Metric Value | Round |", file=fh) print("| ------------- | ------------- | ------------- | ------------- |", file=fh) - print(f"| {lt_mv['task_name']} | {lt_mv['metric_name']} | 
{lt_mv['metric_value']} | {lt_mv['round']} |", file=fh) - print(f"| {train['task_name']} | {train['metric_name']} | {train['metric_value']} | {train['round']} |", file=fh) - print(f"| {agg_mv['task_name']} | {agg_mv['metric_name']} | {agg_mv['metric_value']} | {agg_mv['round']} |", file=fh) + print(f"| {lt_mv['task_name']} | {lt_mv['metric_name']} | {lt_mv['metric_value']} | {int(lt_mv['round'] + 1)} |", file=fh) + print(f"| {train['task_name']} | {train['metric_name']} | {train['metric_value']} | {int(train['round'] + 1)} |", file=fh) + print(f"| {agg_mv['task_name']} | {agg_mv['metric_name']} | {agg_mv['metric_value']} | {int(agg_mv['round'] + 1)} |", file=fh) From 8bafc0554967e31631f6f9216432457f4cf30cd5 Mon Sep 17 00:00:00 2001 From: "Shah, Karan" Date: Wed, 13 Nov 2024 12:45:51 +0530 Subject: [PATCH 33/62] Remove /keys Signed-off-by: Shah, Karan --- openfl-docker/gramine_app/fx.manifest.template | 2 -- 1 file changed, 2 deletions(-) diff --git a/openfl-docker/gramine_app/fx.manifest.template b/openfl-docker/gramine_app/fx.manifest.template index 23ba9ce61e..9898021f94 100755 --- a/openfl-docker/gramine_app/fx.manifest.template +++ b/openfl-docker/gramine_app/fx.manifest.template @@ -29,7 +29,6 @@ fs.mounts = [ { uri = "file:/usr", path = "/usr" }, { uri = "file:/etc/ssl/certs", path = "/etc/ssl/certs" }, { uri = "file:/workspace", path = "/workspace" }, - { uri = "file:/keys", path = "/keys" }, { uri = "file:/host_save_path", path = "/host_save_path" }, { type = "tmpfs", path = "/tmp" }, ] @@ -63,7 +62,6 @@ sgx.allowed_files = [ "file:/workspace/plan/", "file:/workspace/logs", "file:/workspace/cert", - "file:/keys", "file:/host_save_path", "file:/workspace/data", "file:/workspace/plan/cols.yaml", From 9be298c8329c7f1c74906ea4db1de134d610c441 Mon Sep 17 00:00:00 2001 From: "Shah, Karan" Date: Wed, 13 Nov 2024 13:09:01 +0530 Subject: [PATCH 34/62] Remove /host_save_path Signed-off-by: Shah, Karan --- openfl-docker/gramine_app/fx.manifest.template | 2 -- 1 file 
changed, 2 deletions(-) diff --git a/openfl-docker/gramine_app/fx.manifest.template b/openfl-docker/gramine_app/fx.manifest.template index 9898021f94..da9b3c7c7b 100755 --- a/openfl-docker/gramine_app/fx.manifest.template +++ b/openfl-docker/gramine_app/fx.manifest.template @@ -29,7 +29,6 @@ fs.mounts = [ { uri = "file:/usr", path = "/usr" }, { uri = "file:/etc/ssl/certs", path = "/etc/ssl/certs" }, { uri = "file:/workspace", path = "/workspace" }, - { uri = "file:/host_save_path", path = "/host_save_path" }, { type = "tmpfs", path = "/tmp" }, ] @@ -62,7 +61,6 @@ sgx.allowed_files = [ "file:/workspace/plan/", "file:/workspace/logs", "file:/workspace/cert", - "file:/host_save_path", "file:/workspace/data", "file:/workspace/plan/cols.yaml", "file:/workspace/plan/data.yaml", From 86888a8335ee183f20f4de074beb19b545f62a98 Mon Sep 17 00:00:00 2001 From: noopur Date: Wed, 13 Nov 2024 08:14:27 +0000 Subject: [PATCH 35/62] Use only agg accuracy Signed-off-by: noopur --- .github/workflows/task_runner_e2e.yml | 2 +- tests/end_to_end/utils/test_summary_helper.py | 124 ++++++++++++++++ tests/end_to_end/utils/xml_helper.py | 138 ------------------ 3 files changed, 125 insertions(+), 139 deletions(-) create mode 100644 tests/end_to_end/utils/test_summary_helper.py delete mode 100644 tests/end_to_end/utils/xml_helper.py diff --git a/.github/workflows/task_runner_e2e.yml b/.github/workflows/task_runner_e2e.yml index a1e52df6a3..e6b0b2edb3 100644 --- a/.github/workflows/task_runner_e2e.yml +++ b/.github/workflows/task_runner_e2e.yml @@ -78,7 +78,7 @@ jobs: if: steps.run_task_runner_tests.outcome == 'success' || steps.run_task_runner_tests.outcome == 'failure' run: | export PYTHONPATH="$PYTHONPATH:." 
- python tests/end_to_end/utils/xml_helper.py + python tests/end_to_end/utils/test_sumamry_helper.py echo "Test summary printed" - name: Tar files # Tar the test results only if the tests were run diff --git a/tests/end_to_end/utils/test_summary_helper.py b/tests/end_to_end/utils/test_summary_helper.py new file mode 100644 index 0000000000..e82ecfe2c2 --- /dev/null +++ b/tests/end_to_end/utils/test_summary_helper.py @@ -0,0 +1,124 @@ +# Copyright 2020-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import xml.etree.ElementTree as ET +from lxml import etree +import os + +# Initialize the XML parser +parser = etree.XMLParser(recover=True, encoding='utf-8') +tree = ET.parse("results/results.xml", parser=parser) + +# Get the root element +testsuites = tree.getroot() + + +def get_aggregated_accuracy(agg_log_file): + """ + Get the aggregated accuracy from aggregator logs + Args: + agg_log_file: the aggregator log file + Returns: + agg_accuracy: the aggregated accuracy + """ + if not os.path.exists(agg_log_file): + print(f"Aggregator log file {agg_log_file} not found. 
Cannot get aggregated accuracy") + return "Not Found" + + # Example line(s) containing spaces and special characters: + """ + METRIC {'metric_origin': 'aggregator', 'task_name': 'aggregated_model_validation', 'metric_name': 'accuracy', 'metric_value': aggregator.py:933 + 0.15911591053009033, 'round': 0} + """ + try: + with open(agg_log_file, 'r') as f: + for line in f: + if "metric_origin" in line and "aggregator" in line and "aggregated_model_validation" in line: + line = line.split("aggregator.py:")[0].strip() + # If the line does not contain closing bracket "}", then concatenate the next line + reqd_line = line if "}" in line else line + next(f).strip() + agg_accuracy = eval(reqd_line.split("METRIC")[1].strip('"'))["metric_value"] + return agg_accuracy + + except Exception as e: + # Do not fail the test if the accuracy cannot be fetched + print(f"Error while reading aggregator log file: {e}") + return "Not Found" + + +def get_test_status(result): + """ + Get the test status/verdict + Args + result: the result object to check` + Returns + status of the test status + """ + status = "FAILED" + if "failure" in result.tag or "error" in result.tag: + # If the result has a tag "failure", set status as "FAIL" + status = "FAILED" + elif "skipped" in result.tag: + # If the result has a tag "skipped", set status as "SKIPPED" + status = "SKIPPED" + else: + status = "PASSED" + return status + + +def get_testcase_result(): + """ + Get the test case results from the XML file + """ + database_list = [] + status = None + # Iterate over each testsuite in testsuites + for testsuite in testsuites: + # Populate testcase details in a dictionary + for testcase in testsuite: + database_dict = {} + if testcase.attrib.get("name"): + database_dict["name"] = testcase.attrib.get("name") + database_dict["time"] = testcase.attrib.get("time") + + # Successful test won't have any result/subtag + if len(testcase) == 0: + database_dict["result"] = "PASSED" + + # Iterate over each result in 
testsuite + for result in testcase: + status = get_test_status(result) + database_dict["result"] = status + + # Append the dictionary to database_list + database_list.append(database_dict) + status = None + + return database_list + + +if __name__ == "__main__": + """ + Main function to get the test case results and aggregator logs + And write the results to GitHub step summary + """ + result = get_testcase_result() + + num_cols = os.getenv("NUM_COLLABORATORS") + num_rounds = os.getenv("NUM_ROUNDS") + model_name = os.getenv("MODEL_NAME") + + if not model_name: + print("MODEL_NAME is not set, cannot find out aggregator logs") + else: + workspace_name = "workspace_" + model_name + agg_log_file = os.path.join("results", workspace_name, "aggregator.log") + agg_accuracy = get_aggregated_accuracy(agg_log_file) + + # Write the results to GitHub step summary + with open(os.getenv('GITHUB_STEP_SUMMARY'), 'a') as fh: + # DO NOT change the print statements + print("| Name | Time (in seconds) | Result | Collaborators | Rounds to train | Score (if applicable) |", file=fh) + print("| ------------- | ------------- | ------------- | ------------- | ------------- | ------------- |", file=fh) + for item in result: + print(f"| {item['name']} | {item['time']} | {item['result']} | {num_cols} | {num_rounds} | {agg_accuracy} |", file=fh) diff --git a/tests/end_to_end/utils/xml_helper.py b/tests/end_to_end/utils/xml_helper.py deleted file mode 100644 index 4b29f00acc..0000000000 --- a/tests/end_to_end/utils/xml_helper.py +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright 2020-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import xml.etree.ElementTree as ET -from lxml import etree -import os - -# Initialize the XML parser -parser = etree.XMLParser(recover=True, encoding='utf-8') -tree = ET.parse("results/results.xml", parser=parser) - -# Get the root element -testsuites = tree.getroot() - - -def get_aggregator_logs(model_name): - """ - Get the aggregator logs to fetch the metric 
values and scores - Args: - model_name: the model name for which the aggregator logs are to be fetched - Returns: - tuple: the locally tuned model validation, train, aggregated model validation and score - """ - lt_mv, train, agg_mv, score = None, None, None, "NA" - - workspace_name = "workspace_" + model_name - agg_log_file = os.path.join("results", workspace_name, "aggregator.log") - - if not os.path.exists(agg_log_file): - print(f"Aggregator log file {agg_log_file} not found.") - else: - with open(agg_log_file, 'r') as f: - for raw_line in f: - # Log file contains aggregator.py: which gets concatenated with the actual log line if not stripped - line = raw_line.strip() if "aggregator.py:" not in raw_line else raw_line.split("aggregator.py:")[0].strip() - # Fetch the metric origin and aggregator details - if "metric_origin" in line and "aggregator" in line: - if "locally_tuned_model_validation" in line: - reqd_line = line.strip() if "}" in line else line.strip() + next(f).strip() - lt_mv = eval(reqd_line.split("METRIC")[1].strip('"')) - if "train" in line: - reqd_line = line.strip() if "}" in line else line.strip() + next(f).strip() - train = eval(reqd_line.split("METRIC")[1].strip('"')) - if "aggregated_model_validation" in line: - reqd_line = line.strip() if "}" in line else line.strip() + next(f).strip() - agg_mv = eval(reqd_line.split("METRIC")[1].strip('"')) - - # Fetch the best model details - if "saved the best model" in line: - reqd_line = line.strip() - score_line = reqd_line.split("METRIC")[1].strip('"').strip() - score = score_line.split("score")[1].strip() - - return (lt_mv, train, agg_mv, score) - - -def get_test_status(result): - """ - Get the test status/verdict - Args - result: the result object to check` - Returns - status of the test status - """ - status = "FAILED" - if "failure" in result.tag or "error" in result.tag: - # If the result has a tag "failure", set status as "FAIL" - status = "FAILED" - elif "skipped" in result.tag: - # If the 
result has a tag "skipped", set status as "SKIPPED" - status = "SKIPPED" - else: - status = "PASSED" - return status - - -def get_testcase_result(): - """ - Get the test case results from the XML file - """ - database_list = [] - status = None - # Iterate over each testsuite in testsuites - for testsuite in testsuites: - # Populate testcase details in a dictionary - for testcase in testsuite: - database_dict = {} - if testcase.attrib.get("name"): - database_dict["name"] = testcase.attrib.get("name") - database_dict["time"] = testcase.attrib.get("time") - - # Successful test won't have any result/subtag - if len(testcase) == 0: - database_dict["result"] = "PASSED" - - # Iterate over each result in testsuite - for result in testcase: - status = get_test_status(result) - database_dict["result"] = status - - # Append the dictionary to database_list - database_list.append(database_dict) - status = None - - return database_list - - -if __name__ == "__main__": - """ - Main function to get the test case results and aggregator logs - And write the results to GitHub step summary - """ - score = "NA" - result = get_testcase_result() - - if not os.getenv("MODEL_NAME"): - print("MODEL_NAME is not set, cannot find out aggregator logs") - else: - (lt_mv, train, agg_mv, score) = get_aggregator_logs(os.getenv("MODEL_NAME")) - - num_cols = os.getenv("NUM_COLLABORATORS") - num_rounds = os.getenv("NUM_ROUNDS") - # Write the results to GitHub step summary - with open(os.getenv('GITHUB_STEP_SUMMARY'), 'a') as fh: - # DO NOT change the print statements - print("| Name | Time (in seconds) | Result | Score (if applicable) | Collaborators | Rounds to train |", file=fh) - print("| ------------- | ------------- | ------------- | ------------- | ------------- | ------------- |", file=fh) - for item in result: - print(f"| {item['name']} | {item['time']} | {item['result']} | {score} | {num_cols} | {num_rounds} |", file=fh) - print("", file=fh) - - # DO NOT change the print statements - if lt_mv 
and train and agg_mv: - print("| Task | Metric Name | Metric Value | Round |", file=fh) - print("| ------------- | ------------- | ------------- | ------------- |", file=fh) - print(f"| {lt_mv['task_name']} | {lt_mv['metric_name']} | {lt_mv['metric_value']} | {int(lt_mv['round'] + 1)} |", file=fh) - print(f"| {train['task_name']} | {train['metric_name']} | {train['metric_value']} | {int(train['round'] + 1)} |", file=fh) - print(f"| {agg_mv['task_name']} | {agg_mv['metric_name']} | {agg_mv['metric_value']} | {int(agg_mv['round'] + 1)} |", file=fh) From 2646607732c9ea333d12242b094d4aac2d58bcbc Mon Sep 17 00:00:00 2001 From: noopur Date: Wed, 13 Nov 2024 08:17:51 +0000 Subject: [PATCH 36/62] Typo fix Signed-off-by: noopur --- .github/workflows/task_runner_e2e.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/task_runner_e2e.yml b/.github/workflows/task_runner_e2e.yml index e6b0b2edb3..f1be185a89 100644 --- a/.github/workflows/task_runner_e2e.yml +++ b/.github/workflows/task_runner_e2e.yml @@ -78,7 +78,7 @@ jobs: if: steps.run_task_runner_tests.outcome == 'success' || steps.run_task_runner_tests.outcome == 'failure' run: | export PYTHONPATH="$PYTHONPATH:." 
- python tests/end_to_end/utils/test_sumamry_helper.py + python tests/end_to_end/utils/test_summary_helper.py echo "Test summary printed" - name: Tar files # Tar the test results only if the tests were run From a05cbf13cae2a1c3aef40bd14b830636782a1019 Mon Sep 17 00:00:00 2001 From: noopur Date: Wed, 13 Nov 2024 08:35:23 +0000 Subject: [PATCH 37/62] Renamed summary file Signed-off-by: noopur --- .github/workflows/task_runner_e2e.yml | 2 +- tests/end_to_end/models/participants.py | 1 - .../utils/{test_summary_helper.py => summary_helper.py} | 0 3 files changed, 1 insertion(+), 2 deletions(-) rename tests/end_to_end/utils/{test_summary_helper.py => summary_helper.py} (100%) diff --git a/.github/workflows/task_runner_e2e.yml b/.github/workflows/task_runner_e2e.yml index f1be185a89..f81869939b 100644 --- a/.github/workflows/task_runner_e2e.yml +++ b/.github/workflows/task_runner_e2e.yml @@ -78,7 +78,7 @@ jobs: if: steps.run_task_runner_tests.outcome == 'success' || steps.run_task_runner_tests.outcome == 'failure' run: | export PYTHONPATH="$PYTHONPATH:." 
- python tests/end_to_end/utils/test_summary_helper.py + python tests/end_to_end/utils/summary_helper.py echo "Test summary printed" - name: Tar files # Tar the test results only if the tests were run diff --git a/tests/end_to_end/models/participants.py b/tests/end_to_end/models/participants.py index 8dfe95b4fc..5dc582a06c 100644 --- a/tests/end_to_end/models/participants.py +++ b/tests/end_to_end/models/participants.py @@ -2,7 +2,6 @@ # SPDX-License-Identifier: Apache-2.0 import os -from datetime import datetime import yaml import logging diff --git a/tests/end_to_end/utils/test_summary_helper.py b/tests/end_to_end/utils/summary_helper.py similarity index 100% rename from tests/end_to_end/utils/test_summary_helper.py rename to tests/end_to_end/utils/summary_helper.py From 60c06226533246519659f64ebb1eeae86090dd2d Mon Sep 17 00:00:00 2001 From: noopur Date: Wed, 13 Nov 2024 08:49:57 +0000 Subject: [PATCH 38/62] Job name as tr to help in ZephyrScale later Signed-off-by: noopur --- .github/workflows/task_runner_e2e.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/task_runner_e2e.yml b/.github/workflows/task_runner_e2e.yml index f81869939b..84753daedf 100644 --- a/.github/workflows/task_runner_e2e.yml +++ b/.github/workflows/task_runner_e2e.yml @@ -30,7 +30,7 @@ env: jobs: test_run: - name: test + name: tr # do not change this name, it is used in the ZephyrScale fetch logic. 
runs-on: ubuntu-22.04 timeout-minutes: 120 # 2 hours strategy: From b9466d4f541288d25a775ddff25c6031fd56ce8e Mon Sep 17 00:00:00 2001 From: noopur Date: Wed, 13 Nov 2024 09:28:46 +0000 Subject: [PATCH 39/62] Remove comment related to ZS Signed-off-by: noopur --- .github/workflows/task_runner_e2e.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/task_runner_e2e.yml b/.github/workflows/task_runner_e2e.yml index 84753daedf..7f7f904aa3 100644 --- a/.github/workflows/task_runner_e2e.yml +++ b/.github/workflows/task_runner_e2e.yml @@ -30,7 +30,7 @@ env: jobs: test_run: - name: tr # do not change this name, it is used in the ZephyrScale fetch logic. + name: tr runs-on: ubuntu-22.04 timeout-minutes: 120 # 2 hours strategy: From 46c4453ac87d21a91025593fce8d879c9281791d Mon Sep 17 00:00:00 2001 From: gbikkiintel Date: Wed, 13 Nov 2024 08:58:21 -0600 Subject: [PATCH 40/62] TESTING: DON"T MERGE --- .github/workflows/trivy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index 0ef7e20e8c..c1d405c971 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -11,7 +11,7 @@ jobs: permissions: contents: read # for actions/checkout to fetch code security-events: write # for github/codeql-action/upload-sarif to upload SARIF results - actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status + actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status name: Build runs-on: ubuntu-22.04 steps: From 382151f8dc4a5bd61d19a673542c7795486d2e5f Mon Sep 17 00:00:00 2001 From: "Shah, Karan" Date: Wed, 13 Nov 2024 20:33:15 +0530 Subject: [PATCH 41/62] Simplify variables and constants Signed-off-by: Shah, Karan --- openfl-docker/gramine_app/Makefile | 14 +++----- .../gramine_app/fx.manifest.template | 35 ++++++++----------- 2 files 
changed, 20 insertions(+), 29 deletions(-) diff --git a/openfl-docker/gramine_app/Makefile b/openfl-docker/gramine_app/Makefile index 5d92ca3666..dbe4d1ce66 100644 --- a/openfl-docker/gramine_app/Makefile +++ b/openfl-docker/gramine_app/Makefile @@ -1,11 +1,9 @@ # Copyright (C) 2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 - +VENV_ROOT ?= /opt/venv +WORKSPACE_ROOT ?= /workspace ARCH_LIBDIR ?= /lib/$(shell $(CC) -dumpmachine) -# This is a signer key on the BUILDING machine -SGX_SIGNER_KEY ?= /key.pem - ifeq ($(DEBUG),1) GRAMINE_LOG_LEVEL = debug else @@ -20,14 +18,12 @@ endif fx.manifest: fx.manifest.template @echo "Making fx.manifest file" -# disable checks temp - until gramine is pegged to proper release gramine-manifest \ -Dlog_level=$(GRAMINE_LOG_LEVEL) \ -Darch_libdir=$(ARCH_LIBDIR) \ - -Dno_proxy=$(no_proxy) \ - -Dhttp_proxy=$(http_proxy) \ - -Dhttps_proxy=$(https_proxy) \ - -Dentrypoint=$(shell which fx) \ + -Dvenv_root=$(VENV_ROOT) \ + -Dentrypoint=$(VENV_ROOT)/bin/fx \ + -Dworkspace_root=$(WORKSPACE_ROOT) \ $< >$@ fx.manifest.sgx: fx.manifest diff --git a/openfl-docker/gramine_app/fx.manifest.template b/openfl-docker/gramine_app/fx.manifest.template index da9b3c7c7b..276a8d1268 100755 --- a/openfl-docker/gramine_app/fx.manifest.template +++ b/openfl-docker/gramine_app/fx.manifest.template @@ -5,30 +5,25 @@ # ================================== libos.entrypoint = "{{ entrypoint }}" -loader.entrypoint.uri = "file:{{ gramine.libos }}" - +loader.entrypoint = "file:{{ gramine.libos }}" loader.log_level = "{{ log_level }}" -loader.env.OMP_NUM_THREADS = "16" -loader.env.LD_LIBRARY_PATH = "{{ arch_libdir }}:/usr/{{ arch_libdir }}:/lib:/usr/lib" -loader.env.SSL_CERT_DIR = "/etc/ssl/certs" -# loader.env.no_proxy = "{{ no_proxy }}" -# loader.env.https_proxy = "{{ https_proxy }}" -# loader.env.http_proxy = "{{ http_proxy }}" - loader.insecure__use_cmdline_argv = true loader.insecure__use_host_env = true +loader.env.LD_LIBRARY_PATH = "{{ venv_root }}:{{ 
arch_libdir }}:/usr/{{ arch_libdir }}:/lib:/usr/lib" +loader.env.SSL_CERT_DIR = "/etc/ssl/certs" # URI - path on host # PATH - pointer inside gramine -fs.start_dir = "/workspace" +fs.start_dir = "{{ workspace_root }}" fs.mounts = [ { uri = "file:{{ gramine.runtimedir() }}", path = "/lib" }, { uri = "file:{{ arch_libdir }}", path = "{{ arch_libdir }}" }, { uri = "file:/usr", path = "/usr" }, { uri = "file:/etc/ssl/certs", path = "/etc/ssl/certs" }, - { uri = "file:/workspace", path = "/workspace" }, + { uri = "file:{{ workspace_root }}", path = "{{ workspace_root }}" }, + { uri = "file:{{ venv_root }}", path = "{{ venv_root }}" }, { type = "tmpfs", path = "/tmp" }, ] @@ -53,18 +48,18 @@ sgx.trusted_files = [ {% for path in python.get_sys_path('python') %} "file:{{ path }}{{ '/' if path.is_dir() else '' }}", {% endfor %} - "file:/workspace/src/", + "file:{{ venv_root }}/", + "file:{{ workspace_root }}/src/", ] sgx.allowed_files = [ - "file:/workspace/save", - "file:/workspace/plan/", - "file:/workspace/logs", - "file:/workspace/cert", - "file:/workspace/data", - "file:/workspace/plan/cols.yaml", - "file:/workspace/plan/data.yaml", - "file:/workspace/plan/plan.yaml", + "file:{{ workspace_root }}/save", + "file:{{ workspace_root }}/logs", + "file:{{ workspace_root }}/cert", + "file:{{ workspace_root }}/data", + "file:{{ workspace_root }}/plan/cols.yaml", + "file:{{ workspace_root }}/plan/data.yaml", + "file:{{ workspace_root }}/plan/plan.yaml", ] sgx.remote_attestation = "dcap" sgx.max_threads = 512 From 62fdafbc50a817cf4da042454842e7ef2150af26 Mon Sep 17 00:00:00 2001 From: gbikkiintel Date: Wed, 13 Nov 2024 09:09:04 -0600 Subject: [PATCH 42/62] Added trivy db --- .github/workflows/trivy.yml | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index c1d405c971..8a303bd765 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -23,11 +23,13 @@ jobs: docker build 
--pull -t docker.io/securefederatedai/openfl:${{ github.sha }} -f openfl-docker/Dockerfile.base . - name: Run Trivy vulnerability scanner - uses: aquasecurity/trivy-action@0.24.0 + uses: aquasecurity/trivy-action@0.28.0 with: image-ref: 'docker.io/securefederatedai/openfl:${{ github.sha }}' format: 'sarif' output: 'trivy-results.sarif' + env: + TRIVY_DB_REPOSITORY: 'ghcr.io/aquasecurity/trivy-db,public.ecr.aws/aquasecurity/trivy-db' - name: Upload Trivy scan results to GitHub Security tab uses: github/codeql-action/upload-sarif@v2 @@ -56,7 +58,7 @@ jobs: path: trivy-code-results.json - name: Run Trivy vulnerability scanner for Docker image (JSON Output) - uses: aquasecurity/trivy-action@0.24.0 + uses: aquasecurity/trivy-action@0.28.0 with: image-ref: 'docker.io/securefederatedai/openfl:${{ github.sha }}' format: 'json' @@ -65,6 +67,8 @@ jobs: ignore-unfixed: true vuln-type: 'os,library' severity: 'CRITICAL,HIGH,MEDIUM,LOW' + env: + TRIVY_DB_REPOSITORY: 'ghcr.io/aquasecurity/trivy-db,public.ecr.aws/aquasecurity/trivy-db' - name: Upload Docker Vulnerability Scan uses: actions/upload-artifact@v3 @@ -90,7 +94,7 @@ jobs: path: trivy-code-spdx-results.json - name: Run Trivy vulnerability scanner for Docker image (SPDX-JSON Output) - uses: aquasecurity/trivy-action@0.24.0 + uses: aquasecurity/trivy-action@0.28.0 with: image-ref: 'docker.io/securefederatedai/openfl:${{ github.sha }}' format: 'spdx-json' @@ -99,6 +103,8 @@ jobs: ignore-unfixed: true vuln-type: 'os,library' severity: 'CRITICAL,HIGH,MEDIUM,LOW' + env: + TRIVY_DB_REPOSITORY: 'ghcr.io/aquasecurity/trivy-db,public.ecr.aws/aquasecurity/trivy-db' - name: Upload Docker Vulnerability Scan uses: actions/upload-artifact@v3 From f25ef3dcccf3bce53b21b15c8e5a8414e66d1b6e Mon Sep 17 00:00:00 2001 From: noopur Date: Wed, 13 Nov 2024 17:07:45 +0000 Subject: [PATCH 43/62] Handling for non-TLS scenarios Signed-off-by: noopur --- .../task_runner_e2e_non_tls_client_auth.yml | 93 +++++++++++++++++++ tests/end_to_end/conftest.py 
| 53 ++++++++--- tests/end_to_end/models/participants.py | 42 ++++++++- .../test_suites/task_runner_tests.py | 9 +- tests/end_to_end/utils/conftest_helper.py | 6 +- tests/end_to_end/utils/federation_helper.py | 1 - 6 files changed, 183 insertions(+), 21 deletions(-) create mode 100644 .github/workflows/task_runner_e2e_non_tls_client_auth.yml diff --git a/.github/workflows/task_runner_e2e_non_tls_client_auth.yml b/.github/workflows/task_runner_e2e_non_tls_client_auth.yml new file mode 100644 index 0000000000..2694b6014e --- /dev/null +++ b/.github/workflows/task_runner_e2e_non_tls_client_auth.yml @@ -0,0 +1,93 @@ +#--------------------------------------------------------------------------- +# Workflow to run Task Runner end to end tests with non TLS client auth +# Authors - Noopur, Payal Chaurasiya +#--------------------------------------------------------------------------- +name: Task Runner E2E With Non-TLS Client Auth + +on: + workflow_dispatch: + inputs: + num_rounds: + description: 'Number of rounds to train' + required: false + default: "5" + type: string + num_collaborators: + description: 'Number of collaborators' + required: false + default: "2" + type: string + +permissions: + contents: read + +# Environment variables common for all the jobs +env: + NUM_ROUNDS: ${{ inputs.num_rounds || '5' }} + NUM_COLLABORATORS: ${{ inputs.num_collaborators || '2' }} + +jobs: + test_run: + name: tr + runs-on: ubuntu-22.04 + timeout-minutes: 120 # 2 hours + strategy: + matrix: + # There are open issues for some of the models, so excluding them for now: + # model_name: [ "torch_cnn_mnist", "keras_cnn_mnist", "torch_cnn_histology" ] + model_name: [ "torch_cnn_mnist" ] + python_version: [ "3.10" ] + fail-fast: false # do not immediately fail if one of the combinations fail + + env: + MODEL_NAME: ${{ matrix.model_name }} + PYTHON_VERSION: ${{ matrix.python_version }} + + steps: + - name: Checkout OpenFL repository + id: checkout_openfl + uses: actions/checkout@v4.1.1 + with: 
+ fetch-depth: 2 # needed for detecting changes + submodules: "true" + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Set up Python + id: setup_python + uses: actions/setup-python@v3 + with: + python-version: ${{ env.PYTHON_VERSION }} + + - name: Install dependencies + id: install_dependencies + run: | + python -m pip install --upgrade pip + pip install . + pip install -r test-requirements.txt + + - name: Run Task Runner E2E tests + id: run_task_runner_tests + run: | + python -m pytest -s tests/end_to_end/test_suites/task_runner_tests.py -m ${{ env.MODEL_NAME }} --num_rounds $NUM_ROUNDS --num_collaborators $NUM_COLLABORATORS --model_name ${{ env.MODEL_NAME }} --disable_tls + echo "Task runner end to end test run completed" + + - name: Print test summary # Print the test summary only if the tests were run + id: print_test_summary + if: steps.run_task_runner_tests.outcome == 'success' || steps.run_task_runner_tests.outcome == 'failure' + run: | + export PYTHONPATH="$PYTHONPATH:." + python tests/end_to_end/utils/summary_helper.py + echo "Test summary printed" + + - name: Tar files # Tar the test results only if the tests were run + id: tar_files + if: steps.run_task_runner_tests.outcome == 'success' || steps.run_task_runner_tests.outcome == 'failure' + run: tar -cvf result.tar results + + - name: Upload Artifacts # Upload the test results only if the tar was created + id: upload_artifacts + uses: actions/upload-artifact@v4 + if: steps.tar_files.outcome == 'success' + with: + name: task_runner_${{ env.MODEL_NAME }}_python${{ env.PYTHON_VERSION }}_${{ github.run_id }} + path: result.tar diff --git a/tests/end_to_end/conftest.py b/tests/end_to_end/conftest.py index 3ccffb0e0f..a0d246470f 100644 --- a/tests/end_to_end/conftest.py +++ b/tests/end_to_end/conftest.py @@ -17,7 +17,7 @@ # Define a named tuple to store the objects for model owner, aggregator, and collaborators federation_fixture = collections.namedtuple( "federation_fixture", - "model_owner, aggregator, 
collaborators, model_name, workspace_path, results_dir", + "model_owner, aggregator, collaborators, model_name, disable_client_auth, disable_tls, workspace_path, results_dir", ) @@ -53,6 +53,16 @@ def pytest_addoption(parser): default=constants.DEFAULT_MODEL_NAME, help="Model name", ) + parser.addoption( + "--disable_client_auth", + action="store_true", + help="Disable client authentication", + ) + parser.addoption( + "--disable_tls", + action="store_true", + help="Disable TLS for communication", + ) @pytest.fixture(scope="session", autouse=True) @@ -213,7 +223,6 @@ def fx_federation(request, pytestconfig): Note: As this is a module level fixture, thus no import is required at test level. """ - log.info("Fixture for federation setup using Task Runner API on single machine.") collaborators = [] agg_domain_name = "localhost" @@ -223,6 +232,17 @@ def fx_federation(request, pytestconfig): results_dir = args.results_dir or pytestconfig.getini("results_dir") num_collaborators = args.num_collaborators num_rounds = args.num_rounds + disable_client_auth = args.disable_client_auth + disable_tls = args.disable_tls + + log.info( + f"Running federation setup using Task Runner API on single machine with below configurations:\n" + f"\tNumber of collaborators: {num_collaborators}\n" + f"\tNumber of rounds: {num_rounds}\n" + f"\tModel name: {model_name}\n" + f"\tClient authentication: {disable_client_auth}\n" + f"\tTLS: {not disable_tls}" + ) # Validate the model name and create the workspace name if not model_name.upper() in constants.ModelName._member_names_: @@ -232,32 +252,40 @@ def fx_federation(request, pytestconfig): # Create model owner object and the workspace for the model model_owner = participants.ModelOwner(workspace_name, model_name) + try: workspace_path = model_owner.create_workspace(results_dir=results_dir) except Exception as e: log.error(f"Failed to create the workspace: {e}") raise e - # Modify and initialize the plan + # Modify the plan try: - 
model_owner.modify_plan(new_rounds=num_rounds, num_collaborators=num_collaborators) + model_owner.modify_plan(new_rounds=num_rounds, num_collaborators=num_collaborators, disable_tls=disable_tls) except Exception as e: log.error(f"Failed to modify the plan: {e}") raise e + # For TLS enabled (default) scenario: when the workspace is certified, the collaborators are registered as well + # For TLS disabled scenario: collaborators need to be registered explicitly + if args.disable_tls: + log.info("Disabling TLS for communication") + model_owner.register_collaborators(num_collaborators) + else: + log.info("Enabling TLS for communication") + try: + model_owner.certify_workspace() + except Exception as e: + log.error(f"Failed to certify the workspace: {e}") + raise e + + # Initialize the plan try: model_owner.initialize_plan(agg_domain_name=agg_domain_name) except Exception as e: log.error(f"Failed to initialize the plan: {e}") raise e - # Modify and initialize the plan - try: - model_owner.certify_workspace() - except Exception as e: - log.error(f"Failed to certify the workspace: {e}") - raise e - # Create the objects for aggregator and collaborators aggregator = participants.Aggregator( agg_domain_name=agg_domain_name, workspace_path=workspace_path @@ -269,6 +297,7 @@ def fx_federation(request, pytestconfig): data_directory_path=i + 1, workspace_path=workspace_path, ) + collaborator.create_collaborator() collaborators.append(collaborator) # Return the federation fixture @@ -277,6 +306,8 @@ def fx_federation(request, pytestconfig): aggregator=aggregator, collaborators=collaborators, model_name=model_name, + disable_client_auth=disable_client_auth, + disable_tls=disable_tls, workspace_path=workspace_path, results_dir=results_dir, ) diff --git a/tests/end_to_end/models/participants.py b/tests/end_to_end/models/participants.py index 5dc582a06c..7c49a2a7ac 100644 --- a/tests/end_to_end/models/participants.py +++ b/tests/end_to_end/models/participants.py @@ -112,17 +112,18 @@ 
def certify_collaborator(self, collaborator_name): raise e return True - def modify_plan(self, new_rounds=None, num_collaborators=None): + def modify_plan(self, new_rounds=None, num_collaborators=None, disable_client_auth=False, disable_tls=False): """ Modify the plan to train the model Args: new_rounds (int): Number of rounds to train num_collaborators (int): Number of collaborators + disable_client_auth (bool): Disable client authentication + disable_tls (bool): Disable TLS communication Returns: bool: True if successful, else False """ self.plan_path = os.path.join(self.workspace_path, "plan", "plan.yaml") - log.info(f"Modifying the plan at {self.plan_path}") # Open the file and modify the entries self.rounds_to_train = new_rounds if new_rounds else self.rounds_to_train self.num_collaborators = num_collaborators if num_collaborators else self.num_collaborators @@ -132,13 +133,13 @@ def modify_plan(self, new_rounds=None, num_collaborators=None): data["aggregator"]["settings"]["rounds_to_train"] = int(self.rounds_to_train) data["data_loader"]["settings"]["collaborator_count"] = int(self.num_collaborators) + data["network"]["settings"]["disable_client_auth"] = True if disable_client_auth else False + data["network"]["settings"]["tls"] = False if disable_tls else True with open(self.plan_path, "w+") as write_file: yaml.dump(data, write_file) - log.info( - f"Modified the plan to train the model for collaborators {self.num_collaborators} and {self.rounds_to_train} rounds" - ) + log.info(f"Modified the plan at {self.plan_path} with provided parameters.") return True def initialize_plan(self, agg_domain_name): @@ -180,6 +181,37 @@ def certify_workspace(self): raise e return True + def register_collaborators(self, num_collaborators=None): + """ + Register the collaborators + Args: + num_collaborators (int, Optional): Number of collaborators + Returns: + bool: True if successful, else False + """ + self.cols_path = os.path.join(self.workspace_path, "plan", "cols.yaml") + 
log.info(f"Registering the collaborators in {self.cols_path}") + # Open the file and modify the entries + self.num_collaborators = num_collaborators if num_collaborators else self.num_collaborators + + # Straightforward writing to the yaml file is not recommended here + # As the file might contain spaces and tabs which can cause issues + with open(self.cols_path, "r", encoding="utf-8") as f: + doc = yaml.load(f, Loader=yaml.FullLoader) + + if "collaborators" not in doc.keys() or not doc["collaborators"]: + doc["collaborators"] = [] # Create empty list + + for i in range(num_collaborators): + col_name = "collaborator" + str(i+1) + doc["collaborators"].append(col_name) + with open(self.cols_path, "w", encoding="utf-8") as f: + yaml.dump(doc, f) + + log.info( + f"Modified the plan to train the model for collaborators {self.num_collaborators} and {self.rounds_to_train} rounds" + ) + def certify_aggregator(self, agg_domain_name): """ Certify the aggregator request diff --git a/tests/end_to_end/test_suites/task_runner_tests.py b/tests/end_to_end/test_suites/task_runner_tests.py index a80c583acf..371fee8f08 100644 --- a/tests/end_to_end/test_suites/task_runner_tests.py +++ b/tests/end_to_end/test_suites/task_runner_tests.py @@ -17,7 +17,8 @@ def test_torch_cnn_mnist(fx_federation): log.info("Testing torch_cnn_mnist model") # Setup PKI for trusted communication within the federation - assert fed_helper.setup_pki(fx_federation), "Failed to setup PKI for trusted communication" + if not fx_federation.disable_tls: + assert fed_helper.setup_pki(fx_federation), "Failed to setup PKI for trusted communication" # Start the federation results = fed_helper.run_federation(fx_federation) @@ -31,7 +32,8 @@ def test_keras_cnn_mnist(fx_federation): log.info("Testing keras_cnn_mnist model") # Setup PKI for trusted communication within the federation - assert fed_helper.setup_pki(fx_federation), "Failed to setup PKI for trusted communication" + if not fx_federation.disable_tls: + assert 
fed_helper.setup_pki(fx_federation), "Failed to setup PKI for trusted communication" # Start the federation results = fed_helper.run_federation(fx_federation) @@ -48,7 +50,8 @@ def test_torch_cnn_histology(fx_federation): log.info("Testing torch_cnn_histology model") # Setup PKI for trusted communication within the federation - assert fed_helper.setup_pki(fx_federation), "Failed to setup PKI for trusted communication" + if not fx_federation.disable_tls: + assert fed_helper.setup_pki(fx_federation), "Failed to setup PKI for trusted communication" # Start the federation results = fed_helper.run_federation(fx_federation) diff --git a/tests/end_to_end/utils/conftest_helper.py b/tests/end_to_end/utils/conftest_helper.py index 490a3316db..92a2395a22 100644 --- a/tests/end_to_end/utils/conftest_helper.py +++ b/tests/end_to_end/utils/conftest_helper.py @@ -18,16 +18,20 @@ def parse_arguments(): - num_collaborators (int, default=2): Number of collaborators - num_rounds (int, default=5): Number of rounds to train - model_name (str, default="torch_cnn_mnist"): Model name + - disable_client_auth (bool): Disable client authentication + - disable_tls (bool): Disable TLS for communication Raises: SystemExit: If the required arguments are not provided or if any argument parsing error occurs. 
""" try: parser = argparse.ArgumentParser(description="Provide the required arguments to run the tests") - parser.add_argument("--results_dir", type=str, required=False, help="Directory to store the results") + parser.add_argument("--results_dir", type=str, required=False, default="results", help="Directory to store the results") parser.add_argument("--num_collaborators", type=int, default=2, help="Number of collaborators") parser.add_argument("--num_rounds", type=int, default=5, help="Number of rounds to train") parser.add_argument("--model_name", type=str, default="torch_cnn_mnist", help="Model name") + parser.add_argument("--disable_client_auth", action="store_true", help="Disable client authentication") + parser.add_argument("--disable_tls", action="store_true", help="Disable TLS for communication") args = parser.parse_known_args()[0] return args diff --git a/tests/end_to_end/utils/federation_helper.py b/tests/end_to_end/utils/federation_helper.py index 3cb091b7ce..1da1c68012 100644 --- a/tests/end_to_end/utils/federation_helper.py +++ b/tests/end_to_end/utils/federation_helper.py @@ -33,7 +33,6 @@ def setup_pki(fed_obj): for collaborator in fed_obj.collaborators: try: log.info(f"Performing operations for {collaborator.collaborator_name}") - collaborator.create_collaborator() collaborator.generate_sign_request() # Below step will add collaborator entries in cols.yaml file. 
fed_obj.model_owner.certify_collaborator(collaborator.collaborator_name) From a56f8f6574438e2d7bc1c71a841f44721d41c12f Mon Sep 17 00:00:00 2001 From: noopur Date: Wed, 13 Nov 2024 17:11:14 +0000 Subject: [PATCH 44/62] Log correction Signed-off-by: noopur --- tests/end_to_end/conftest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/end_to_end/conftest.py b/tests/end_to_end/conftest.py index a0d246470f..12e16f1229 100644 --- a/tests/end_to_end/conftest.py +++ b/tests/end_to_end/conftest.py @@ -240,7 +240,7 @@ def fx_federation(request, pytestconfig): f"\tNumber of collaborators: {num_collaborators}\n" f"\tNumber of rounds: {num_rounds}\n" f"\tModel name: {model_name}\n" - f"\tClient authentication: {disable_client_auth}\n" + f"\tClient authentication: {not disable_client_auth}\n" f"\tTLS: {not disable_tls}" ) From 547d496139dde04e81217d960dba8bbbfa73d125 Mon Sep 17 00:00:00 2001 From: "Rancurel, Vianney" Date: Tue, 12 Nov 2024 13:26:29 -0800 Subject: [PATCH 45/62] deprecate interactive API - Point interactive_api examples to openfl-contrib (where they have been moved) - Mark interactive_api as deprecated in the doc - Remove interactive_api from CI/CD Signed-off-by: Rancurel, Vianney --- .github/dependabot.yml | 6 ----- .github/workflows/ubuntu.yml | 22 +------------------ .github/workflows/windows.yml | 22 +------------------ README.md | 5 +---- docs/about/features.rst | 5 ++--- docs/about/features_index/interactive.rst | 8 +++---- docs/about/features_index/taskrunner.rst | 2 +- docs/about/releases.md | 14 ++++++------ .../advanced_topics/overriding_agg_fn.rst | 4 ++-- .../overriding_plan_settings.rst | 8 +++---- docs/developer_guide/structure/components.rst | 8 +++---- docs/get_started/examples.rst | 10 ++++----- .../examples/interactive_tensorflow_mnist.rst | 6 ++--- .../interactive_api/Flax_CNN_CIFAR/README.md | 0 .../director/director_config.yaml | 0 .../Flax_CNN_CIFAR/director/start_director.sh | 0 .../envoy/cifar10_shard_descriptor.py 
| 0 .../Flax_CNN_CIFAR/envoy/envoy_config_1.yaml | 0 .../Flax_CNN_CIFAR/envoy/start_envoy.sh | 0 .../Flax_CNN_CIFAR/requirements.txt | 0 .../workspace/FLAX_CIFAR10_CNN.ipynb | 0 .../HPU/PyTorch_Kvasir_UNet/README.md | 0 .../director/director_config.yaml | 0 .../director/start_director.sh | 0 .../director/start_director_with_tls.sh | 0 .../envoy/envoy_config_no_gpu.yaml | 0 .../envoy/kvasir_shard_descriptor.py | 0 ...sir_shard_descriptor_with_data_splitter.py | 0 .../envoy/sd_requirements.txt | 0 .../PyTorch_Kvasir_UNet/envoy/start_envoy.sh | 0 .../envoy/start_envoy_with_tls.sh | 0 .../workspace/PyTorch_Kvasir_UNet.ipynb | 0 .../PyTorch_Kvasir_UNet/workspace/layers.py | 0 .../HPU/PyTorch_MedMNIST_2D/README.md | 0 .../director/director_config.yaml | 0 .../director/start_director.sh | 0 .../envoy/envoy_config.yaml | 0 .../envoy/medmnist_shard_descriptor.py | 0 .../envoy/requirements.txt | 0 .../PyTorch_MedMNIST_2D/envoy/start_envoy.sh | 0 .../workspace/HPU_Pytorch_MedMNIST_2D.ipynb | 0 .../HPU/PyTorch_TinyImageNet/README.md | 0 .../director/director_config.yaml | 0 .../director/start_director.sh | 0 .../envoy/envoy_config_1.yaml | 0 .../envoy/requirements.txt | 0 .../PyTorch_TinyImageNet/envoy/start_envoy.sh | 0 .../envoy/tinyimagenet_shard_descriptor.py | 0 .../workspace/hpu_pytorch_tinyimagenet.ipynb | 0 .../interactive_api/MXNet_landmarks/README.md | 0 .../director/director_config.yaml | 0 .../director/start_director.sh | 0 .../envoy/envoy_config_one.yaml | 0 .../envoy/envoy_config_two.yaml | 0 .../envoy/landmark_shard_descriptor.py | 0 .../MXNet_landmarks/envoy/sd_requirements.txt | 0 .../MXNet_landmarks/envoy/start_envoy.sh | 0 .../workspace/MXNet_landmarks.ipynb | 0 .../workspace/mxnet_adapter.py | 0 .../workspace/requirements.txt | 0 .../PyTorch_DogsCats_ViT/README.md | 0 .../director/director_config.yaml | 0 .../director/start_director.sh | 0 .../envoy/dogs_cats_shard_descriptor.py | 0 .../envoy/envoy_config_one.yaml | 0 .../envoy/envoy_config_two.yaml | 0 
.../envoy/sd_requirements.txt | 0 .../PyTorch_DogsCats_ViT/envoy/start_envoy.sh | 0 .../workspace/PyTorch_DogsCats_ViT.ipynb | 0 .../workspace/requirements.txt | 0 .../PyTorch_FedProx_MNIST/README.md | 0 .../director/director_config.yaml | 0 .../director/start_director.sh | 0 .../envoy/envoy_config.yaml | 0 .../envoy/medmnist_shard_descriptor.py | 0 .../envoy/requirements.txt | 0 .../envoy/start_envoy.sh | 0 .../Pytorch_FedProx_MedMNIST_2D.ipynb | 0 .../PyTorch_Histology/README.md | 0 .../director/director_config.yaml | 0 .../director/director_config_review_exp.yaml | 0 .../director/start_director.sh | 0 .../director/start_director_with_tls.sh | 0 .../PyTorch_Histology/envoy/.gitignore | 0 .../PyTorch_Histology/envoy/envoy_config.yaml | 0 .../envoy/envoy_config_review_exp.yaml | 0 .../envoy/histology_shard_descriptor.py | 0 .../PyTorch_Histology/envoy/requirements.txt | 0 .../PyTorch_Histology/envoy/start_envoy.sh | 0 .../envoy/start_envoy_with_tls.sh | 0 .../workspace/pytorch_histology.ipynb | 0 .../PyTorch_Histology_FedCurv/README.md | 0 .../director/director_config.yaml | 0 .../director/start_director.sh | 0 .../director/start_director_with_tls.sh | 0 .../envoy/.gitignore | 0 .../envoy/envoy_config.yaml | 0 .../envoy/histology_shard_descriptor.py | 0 .../envoy/populate_envoys.sh | 0 .../envoy/requirements.txt | 0 .../envoy/start_envoy.sh | 0 .../envoy/start_envoy_with_tls.sh | 0 .../envoy/start_envoys.sh | 0 .../workspace/.gitignore | 0 .../workspace/pytorch_histology.ipynb | 0 .../README.md | 0 .../director/director_config.yaml | 0 .../director/start_director.sh | 0 .../envoy/envoy_config.yaml | 0 .../envoy/sd_requirements.txt | 0 .../envoy/start_envoy.sh | 0 .../envoy/superb_shard_descriptor.py | 0 ...orch_Huggingface_transformers_SUPERB.ipynb | 0 .../PyTorch_Kvasir_UNet/README.md | 0 .../director/director_config.yaml | 0 .../director/start_director.sh | 0 .../director/start_director_with_tls.sh | 0 .../envoy/envoy_config.yaml | 0 
.../envoy/envoy_config_no_gpu.yaml | 0 .../envoy/kvasir_shard_descriptor.py | 0 ...sir_shard_descriptor_with_data_splitter.py | 0 .../envoy/sd_requirements.txt | 0 .../PyTorch_Kvasir_UNet/envoy/start_envoy.sh | 0 .../envoy/start_envoy_with_tls.sh | 0 .../workspace/PyTorch_Kvasir_UNet.ipynb | 0 .../PyTorch_Kvasir_UNet/workspace/layers.py | 0 .../PyTorch_Lightning_MNIST_GAN/README.md | 0 .../director/director_config.yaml | 0 .../director/start_director.sh | 0 .../envoy/envoy_config.yaml | 0 .../envoy/envoy_config_no_gpu.yaml | 0 .../envoy/mnist_shard_descriptor.py | 0 .../envoy/sd_requirements.txt | 0 .../envoy/start_envoy.sh | 0 .../workspace/PyTorch_Lightning_GAN.ipynb | 0 .../plugin_for_multiple_optimizers.py | 0 .../PyTorch_LinearRegression/README.md | 0 .../director/director_config.yaml | 0 .../director/start_director.sh | 0 .../envoy/envoy_config.yaml | 0 .../envoy/regression_shard_descriptor.py | 0 .../envoy/requirements.txt | 0 .../envoy/start_envoy.sh | 0 .../workspace/requirements.txt | 0 .../workspace/torch_linear_regression.ipynb | 0 .../PyTorch_MVTec_PatchSVDD/README.md | 0 .../director/director_config.yaml | 0 .../director/start_director.sh | 0 .../director/start_director_with_tls.sh | 0 .../envoy/envoy_config.yaml | 0 .../envoy/mvtec_shard_descriptor.py | 0 .../envoy/sd_requirements.txt | 0 .../envoy/start_envoy.sh | 0 .../envoy/start_envoy_with_tls.sh | 0 .../workspace/PatchSVDD_with_Director.ipynb | 0 .../workspace/data_transf.py | 0 .../workspace/inspection.py | 0 .../workspace/utils.py | 0 .../PyTorch_Market_Re-ID/README.md | 0 .../director/director_config.yaml | 0 .../director/start_director.sh | 0 .../director/start_director_with_tls.sh | 0 .../envoy/envoy_config_one.yaml | 0 .../envoy/envoy_config_two.yaml | 0 .../envoy/market_shard_descriptor.py | 0 .../envoy/requirements.txt | 0 .../PyTorch_Market_Re-ID/envoy/start_envoy.sh | 0 .../envoy/start_envoy_with_tls.sh | 0 .../workspace/PyTorch_Market_Re-ID.ipynb | 0 
.../PyTorch_Market_Re-ID/workspace/losses.py | 0 .../workspace/requirements.txt | 0 .../PyTorch_Market_Re-ID/workspace/tools.py | 0 .../workspace/transforms.py | 0 .../PyTorch_MedMNIST_2D/README.md | 0 .../director/director_config.yaml | 0 .../director/start_director.sh | 0 .../envoy/envoy_config.yaml | 0 .../envoy/medmnist_shard_descriptor.py | 0 .../envoy/requirements.txt | 0 .../PyTorch_MedMNIST_2D/envoy/start_envoy.sh | 0 .../workspace/Pytorch_MedMNIST_2D.ipynb | 0 .../PyTorch_MedMNIST_3D/README.md | 0 .../director/director_config.yaml | 0 .../director/start_director.sh | 0 .../envoy/envoy_config.yaml | 0 .../envoy/medmnist_shard_descriptor.py | 0 .../envoy/requirements.txt | 0 .../PyTorch_MedMNIST_3D/envoy/start_envoy.sh | 0 .../workspace/Pytorch_MedMNIST_3D.ipynb | 0 .../workspace/wspace_utils/__init__.py | 0 .../workspace/wspace_utils/batchnorm.py | 0 .../workspace/wspace_utils/comm.py | 0 .../workspace/wspace_utils/replicate.py | 0 .../workspace/wspace_utils/utils.py | 0 .../PyTorch_TinyImageNet/README.md | 0 .../director/director_config.yaml | 0 .../director/start_director.sh | 0 .../director/start_director_with_tls.sh | 0 .../envoy/envoy_config.yaml | 0 .../envoy/requirements.txt | 0 .../PyTorch_TinyImageNet/envoy/start_envoy.sh | 0 .../envoy/start_envoy_with_tls.sh | 0 .../envoy/tinyimagenet_shard_descriptor.py | 0 .../workspace/non-federated_case.ipynb | 0 .../workspace/pytorch_tinyimagenet.ipynb | 0 .../workspace/requirements.txt | 0 .../PyTorch_TinyImageNet_XPU/README.md | 0 .../director/director_config.yaml | 0 .../director/start_director.sh | 0 .../director/start_director_with_tls.sh | 0 .../envoy/envoy_config.yaml | 0 .../envoy/requirements.txt | 0 .../envoy/start_envoy.sh | 0 .../envoy/start_envoy_with_tls.sh | 0 .../envoy/tinyimagenet_shard_descriptor.py | 0 .../workspace/pytorch_tinyimagenet_XPU.ipynb | 0 .../workspace/requirements.txt | 0 .../interactive_api/README.md | 0 .../Tensorflow_CIFAR_tfdata/README.md | 0 
.../director/director_config.yaml | 0 .../director/start_director.sh | 0 .../envoy/cifar10_shard_descriptor.py | 0 .../envoy/envoy_config_one.yaml | 0 .../envoy/envoy_config_two.yaml | 0 .../envoy/start_envoy.sh | 0 .../workspace/Tensorflow_CIFAR.ipynb | 0 .../Tensorflow_MNIST/README.md | 0 .../director/director_config.yaml | 0 .../director/start_director.sh | 0 .../director/start_director_with_tls.sh | 0 .../envoy/envoy_config_one.yaml | 0 .../envoy/envoy_config_two.yaml | 0 .../envoy/mnist_shard_descriptor.py | 0 .../Tensorflow_MNIST/envoy/start_envoy.sh | 0 .../envoy/start_envoy_with_tls.sh | 0 .../workspace/Tensorflow_MNIST.ipynb | 0 .../Tensorflow_Word_Prediction/README.md | 0 .../director/director_config.yaml | 0 .../director/start_director.sh | 0 .../director/start_director_with_tls.sh | 0 .../envoy/envoy_config_one.yaml | 0 .../envoy/envoy_config_three.yaml | 0 .../envoy/envoy_config_two.yaml | 0 .../envoy/sd_requirements.txt | 0 .../envoy/shard_descriptor.py | 0 .../envoy/start_envoy.sh | 0 .../envoy/start_envoy_with_tls.sh | 0 .../Tensorflow_Word_Prediction.ipynb | 0 .../workspace/requirements.txt | 0 .../jax_linear_regression/README.md | 0 .../director/director_config.yaml | 0 .../director/start_director.sh | 0 .../envoy/envoy_config_1.yaml | 0 .../envoy/regression_shard_descriptor.py | 0 .../envoy/requirements.txt | 0 .../envoy/start_envoy.sh | 0 .../workspace/JAX_linear_regression.ipynb | 0 .../workspace/custom_adapter.py | 0 .../numpy_linear_regression/README.md | 0 .../director/director_config.yaml | 0 .../envoy/envoy_config.yaml | 0 .../envoy/linreg_shard_descriptor.py | 0 .../envoy/requirements.txt | 0 .../workspace/LinReg.ipynb | 0 .../workspace/SingleNotebook.ipynb | 0 .../workspace/custom_adapter.py | 0 .../workspace/requirements.txt | 0 .../workspace/start_federation.ipynb | 0 .../scikit_learn_linear_regression/README.md | 0 .../director/director_config.yaml | 0 .../director/start_director.sh | 0 .../envoy/envoy_config.yaml | 0 
.../envoy/linreg_shard_descriptor.py | 0 .../envoy/requirements.txt | 0 .../envoy/start_envoy.sh | 0 .../workspace/custom_adapter.py | 0 .../workspace/requirements.txt | 0 .../scikit_learn_linear_regression.ipynb | 0 278 files changed, 35 insertions(+), 85 deletions(-) rename openfl-tutorials/{ => deprecated}/interactive_api/Flax_CNN_CIFAR/README.md (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Flax_CNN_CIFAR/director/director_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Flax_CNN_CIFAR/director/start_director.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Flax_CNN_CIFAR/envoy/cifar10_shard_descriptor.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Flax_CNN_CIFAR/envoy/envoy_config_1.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Flax_CNN_CIFAR/envoy/start_envoy.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Flax_CNN_CIFAR/requirements.txt (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Flax_CNN_CIFAR/workspace/FLAX_CIFAR10_CNN.ipynb (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/HPU/PyTorch_Kvasir_UNet/README.md (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/HPU/PyTorch_Kvasir_UNet/director/director_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/HPU/PyTorch_Kvasir_UNet/director/start_director.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/HPU/PyTorch_Kvasir_UNet/director/start_director_with_tls.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/HPU/PyTorch_Kvasir_UNet/envoy/envoy_config_no_gpu.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/HPU/PyTorch_Kvasir_UNet/envoy/kvasir_shard_descriptor.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/HPU/PyTorch_Kvasir_UNet/envoy/kvasir_shard_descriptor_with_data_splitter.py (100%) rename openfl-tutorials/{ => 
deprecated}/interactive_api/HPU/PyTorch_Kvasir_UNet/envoy/sd_requirements.txt (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/HPU/PyTorch_Kvasir_UNet/envoy/start_envoy.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/HPU/PyTorch_Kvasir_UNet/envoy/start_envoy_with_tls.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/HPU/PyTorch_Kvasir_UNet/workspace/PyTorch_Kvasir_UNet.ipynb (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/HPU/PyTorch_Kvasir_UNet/workspace/layers.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/HPU/PyTorch_MedMNIST_2D/README.md (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/HPU/PyTorch_MedMNIST_2D/director/director_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/HPU/PyTorch_MedMNIST_2D/director/start_director.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/HPU/PyTorch_MedMNIST_2D/envoy/envoy_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/HPU/PyTorch_MedMNIST_2D/envoy/medmnist_shard_descriptor.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/HPU/PyTorch_MedMNIST_2D/envoy/requirements.txt (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/HPU/PyTorch_MedMNIST_2D/envoy/start_envoy.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/HPU/PyTorch_MedMNIST_2D/workspace/HPU_Pytorch_MedMNIST_2D.ipynb (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/HPU/PyTorch_TinyImageNet/README.md (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/HPU/PyTorch_TinyImageNet/director/director_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/HPU/PyTorch_TinyImageNet/director/start_director.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/HPU/PyTorch_TinyImageNet/envoy/envoy_config_1.yaml (100%) rename openfl-tutorials/{ => 
deprecated}/interactive_api/HPU/PyTorch_TinyImageNet/envoy/requirements.txt (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/HPU/PyTorch_TinyImageNet/envoy/start_envoy.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/HPU/PyTorch_TinyImageNet/envoy/tinyimagenet_shard_descriptor.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/HPU/PyTorch_TinyImageNet/workspace/hpu_pytorch_tinyimagenet.ipynb (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/MXNet_landmarks/README.md (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/MXNet_landmarks/director/director_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/MXNet_landmarks/director/start_director.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/MXNet_landmarks/envoy/envoy_config_one.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/MXNet_landmarks/envoy/envoy_config_two.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/MXNet_landmarks/envoy/landmark_shard_descriptor.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/MXNet_landmarks/envoy/sd_requirements.txt (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/MXNet_landmarks/envoy/start_envoy.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/MXNet_landmarks/workspace/MXNet_landmarks.ipynb (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/MXNet_landmarks/workspace/mxnet_adapter.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/MXNet_landmarks/workspace/requirements.txt (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_DogsCats_ViT/README.md (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_DogsCats_ViT/director/director_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_DogsCats_ViT/director/start_director.sh (100%) rename openfl-tutorials/{ => 
deprecated}/interactive_api/PyTorch_DogsCats_ViT/envoy/dogs_cats_shard_descriptor.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_DogsCats_ViT/envoy/envoy_config_one.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_DogsCats_ViT/envoy/envoy_config_two.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_DogsCats_ViT/envoy/sd_requirements.txt (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_DogsCats_ViT/envoy/start_envoy.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_DogsCats_ViT/workspace/PyTorch_DogsCats_ViT.ipynb (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_DogsCats_ViT/workspace/requirements.txt (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_FedProx_MNIST/README.md (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_FedProx_MNIST/director/director_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_FedProx_MNIST/director/start_director.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_FedProx_MNIST/envoy/envoy_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_FedProx_MNIST/envoy/medmnist_shard_descriptor.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_FedProx_MNIST/envoy/requirements.txt (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_FedProx_MNIST/envoy/start_envoy.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_FedProx_MNIST/workspace/Pytorch_FedProx_MedMNIST_2D.ipynb (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Histology/README.md (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Histology/director/director_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Histology/director/director_config_review_exp.yaml (100%) 
rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Histology/director/start_director.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Histology/director/start_director_with_tls.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Histology/envoy/.gitignore (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Histology/envoy/envoy_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Histology/envoy/envoy_config_review_exp.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Histology/envoy/histology_shard_descriptor.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Histology/envoy/requirements.txt (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Histology/envoy/start_envoy.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Histology/envoy/start_envoy_with_tls.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Histology/workspace/pytorch_histology.ipynb (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Histology_FedCurv/README.md (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Histology_FedCurv/director/director_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Histology_FedCurv/director/start_director.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Histology_FedCurv/director/start_director_with_tls.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Histology_FedCurv/envoy/.gitignore (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Histology_FedCurv/envoy/envoy_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Histology_FedCurv/envoy/histology_shard_descriptor.py (100%) rename openfl-tutorials/{ => 
deprecated}/interactive_api/PyTorch_Histology_FedCurv/envoy/populate_envoys.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Histology_FedCurv/envoy/requirements.txt (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Histology_FedCurv/envoy/start_envoy.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Histology_FedCurv/envoy/start_envoy_with_tls.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Histology_FedCurv/envoy/start_envoys.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Histology_FedCurv/workspace/.gitignore (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Histology_FedCurv/workspace/pytorch_histology.ipynb (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Huggingface_transformers_SUPERB/README.md (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Huggingface_transformers_SUPERB/director/director_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Huggingface_transformers_SUPERB/director/start_director.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Huggingface_transformers_SUPERB/envoy/envoy_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Huggingface_transformers_SUPERB/envoy/sd_requirements.txt (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Huggingface_transformers_SUPERB/envoy/start_envoy.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Huggingface_transformers_SUPERB/envoy/superb_shard_descriptor.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Huggingface_transformers_SUPERB/workspace/PyTorch_Huggingface_transformers_SUPERB.ipynb (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Kvasir_UNet/README.md (100%) rename openfl-tutorials/{ => 
deprecated}/interactive_api/PyTorch_Kvasir_UNet/director/director_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Kvasir_UNet/director/start_director.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Kvasir_UNet/director/start_director_with_tls.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Kvasir_UNet/envoy/envoy_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Kvasir_UNet/envoy/envoy_config_no_gpu.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Kvasir_UNet/envoy/kvasir_shard_descriptor.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Kvasir_UNet/envoy/kvasir_shard_descriptor_with_data_splitter.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Kvasir_UNet/envoy/sd_requirements.txt (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Kvasir_UNet/envoy/start_envoy.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Kvasir_UNet/envoy/start_envoy_with_tls.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Kvasir_UNet/workspace/PyTorch_Kvasir_UNet.ipynb (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Kvasir_UNet/workspace/layers.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Lightning_MNIST_GAN/README.md (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Lightning_MNIST_GAN/director/director_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Lightning_MNIST_GAN/director/start_director.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Lightning_MNIST_GAN/envoy/envoy_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Lightning_MNIST_GAN/envoy/envoy_config_no_gpu.yaml (100%) rename openfl-tutorials/{ => 
deprecated}/interactive_api/PyTorch_Lightning_MNIST_GAN/envoy/mnist_shard_descriptor.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Lightning_MNIST_GAN/envoy/sd_requirements.txt (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Lightning_MNIST_GAN/envoy/start_envoy.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Lightning_MNIST_GAN/workspace/PyTorch_Lightning_GAN.ipynb (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Lightning_MNIST_GAN/workspace/plugin_for_multiple_optimizers.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_LinearRegression/README.md (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_LinearRegression/director/director_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_LinearRegression/director/start_director.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_LinearRegression/envoy/envoy_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_LinearRegression/envoy/regression_shard_descriptor.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_LinearRegression/envoy/requirements.txt (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_LinearRegression/envoy/start_envoy.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_LinearRegression/workspace/requirements.txt (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_LinearRegression/workspace/torch_linear_regression.ipynb (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_MVTec_PatchSVDD/README.md (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_MVTec_PatchSVDD/director/director_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_MVTec_PatchSVDD/director/start_director.sh (100%) rename openfl-tutorials/{ => 
deprecated}/interactive_api/PyTorch_MVTec_PatchSVDD/director/start_director_with_tls.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_MVTec_PatchSVDD/envoy/envoy_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_MVTec_PatchSVDD/envoy/mvtec_shard_descriptor.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_MVTec_PatchSVDD/envoy/sd_requirements.txt (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_MVTec_PatchSVDD/envoy/start_envoy.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_MVTec_PatchSVDD/envoy/start_envoy_with_tls.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_MVTec_PatchSVDD/workspace/PatchSVDD_with_Director.ipynb (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_MVTec_PatchSVDD/workspace/data_transf.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_MVTec_PatchSVDD/workspace/inspection.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_MVTec_PatchSVDD/workspace/utils.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Market_Re-ID/README.md (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Market_Re-ID/director/director_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Market_Re-ID/director/start_director.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Market_Re-ID/director/start_director_with_tls.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Market_Re-ID/envoy/envoy_config_one.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Market_Re-ID/envoy/envoy_config_two.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Market_Re-ID/envoy/market_shard_descriptor.py (100%) rename openfl-tutorials/{ => 
deprecated}/interactive_api/PyTorch_Market_Re-ID/envoy/requirements.txt (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Market_Re-ID/envoy/start_envoy.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Market_Re-ID/envoy/start_envoy_with_tls.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Market_Re-ID/workspace/PyTorch_Market_Re-ID.ipynb (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Market_Re-ID/workspace/losses.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Market_Re-ID/workspace/requirements.txt (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Market_Re-ID/workspace/tools.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_Market_Re-ID/workspace/transforms.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_MedMNIST_2D/README.md (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_MedMNIST_2D/director/director_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_MedMNIST_2D/director/start_director.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_MedMNIST_2D/envoy/envoy_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_MedMNIST_2D/envoy/medmnist_shard_descriptor.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_MedMNIST_2D/envoy/requirements.txt (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_MedMNIST_2D/envoy/start_envoy.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_MedMNIST_2D/workspace/Pytorch_MedMNIST_2D.ipynb (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_MedMNIST_3D/README.md (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_MedMNIST_3D/director/director_config.yaml (100%) rename openfl-tutorials/{ => 
deprecated}/interactive_api/PyTorch_MedMNIST_3D/director/start_director.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_MedMNIST_3D/envoy/envoy_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_MedMNIST_3D/envoy/medmnist_shard_descriptor.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_MedMNIST_3D/envoy/requirements.txt (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_MedMNIST_3D/envoy/start_envoy.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_MedMNIST_3D/workspace/Pytorch_MedMNIST_3D.ipynb (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_MedMNIST_3D/workspace/wspace_utils/__init__.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_MedMNIST_3D/workspace/wspace_utils/batchnorm.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_MedMNIST_3D/workspace/wspace_utils/comm.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_MedMNIST_3D/workspace/wspace_utils/replicate.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_MedMNIST_3D/workspace/wspace_utils/utils.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_TinyImageNet/README.md (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_TinyImageNet/director/director_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_TinyImageNet/director/start_director.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_TinyImageNet/director/start_director_with_tls.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_TinyImageNet/envoy/envoy_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_TinyImageNet/envoy/requirements.txt (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_TinyImageNet/envoy/start_envoy.sh (100%) 
rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_TinyImageNet/envoy/start_envoy_with_tls.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_TinyImageNet/envoy/tinyimagenet_shard_descriptor.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_TinyImageNet/workspace/non-federated_case.ipynb (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_TinyImageNet/workspace/pytorch_tinyimagenet.ipynb (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_TinyImageNet/workspace/requirements.txt (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_TinyImageNet_XPU/README.md (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_TinyImageNet_XPU/director/director_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_TinyImageNet_XPU/director/start_director.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_TinyImageNet_XPU/director/start_director_with_tls.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_TinyImageNet_XPU/envoy/envoy_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_TinyImageNet_XPU/envoy/requirements.txt (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_TinyImageNet_XPU/envoy/start_envoy.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_TinyImageNet_XPU/envoy/start_envoy_with_tls.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_TinyImageNet_XPU/envoy/tinyimagenet_shard_descriptor.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_TinyImageNet_XPU/workspace/pytorch_tinyimagenet_XPU.ipynb (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/PyTorch_TinyImageNet_XPU/workspace/requirements.txt (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/README.md (100%) rename openfl-tutorials/{ => 
deprecated}/interactive_api/Tensorflow_CIFAR_tfdata/README.md (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Tensorflow_CIFAR_tfdata/director/director_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Tensorflow_CIFAR_tfdata/director/start_director.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Tensorflow_CIFAR_tfdata/envoy/cifar10_shard_descriptor.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Tensorflow_CIFAR_tfdata/envoy/envoy_config_one.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Tensorflow_CIFAR_tfdata/envoy/envoy_config_two.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Tensorflow_CIFAR_tfdata/envoy/start_envoy.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Tensorflow_CIFAR_tfdata/workspace/Tensorflow_CIFAR.ipynb (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Tensorflow_MNIST/README.md (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Tensorflow_MNIST/director/director_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Tensorflow_MNIST/director/start_director.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Tensorflow_MNIST/director/start_director_with_tls.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Tensorflow_MNIST/envoy/envoy_config_one.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Tensorflow_MNIST/envoy/envoy_config_two.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Tensorflow_MNIST/envoy/mnist_shard_descriptor.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Tensorflow_MNIST/envoy/start_envoy.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Tensorflow_MNIST/envoy/start_envoy_with_tls.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Tensorflow_MNIST/workspace/Tensorflow_MNIST.ipynb (100%) rename openfl-tutorials/{ => 
deprecated}/interactive_api/Tensorflow_Word_Prediction/README.md (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Tensorflow_Word_Prediction/director/director_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Tensorflow_Word_Prediction/director/start_director.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Tensorflow_Word_Prediction/director/start_director_with_tls.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Tensorflow_Word_Prediction/envoy/envoy_config_one.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Tensorflow_Word_Prediction/envoy/envoy_config_three.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Tensorflow_Word_Prediction/envoy/envoy_config_two.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Tensorflow_Word_Prediction/envoy/sd_requirements.txt (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Tensorflow_Word_Prediction/envoy/shard_descriptor.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Tensorflow_Word_Prediction/envoy/start_envoy.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Tensorflow_Word_Prediction/envoy/start_envoy_with_tls.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Tensorflow_Word_Prediction/workspace/Tensorflow_Word_Prediction.ipynb (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/Tensorflow_Word_Prediction/workspace/requirements.txt (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/jax_linear_regression/README.md (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/jax_linear_regression/director/director_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/jax_linear_regression/director/start_director.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/jax_linear_regression/envoy/envoy_config_1.yaml (100%) rename openfl-tutorials/{ => 
deprecated}/interactive_api/jax_linear_regression/envoy/regression_shard_descriptor.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/jax_linear_regression/envoy/requirements.txt (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/jax_linear_regression/envoy/start_envoy.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/jax_linear_regression/workspace/JAX_linear_regression.ipynb (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/jax_linear_regression/workspace/custom_adapter.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/numpy_linear_regression/README.md (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/numpy_linear_regression/director/director_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/numpy_linear_regression/envoy/envoy_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/numpy_linear_regression/envoy/linreg_shard_descriptor.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/numpy_linear_regression/envoy/requirements.txt (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/numpy_linear_regression/workspace/LinReg.ipynb (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/numpy_linear_regression/workspace/SingleNotebook.ipynb (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/numpy_linear_regression/workspace/custom_adapter.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/numpy_linear_regression/workspace/requirements.txt (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/numpy_linear_regression/workspace/start_federation.ipynb (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/scikit_learn_linear_regression/README.md (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/scikit_learn_linear_regression/director/director_config.yaml (100%) rename openfl-tutorials/{ => 
deprecated}/interactive_api/scikit_learn_linear_regression/director/start_director.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/scikit_learn_linear_regression/envoy/envoy_config.yaml (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/scikit_learn_linear_regression/envoy/linreg_shard_descriptor.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/scikit_learn_linear_regression/envoy/requirements.txt (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/scikit_learn_linear_regression/envoy/start_envoy.sh (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/scikit_learn_linear_regression/workspace/custom_adapter.py (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/scikit_learn_linear_regression/workspace/requirements.txt (100%) rename openfl-tutorials/{ => deprecated}/interactive_api/scikit_learn_linear_regression/workspace/scikit_learn_linear_regression.ipynb (100%) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index a15810bd5c..641c278909 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -18,9 +18,3 @@ updates: interval: "daily" ignore: - dependency-name: "*" - - package-ecosystem: pip - directory: /openfl-tutorials/interactive_api - schedule: - interval: "daily" - ignore: - - dependency-name: "*" diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index bead9ebbdc..175e71b862 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -47,28 +47,8 @@ jobs: coverage run -m pytest -rA coverage report - interactive-kvasir: # from interactive-kvasir.yml - needs: [lint, pytest-coverage] - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: Set up Python 3.8 - uses: actions/setup-python@v3 - with: - python-version: "3.8" - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install . 
- - name: Interactive API - pytorch_kvasir_unet - run: | - python setup.py build_grpc - pip install torch==1.13.1 - pip install torchvision==0.14.1 - python -m tests.github.interactive_api_director.experiments.pytorch_kvasir_unet.run - cli: - needs: [lint, pytest-coverage, interactive-kvasir] + needs: [lint, pytest-coverage] runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index a9aac81654..bd751fc10c 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -31,28 +31,8 @@ jobs: coverage run -m pytest -rA coverage report - interactive-kvasir: # from interactive-kvasir.yml - needs: [pytest-coverage] - runs-on: windows-latest - steps: - - uses: actions/checkout@v3 - - name: Set up Python 3.8 - uses: actions/setup-python@v3 - with: - python-version: "3.8" - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install . - - name: Interactive API - pytorch_kvasir_unet - run: | - python setup.py build_grpc - pip install torch==1.13.1 - pip install torchvision==0.14.1 - python -m tests.github.interactive_api_director.experiments.pytorch_kvasir_unet.run - cli: # from taskrunner.yml - needs: [pytest-coverage, interactive-kvasir] + needs: [pytest-coverage] runs-on: windows-latest steps: - uses: actions/checkout@v3 diff --git a/README.md b/README.md index 50ef28ba66..aba069a867 100644 --- a/README.md +++ b/README.md @@ -17,7 +17,7 @@ Coverity Scan Build Status -[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/intel/openfl/blob/develop/openfl-tutorials/interactive_api/numpy_linear_regression/workspace/SingleNotebook.ipynb) +[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/intel/openfl/blob/develop/openfl-tutorials/Federated_Pytorch_MNIST_Tutorial.ipynb) Open Federated Learning (OpenFL) is a Python 3 
framework for Federated Learning. OpenFL is designed to be a _flexible_, _extensible_ and _easily learnable_ tool for data scientists. OpenFL is hosted by The Linux Foundation, aims to be community-driven, and welcomes contributions back to the project. @@ -37,9 +37,6 @@ For more installation options check out the [online documentation](https://openf OpenFL enables data scientists to set up a federated learning experiment following one of the workflows: -- [Director-based Workflow](https://openfl.readthedocs.io/en/latest/about/features_index/interactive.html): -Setup long-lived components to run many experiments in series. Recommended for FL research when many changes to model, dataloader, or hyperparameters are expected - - [Aggregator-based Workflow](https://openfl.readthedocs.io/en/latest/about/features_index/taskrunner.html): Define an experiment and distribute it manually. All participants can verify model code and [FL plan](https://openfl.readthedocs.io/en/latest/about/features_index/taskrunner.html#federated-learning-plan-fl-plan-settings) prior to execution. The federation is terminated when the experiment is finished diff --git a/docs/about/features.rst b/docs/about/features.rst index 4bb5b5f9d3..d57569484f 100644 --- a/docs/about/features.rst +++ b/docs/about/features.rst @@ -23,7 +23,7 @@ Task Runner features_index/taskrunner -Interactive +Interactive (Deprecated) Setup long-lived components to run many experiments in series. Recommended for FL research when many changes to model, dataloader, or hyperparameters are expected. Formerly known as the director-based workflow. For more info see :doc:`features_index/interactive` @@ -83,7 +83,6 @@ FedCurv Use :py:class:`openfl.utilities.fedcurv.torch.FedCurv` to override train function using :code:`.get_penalty()`, :code:`.on_train_begin()`, and :code:`.on_train_end()` methods. 
In addition, you should override default :code:`AggregationFunction` of the train task with :class:`openfl.interface.aggregation_functions.FedCurvWeightedAverage`. - See :code:`PyTorch_Histology_FedCurv` tutorial in :code:`../openfl-tutorials/interactive_api` directory for more details. .. _federated_evaluation: @@ -110,4 +109,4 @@ Quantitatively audit data privacy in statistical and machine learning algorithms :hidden: features_index/privacy_meter - \ No newline at end of file + diff --git a/docs/about/features_index/interactive.rst b/docs/about/features_index/interactive.rst index 3a37e2992c..ea7654443c 100644 --- a/docs/about/features_index/interactive.rst +++ b/docs/about/features_index/interactive.rst @@ -3,9 +3,9 @@ .. _running_interactive: -================ -Interactive API -================ +============================ +Interactive API (Deprecated) +============================ A director-based workflow uses long-lived components in a federation. These components continue to be available to distribute more experiments in the federation. @@ -670,4 +670,4 @@ Assigner with additional validation round: .. toctree .. overview.how_can_intel_protect_federated_learning -.. overview.what_is_intel_federated_learning \ No newline at end of file +.. overview.what_is_intel_federated_learning diff --git a/docs/about/features_index/taskrunner.rst b/docs/about/features_index/taskrunner.rst index 4e67622088..f4e3cc0730 100644 --- a/docs/about/features_index/taskrunner.rst +++ b/docs/about/features_index/taskrunner.rst @@ -112,7 +112,7 @@ Each task subsection contains the following: .. _running_the_federation_manual: -.. _interactive_api: +.. 
_interactive_api (Deprecated): diff --git a/docs/about/releases.md b/docs/about/releases.md index b1aa526bcc..22ffc7443b 100644 --- a/docs/about/releases.md +++ b/docs/about/releases.md @@ -11,7 +11,7 @@ - **Workflow API enhancements**: Introducing an experimental [Workspace Export](https://github.com/securefederatedai/openfl/blob/develop/openfl-tutorials/experimental/1001_Workspace_Creation_from_JupyterNotebook.ipynb) feature that can be used to transform a Workflow API-based FL experiment into the TaskRunner API format for running in a distributed deployment. There is also groundwork laid for a future FederatedRuntime implementation for Workflow API, in addition to the currently supported LocalRuntime. - **Federated Evaluation**: Federated evaluation allows for the assessment of ML models in a federated learning system by validating the model's performance locally on decentralized collaborator nodes, and then aggregating these metrics to gauge overall effectiveness, without compromising data privacy and security. FE is now officially supported by OpenFL, including [example tutorials](https://openfl.readthedocs.io/en/latest/about/features_index/fed_eval.html) on how to use this new feature (via TaskRunner API). 
-- **Expanded AI Accelerator Support**: Intel® Data Center GPU Max Series support via the Intel® Extension for PyTorch, including examples for training on datasets such as [MNIST](https://github.com/securefederatedai/openfl/blob/develop/openfl-tutorials/experimental/104_MNIST_XPU.ipynb) (via Workflow API) and [TinyImageNet](https://github.com/securefederatedai/openfl/tree/develop/openfl-tutorials/interactive_api/PyTorch_TinyImageNet_XPU) (via Interactive API) +- **Expanded AI Accelerator Support**: Intel® Data Center GPU Max Series support via the Intel® Extension for PyTorch, including examples for training on datasets such as [MNIST](https://github.com/securefederatedai/openfl/blob/develop/openfl-tutorials/experimental/104_MNIST_XPU.ipynb) (via Workflow API) and [TinyImageNet](https://github.com/securefederatedai/openfl/tree/main/openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet_XPU) (via Interactive API) - **Improved straggler collaborator handling**: Improvements and bug fixes to aggregator’s fault-tolerance when collaborators stop responding or drop out of a federation. Introducing a cut-off timer-based policy and enabling other policies to be plugged-in. This capability is particularly relevant for large or geo-distributed federations. @@ -35,9 +35,9 @@ We are excited to announce the release of OpenFL 1.5.1 - our first since moving - **Documentation accessibility improvements**: As part of our [Global Accessibility Awareness Day](https://www.intel.com/content/www/us/en/developer/articles/community/open-fl-project-improve-accessibility-for-devs.html) (GAAD) Pledge, the OpenFL project is making strides towards more accessible documentation. 
This release includes the integration of [Intel® One Mono](https://www.intel.com/content/www/us/en/company-overview/one-monospace-font.html) font, contrast color improvements, formatting improvements, and [new accessibility focused issues](https://github.com/securefederatedai/openfl/issues?q=is%3Aissue+is%3Aopen+accessibility) to take up in the future. - **[Documentation to federate a Generally Nuanced Deep Learning Framework (GaNDLF) model with OpenFL](https://openfl.readthedocs.io/en/latest/running_the_federation_with_gandlf.html)** - **New OpenFL Interactive API Tutorials**: - [Linear regression with SciKit-Learn](https://github.com/securefederatedai/openfl/tree/develop/openfl-tutorials/interactive_api/scikit_learn_linear_regression) - [MedMNIST 2D Classification Using FedProx Optimizer](https://github.com/securefederatedai/openfl/blob/develop/openfl-tutorials/interactive_api/PyTorch_FedProx_MNIST/README.md?plain=1) - [PyTorch Linear Regression Example](https://github.com/securefederatedai/openfl/tree/develop/openfl-tutorials/interactive_api/PyTorch_LinearRegression) + - [Linear regression with SciKit-Learn](https://github.com/securefederatedai/openfl/tree/main/openfl-tutorials/deprecated/interactive_api/scikit_learn_linear_regression) + - [MedMNIST 2D Classification Using FedProx Optimizer](https://github.com/securefederatedai/openfl/tree/main/openfl-tutorials/deprecated/interactive_api/PyTorch_FedProx_MNIST/README.md?plain=1) + - [PyTorch Linear Regression Example](https://github.com/securefederatedai/openfl/tree/main/openfl-tutorials/deprecated/interactive_api/PyTorch_LinearRegression) - **Improvements to workspace export and import** - **Many documentation improvements and updates** - **Bug fixes** @@ -54,10 +54,10 @@ We are excited to announce the release of OpenFL 1.5.1 - our first since moving * **[Vertical Federated Learning Examples](https://github.com/intel/openfl/tree/develop/openfl-tutorials/experimental/Vertical_FL)** * **[Federated Model 
Watermarking](https://github.com/intel/openfl/blob/develop/openfl-tutorials/experimental/301_MNIST_Watermarking.ipynb)** using the [WAFFLE](https://arxiv.org/pdf/2008.07298.pdf) method * **[Differential Privacy](https://github.com/intel/openfl/tree/develop/openfl-tutorials/experimental/Global_DP)** – Global differentially private federated learning using Opacus library to achieve a differentially private result w.r.t the inclusion or exclusion of any collaborator in the training process. At each round, a subset of collaborators are selected using a Poisson distribution over all collaborators, the selected collaborators perform local training with periodic clipping of their model delta (with respect to the current global model) to bound their contribution to the average of local model updates. Gaussian noise is then added to the average of these local models at the aggregator. This example is implemented in two different but statistically equivalent ways – the lower level API utilizes RDPAccountant and DPDataloader Opacus objects to perform privacy accounting and collaborator selection respectively, whereas the higher level API uses PrivacyEngine Opacus object for collaborator selection and internally utilizes RDPAccountant for privacy accounting. 
-* **[Habana Accelerator Support](https://github.com/intel/openfl/tree/develop/openfl-tutorials/interactive_api/HPU/PyTorch_TinyImageNet)** +* **[Habana Accelerator Support](https://github.com/securefederatedai/openfl/tree/main/openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_TinyImageNet)** * **Official support for Python 3.9 and 3.10** * **[EDEN Compression Pipeline](https://github.com/intel/openfl/blob/develop/openfl/pipelines/eden_pipeline.py)**: Communication-Efficient and Robust Distributed Mean Estimation for Federated Learning ([paper link](https://proceedings.mlr.press/v162/vargaftik22a.html)) -* **[FLAX Framework Support](https://github.com/intel/openfl/tree/develop/openfl-tutorials/interactive_api/Flax_CNN_CIFAR)** +* **[FLAX Framework Support](https://github.com/securefederatedai/openfl/tree/main/openfl-tutorials/deprecated/interactive_api/Flax_CNN_CIFAR)** * **Improvements to the resiliency and security of the director / envoy infrastructure**: * Optional notification to plan participants to agree to experiment sent to their infrastructure * Improved resistance to loss of network connectivity and failure at various stages of execution @@ -109,7 +109,7 @@ The OpenFL v1.2 release contains the following updates: The OpenFL v1.1 release contains the following updates: -- New [Interactive Python API](https://github.com/intel/openfl/blob/develop/openfl-tutorials/interactive_api_tutorials_(experimental)/Pytorch_Kvasir_UNET_workspace/new_python_api_UNET.ipynb) (experimental) +- New [Interactive Python API](https://github.com/securefederatedai/openfl/tree/main/openfl-tutorials/deprecated/interactive_api) (experimental) - Example FedProx algorithm implementation for PyTorch and Tensorflow - `AggregationFunctionInterface` for custom aggregation functions - Adds a [Keras-based NLP Example](https://github.com/intel/openfl/tree/develop/openfl-workspace/keras_nlp) diff --git a/docs/developer_guide/advanced_topics/overriding_agg_fn.rst 
b/docs/developer_guide/advanced_topics/overriding_agg_fn.rst index 141b7d1282..ba393f86ca 100644 --- a/docs/developer_guide/advanced_topics/overriding_agg_fn.rst +++ b/docs/developer_guide/advanced_topics/overriding_agg_fn.rst @@ -194,8 +194,8 @@ The following is an example of a **plan.yaml** with a modified aggregation funct - loss -Interactive API -================ +Interactive API (Deprecated) +============================ You can override aggregation function that will be used for the task this function corresponds to. In order to do this, call the ``set_aggregation_function`` decorator method of ``TaskInterface`` and pass ``AggregationFunction`` subclass instance as a parameter. For example, you can try: diff --git a/docs/developer_guide/advanced_topics/overriding_plan_settings.rst b/docs/developer_guide/advanced_topics/overriding_plan_settings.rst index 25c3b59384..629e8a017a 100644 --- a/docs/developer_guide/advanced_topics/overriding_plan_settings.rst +++ b/docs/developer_guide/advanced_topics/overriding_plan_settings.rst @@ -8,7 +8,7 @@ Updating plan settings *********************** With the director-based workflow, you can use custom plan settings before starting the experiment. Changing plan settings in command line interface is straightforward by modifying plan.yaml. -When using Python API or Director Envoy based interactive API, **override_config** can be used to update plan settings. +When using Python API or Director Envoy based interactive API (Deprecated), **override_config** can be used to update plan settings. Python API @@ -24,8 +24,8 @@ Modify the plan settings: }) -Director Envoy Based Interactive API Interface -============================================== +Director Envoy Based Interactive API Interface (Deprecated) +=========================================================== Once you create an FL_experiment object, a basic federated learning plan with default settings is created. 
To check the default plan settings, print the plan as shown below: .. code-block:: python @@ -96,4 +96,4 @@ Since 'aggregator.settings.db_store_rounds' and 'compression_pipeline.template' INFO Did not find compression_pipeline.settings.n_clusters in config. Make sure it should exist. Creating... native.py:105 -A full implementation can be found at `Federated_Pytorch_MNIST_Tutorial.ipynb `_ and at `Tensorflow_MNIST.ipynb `_. \ No newline at end of file +A full implementation can be found at `Federated_Pytorch_MNIST_Tutorial.ipynb `_ and at `Tensorflow_MNIST.ipynb `_. diff --git a/docs/developer_guide/structure/components.rst b/docs/developer_guide/structure/components.rst index 160c0bb84c..ad2cc140dd 100644 --- a/docs/developer_guide/structure/components.rst +++ b/docs/developer_guide/structure/components.rst @@ -49,7 +49,7 @@ The Collaborator is a short-lived entity that manages training the model on loca - exchanging model parameters with the Aggregator. The Collaborator is created by the :ref:`Envoy ` when a new experiment is submitted -in the :ref:`Director-based workflow `. The Collaborator should be started from CLI if a user follows the +in the :ref:`Director-based workflow ` (Deprecated). The Collaborator should be started from CLI if a user follows the :ref:`Aggregator-based workflow ` Every Collaborator is a unique service. The data loader is loaded with a local *shard descriptor* to perform tasks @@ -67,7 +67,7 @@ they would like see supported in |productName|. Long-Lived Components ====================== -These components were introduced to support the :ref:`Director-based workflow `. +These components were introduced to support the :ref:`Director-based workflow ` (Deprecated). - The *Director* is the central node of the federation. This component starts an *Aggregator* for each experiment, broadcasts experiment archive to connected collaborator nodes, and provides updates on the status. 
- The *Envoy* runs on collaborator nodes and is always connected to the *Director*. When the *Director* starts an experiment, the *Envoy* starts the *Collaborator* to train the global model. @@ -81,7 +81,7 @@ Director The Director is a long-lived entity and is the central node of the federation. It accepts connections from: - - Frontend clients (data scientists using :ref:`interactive_python_api`) + - Frontend clients (data scientists using :ref:`interactive_python_api`) (Deprecated) - Envoys, if their Shard Descriptors are complient to the same data interface The Director supports concurrent frontend connections. @@ -101,7 +101,7 @@ The Envoy is a long-lived entity that runs on collaborator nodes connected to th Every Envoy is matched to one `shard descriptor `_ in order to run. When the Director starts an experiment, the Envoy accepts the experiment workspace, -prepares the environment, and starts a Collaborator. +prepares the environment, and starts a Collaborator. (Note this approach is deprecated) The envoy is also responsible for sending heartbeat messages to the Director. These messages may also include information regarding collaborator machine resource utilization. Refer to :ref:`device monitor plugin ` for details. diff --git a/docs/get_started/examples.rst b/docs/get_started/examples.rst index 4b9ad39f66..f358090e06 100644 --- a/docs/get_started/examples.rst +++ b/docs/get_started/examples.rst @@ -8,9 +8,9 @@ Examples for Running a Federation ================================= |productName| currently offers four ways to set up and run experiments with a federation: -the Task Runner API, Python Native API, the Interactive API, and the Workflow API. +the Task Runner API, Python Native API, the Interactive API (Deprecated), and the Workflow API. 
the Task Runner API is advised for production scenarios where the workload needs to be verified prior to execution, whereas the python native API provides a clean python interface on top of it intended for simulation purposes. -The Interactive API introduces a convenient way to set up a federation and brings “long-lived” components in a federation (“Director” and “Envoy”), +The Interactive API (Deprecated) introduces a convenient way to set up a federation and brings “long-lived” components in a federation (“Director” and “Envoy”), while the Task Runner API workflow is advised for scenarios where the workload needs to be verified prior to execution. In contrast, the currently experimental Workflow API is introduced to provide significant flexility to researchers and developers in the construction of federated learning experiments. @@ -43,9 +43,9 @@ See :ref:`python_native_pytorch_mnist` examples/python_native_pytorch_mnist -------------------------- -Interactive API -------------------------- +---------------------------- +Interactive API (Deprecated) +---------------------------- Setup long-lived components to run many experiments See :ref:`interactive_tensorflow_mnist` diff --git a/docs/get_started/examples/interactive_tensorflow_mnist.rst b/docs/get_started/examples/interactive_tensorflow_mnist.rst index 906a396148..0cecfc6b35 100644 --- a/docs/get_started/examples/interactive_tensorflow_mnist.rst +++ b/docs/get_started/examples/interactive_tensorflow_mnist.rst @@ -3,8 +3,8 @@ .. _interactive_tensorflow_mnist: -Interactive API: MNIST Classification Tutorial -=================================================== +Interactive API (Deprecated): MNIST Classification Tutorial +=========================================================== In this tutorial, we will set up a federation and train a basic TensoFlow model on the MNIST dataset using the interactive API. See `full tutorial `_. @@ -371,4 +371,4 @@ Time to start a federated learning experiment .. 
code-block:: python - fl_experiment.stream_metrics() \ No newline at end of file + fl_experiment.stream_metrics() diff --git a/openfl-tutorials/interactive_api/Flax_CNN_CIFAR/README.md b/openfl-tutorials/deprecated/interactive_api/Flax_CNN_CIFAR/README.md similarity index 100% rename from openfl-tutorials/interactive_api/Flax_CNN_CIFAR/README.md rename to openfl-tutorials/deprecated/interactive_api/Flax_CNN_CIFAR/README.md diff --git a/openfl-tutorials/interactive_api/Flax_CNN_CIFAR/director/director_config.yaml b/openfl-tutorials/deprecated/interactive_api/Flax_CNN_CIFAR/director/director_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/Flax_CNN_CIFAR/director/director_config.yaml rename to openfl-tutorials/deprecated/interactive_api/Flax_CNN_CIFAR/director/director_config.yaml diff --git a/openfl-tutorials/interactive_api/Flax_CNN_CIFAR/director/start_director.sh b/openfl-tutorials/deprecated/interactive_api/Flax_CNN_CIFAR/director/start_director.sh similarity index 100% rename from openfl-tutorials/interactive_api/Flax_CNN_CIFAR/director/start_director.sh rename to openfl-tutorials/deprecated/interactive_api/Flax_CNN_CIFAR/director/start_director.sh diff --git a/openfl-tutorials/interactive_api/Flax_CNN_CIFAR/envoy/cifar10_shard_descriptor.py b/openfl-tutorials/deprecated/interactive_api/Flax_CNN_CIFAR/envoy/cifar10_shard_descriptor.py similarity index 100% rename from openfl-tutorials/interactive_api/Flax_CNN_CIFAR/envoy/cifar10_shard_descriptor.py rename to openfl-tutorials/deprecated/interactive_api/Flax_CNN_CIFAR/envoy/cifar10_shard_descriptor.py diff --git a/openfl-tutorials/interactive_api/Flax_CNN_CIFAR/envoy/envoy_config_1.yaml b/openfl-tutorials/deprecated/interactive_api/Flax_CNN_CIFAR/envoy/envoy_config_1.yaml similarity index 100% rename from openfl-tutorials/interactive_api/Flax_CNN_CIFAR/envoy/envoy_config_1.yaml rename to openfl-tutorials/deprecated/interactive_api/Flax_CNN_CIFAR/envoy/envoy_config_1.yaml diff --git 
a/openfl-tutorials/interactive_api/Flax_CNN_CIFAR/envoy/start_envoy.sh b/openfl-tutorials/deprecated/interactive_api/Flax_CNN_CIFAR/envoy/start_envoy.sh similarity index 100% rename from openfl-tutorials/interactive_api/Flax_CNN_CIFAR/envoy/start_envoy.sh rename to openfl-tutorials/deprecated/interactive_api/Flax_CNN_CIFAR/envoy/start_envoy.sh diff --git a/openfl-tutorials/interactive_api/Flax_CNN_CIFAR/requirements.txt b/openfl-tutorials/deprecated/interactive_api/Flax_CNN_CIFAR/requirements.txt similarity index 100% rename from openfl-tutorials/interactive_api/Flax_CNN_CIFAR/requirements.txt rename to openfl-tutorials/deprecated/interactive_api/Flax_CNN_CIFAR/requirements.txt diff --git a/openfl-tutorials/interactive_api/Flax_CNN_CIFAR/workspace/FLAX_CIFAR10_CNN.ipynb b/openfl-tutorials/deprecated/interactive_api/Flax_CNN_CIFAR/workspace/FLAX_CIFAR10_CNN.ipynb similarity index 100% rename from openfl-tutorials/interactive_api/Flax_CNN_CIFAR/workspace/FLAX_CIFAR10_CNN.ipynb rename to openfl-tutorials/deprecated/interactive_api/Flax_CNN_CIFAR/workspace/FLAX_CIFAR10_CNN.ipynb diff --git a/openfl-tutorials/interactive_api/HPU/PyTorch_Kvasir_UNet/README.md b/openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_Kvasir_UNet/README.md similarity index 100% rename from openfl-tutorials/interactive_api/HPU/PyTorch_Kvasir_UNet/README.md rename to openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_Kvasir_UNet/README.md diff --git a/openfl-tutorials/interactive_api/HPU/PyTorch_Kvasir_UNet/director/director_config.yaml b/openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_Kvasir_UNet/director/director_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/HPU/PyTorch_Kvasir_UNet/director/director_config.yaml rename to openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_Kvasir_UNet/director/director_config.yaml diff --git a/openfl-tutorials/interactive_api/HPU/PyTorch_Kvasir_UNet/director/start_director.sh 
b/openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_Kvasir_UNet/director/start_director.sh similarity index 100% rename from openfl-tutorials/interactive_api/HPU/PyTorch_Kvasir_UNet/director/start_director.sh rename to openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_Kvasir_UNet/director/start_director.sh diff --git a/openfl-tutorials/interactive_api/HPU/PyTorch_Kvasir_UNet/director/start_director_with_tls.sh b/openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_Kvasir_UNet/director/start_director_with_tls.sh similarity index 100% rename from openfl-tutorials/interactive_api/HPU/PyTorch_Kvasir_UNet/director/start_director_with_tls.sh rename to openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_Kvasir_UNet/director/start_director_with_tls.sh diff --git a/openfl-tutorials/interactive_api/HPU/PyTorch_Kvasir_UNet/envoy/envoy_config_no_gpu.yaml b/openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_Kvasir_UNet/envoy/envoy_config_no_gpu.yaml similarity index 100% rename from openfl-tutorials/interactive_api/HPU/PyTorch_Kvasir_UNet/envoy/envoy_config_no_gpu.yaml rename to openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_Kvasir_UNet/envoy/envoy_config_no_gpu.yaml diff --git a/openfl-tutorials/interactive_api/HPU/PyTorch_Kvasir_UNet/envoy/kvasir_shard_descriptor.py b/openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_Kvasir_UNet/envoy/kvasir_shard_descriptor.py similarity index 100% rename from openfl-tutorials/interactive_api/HPU/PyTorch_Kvasir_UNet/envoy/kvasir_shard_descriptor.py rename to openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_Kvasir_UNet/envoy/kvasir_shard_descriptor.py diff --git a/openfl-tutorials/interactive_api/HPU/PyTorch_Kvasir_UNet/envoy/kvasir_shard_descriptor_with_data_splitter.py b/openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_Kvasir_UNet/envoy/kvasir_shard_descriptor_with_data_splitter.py similarity index 100% rename from 
openfl-tutorials/interactive_api/HPU/PyTorch_Kvasir_UNet/envoy/kvasir_shard_descriptor_with_data_splitter.py rename to openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_Kvasir_UNet/envoy/kvasir_shard_descriptor_with_data_splitter.py diff --git a/openfl-tutorials/interactive_api/HPU/PyTorch_Kvasir_UNet/envoy/sd_requirements.txt b/openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_Kvasir_UNet/envoy/sd_requirements.txt similarity index 100% rename from openfl-tutorials/interactive_api/HPU/PyTorch_Kvasir_UNet/envoy/sd_requirements.txt rename to openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_Kvasir_UNet/envoy/sd_requirements.txt diff --git a/openfl-tutorials/interactive_api/HPU/PyTorch_Kvasir_UNet/envoy/start_envoy.sh b/openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_Kvasir_UNet/envoy/start_envoy.sh similarity index 100% rename from openfl-tutorials/interactive_api/HPU/PyTorch_Kvasir_UNet/envoy/start_envoy.sh rename to openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_Kvasir_UNet/envoy/start_envoy.sh diff --git a/openfl-tutorials/interactive_api/HPU/PyTorch_Kvasir_UNet/envoy/start_envoy_with_tls.sh b/openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_Kvasir_UNet/envoy/start_envoy_with_tls.sh similarity index 100% rename from openfl-tutorials/interactive_api/HPU/PyTorch_Kvasir_UNet/envoy/start_envoy_with_tls.sh rename to openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_Kvasir_UNet/envoy/start_envoy_with_tls.sh diff --git a/openfl-tutorials/interactive_api/HPU/PyTorch_Kvasir_UNet/workspace/PyTorch_Kvasir_UNet.ipynb b/openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_Kvasir_UNet/workspace/PyTorch_Kvasir_UNet.ipynb similarity index 100% rename from openfl-tutorials/interactive_api/HPU/PyTorch_Kvasir_UNet/workspace/PyTorch_Kvasir_UNet.ipynb rename to openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_Kvasir_UNet/workspace/PyTorch_Kvasir_UNet.ipynb diff --git 
a/openfl-tutorials/interactive_api/HPU/PyTorch_Kvasir_UNet/workspace/layers.py b/openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_Kvasir_UNet/workspace/layers.py similarity index 100% rename from openfl-tutorials/interactive_api/HPU/PyTorch_Kvasir_UNet/workspace/layers.py rename to openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_Kvasir_UNet/workspace/layers.py diff --git a/openfl-tutorials/interactive_api/HPU/PyTorch_MedMNIST_2D/README.md b/openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_MedMNIST_2D/README.md similarity index 100% rename from openfl-tutorials/interactive_api/HPU/PyTorch_MedMNIST_2D/README.md rename to openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_MedMNIST_2D/README.md diff --git a/openfl-tutorials/interactive_api/HPU/PyTorch_MedMNIST_2D/director/director_config.yaml b/openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_MedMNIST_2D/director/director_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/HPU/PyTorch_MedMNIST_2D/director/director_config.yaml rename to openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_MedMNIST_2D/director/director_config.yaml diff --git a/openfl-tutorials/interactive_api/HPU/PyTorch_MedMNIST_2D/director/start_director.sh b/openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_MedMNIST_2D/director/start_director.sh similarity index 100% rename from openfl-tutorials/interactive_api/HPU/PyTorch_MedMNIST_2D/director/start_director.sh rename to openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_MedMNIST_2D/director/start_director.sh diff --git a/openfl-tutorials/interactive_api/HPU/PyTorch_MedMNIST_2D/envoy/envoy_config.yaml b/openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_MedMNIST_2D/envoy/envoy_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/HPU/PyTorch_MedMNIST_2D/envoy/envoy_config.yaml rename to openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_MedMNIST_2D/envoy/envoy_config.yaml diff --git 
a/openfl-tutorials/interactive_api/HPU/PyTorch_MedMNIST_2D/envoy/medmnist_shard_descriptor.py b/openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_MedMNIST_2D/envoy/medmnist_shard_descriptor.py similarity index 100% rename from openfl-tutorials/interactive_api/HPU/PyTorch_MedMNIST_2D/envoy/medmnist_shard_descriptor.py rename to openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_MedMNIST_2D/envoy/medmnist_shard_descriptor.py diff --git a/openfl-tutorials/interactive_api/HPU/PyTorch_MedMNIST_2D/envoy/requirements.txt b/openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_MedMNIST_2D/envoy/requirements.txt similarity index 100% rename from openfl-tutorials/interactive_api/HPU/PyTorch_MedMNIST_2D/envoy/requirements.txt rename to openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_MedMNIST_2D/envoy/requirements.txt diff --git a/openfl-tutorials/interactive_api/HPU/PyTorch_MedMNIST_2D/envoy/start_envoy.sh b/openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_MedMNIST_2D/envoy/start_envoy.sh similarity index 100% rename from openfl-tutorials/interactive_api/HPU/PyTorch_MedMNIST_2D/envoy/start_envoy.sh rename to openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_MedMNIST_2D/envoy/start_envoy.sh diff --git a/openfl-tutorials/interactive_api/HPU/PyTorch_MedMNIST_2D/workspace/HPU_Pytorch_MedMNIST_2D.ipynb b/openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_MedMNIST_2D/workspace/HPU_Pytorch_MedMNIST_2D.ipynb similarity index 100% rename from openfl-tutorials/interactive_api/HPU/PyTorch_MedMNIST_2D/workspace/HPU_Pytorch_MedMNIST_2D.ipynb rename to openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_MedMNIST_2D/workspace/HPU_Pytorch_MedMNIST_2D.ipynb diff --git a/openfl-tutorials/interactive_api/HPU/PyTorch_TinyImageNet/README.md b/openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_TinyImageNet/README.md similarity index 100% rename from openfl-tutorials/interactive_api/HPU/PyTorch_TinyImageNet/README.md rename to 
openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_TinyImageNet/README.md diff --git a/openfl-tutorials/interactive_api/HPU/PyTorch_TinyImageNet/director/director_config.yaml b/openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_TinyImageNet/director/director_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/HPU/PyTorch_TinyImageNet/director/director_config.yaml rename to openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_TinyImageNet/director/director_config.yaml diff --git a/openfl-tutorials/interactive_api/HPU/PyTorch_TinyImageNet/director/start_director.sh b/openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_TinyImageNet/director/start_director.sh similarity index 100% rename from openfl-tutorials/interactive_api/HPU/PyTorch_TinyImageNet/director/start_director.sh rename to openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_TinyImageNet/director/start_director.sh diff --git a/openfl-tutorials/interactive_api/HPU/PyTorch_TinyImageNet/envoy/envoy_config_1.yaml b/openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_TinyImageNet/envoy/envoy_config_1.yaml similarity index 100% rename from openfl-tutorials/interactive_api/HPU/PyTorch_TinyImageNet/envoy/envoy_config_1.yaml rename to openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_TinyImageNet/envoy/envoy_config_1.yaml diff --git a/openfl-tutorials/interactive_api/HPU/PyTorch_TinyImageNet/envoy/requirements.txt b/openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_TinyImageNet/envoy/requirements.txt similarity index 100% rename from openfl-tutorials/interactive_api/HPU/PyTorch_TinyImageNet/envoy/requirements.txt rename to openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_TinyImageNet/envoy/requirements.txt diff --git a/openfl-tutorials/interactive_api/HPU/PyTorch_TinyImageNet/envoy/start_envoy.sh b/openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_TinyImageNet/envoy/start_envoy.sh similarity index 100% rename from 
openfl-tutorials/interactive_api/HPU/PyTorch_TinyImageNet/envoy/start_envoy.sh rename to openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_TinyImageNet/envoy/start_envoy.sh diff --git a/openfl-tutorials/interactive_api/HPU/PyTorch_TinyImageNet/envoy/tinyimagenet_shard_descriptor.py b/openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_TinyImageNet/envoy/tinyimagenet_shard_descriptor.py similarity index 100% rename from openfl-tutorials/interactive_api/HPU/PyTorch_TinyImageNet/envoy/tinyimagenet_shard_descriptor.py rename to openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_TinyImageNet/envoy/tinyimagenet_shard_descriptor.py diff --git a/openfl-tutorials/interactive_api/HPU/PyTorch_TinyImageNet/workspace/hpu_pytorch_tinyimagenet.ipynb b/openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_TinyImageNet/workspace/hpu_pytorch_tinyimagenet.ipynb similarity index 100% rename from openfl-tutorials/interactive_api/HPU/PyTorch_TinyImageNet/workspace/hpu_pytorch_tinyimagenet.ipynb rename to openfl-tutorials/deprecated/interactive_api/HPU/PyTorch_TinyImageNet/workspace/hpu_pytorch_tinyimagenet.ipynb diff --git a/openfl-tutorials/interactive_api/MXNet_landmarks/README.md b/openfl-tutorials/deprecated/interactive_api/MXNet_landmarks/README.md similarity index 100% rename from openfl-tutorials/interactive_api/MXNet_landmarks/README.md rename to openfl-tutorials/deprecated/interactive_api/MXNet_landmarks/README.md diff --git a/openfl-tutorials/interactive_api/MXNet_landmarks/director/director_config.yaml b/openfl-tutorials/deprecated/interactive_api/MXNet_landmarks/director/director_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/MXNet_landmarks/director/director_config.yaml rename to openfl-tutorials/deprecated/interactive_api/MXNet_landmarks/director/director_config.yaml diff --git a/openfl-tutorials/interactive_api/MXNet_landmarks/director/start_director.sh 
b/openfl-tutorials/deprecated/interactive_api/MXNet_landmarks/director/start_director.sh similarity index 100% rename from openfl-tutorials/interactive_api/MXNet_landmarks/director/start_director.sh rename to openfl-tutorials/deprecated/interactive_api/MXNet_landmarks/director/start_director.sh diff --git a/openfl-tutorials/interactive_api/MXNet_landmarks/envoy/envoy_config_one.yaml b/openfl-tutorials/deprecated/interactive_api/MXNet_landmarks/envoy/envoy_config_one.yaml similarity index 100% rename from openfl-tutorials/interactive_api/MXNet_landmarks/envoy/envoy_config_one.yaml rename to openfl-tutorials/deprecated/interactive_api/MXNet_landmarks/envoy/envoy_config_one.yaml diff --git a/openfl-tutorials/interactive_api/MXNet_landmarks/envoy/envoy_config_two.yaml b/openfl-tutorials/deprecated/interactive_api/MXNet_landmarks/envoy/envoy_config_two.yaml similarity index 100% rename from openfl-tutorials/interactive_api/MXNet_landmarks/envoy/envoy_config_two.yaml rename to openfl-tutorials/deprecated/interactive_api/MXNet_landmarks/envoy/envoy_config_two.yaml diff --git a/openfl-tutorials/interactive_api/MXNet_landmarks/envoy/landmark_shard_descriptor.py b/openfl-tutorials/deprecated/interactive_api/MXNet_landmarks/envoy/landmark_shard_descriptor.py similarity index 100% rename from openfl-tutorials/interactive_api/MXNet_landmarks/envoy/landmark_shard_descriptor.py rename to openfl-tutorials/deprecated/interactive_api/MXNet_landmarks/envoy/landmark_shard_descriptor.py diff --git a/openfl-tutorials/interactive_api/MXNet_landmarks/envoy/sd_requirements.txt b/openfl-tutorials/deprecated/interactive_api/MXNet_landmarks/envoy/sd_requirements.txt similarity index 100% rename from openfl-tutorials/interactive_api/MXNet_landmarks/envoy/sd_requirements.txt rename to openfl-tutorials/deprecated/interactive_api/MXNet_landmarks/envoy/sd_requirements.txt diff --git a/openfl-tutorials/interactive_api/MXNet_landmarks/envoy/start_envoy.sh 
b/openfl-tutorials/deprecated/interactive_api/MXNet_landmarks/envoy/start_envoy.sh similarity index 100% rename from openfl-tutorials/interactive_api/MXNet_landmarks/envoy/start_envoy.sh rename to openfl-tutorials/deprecated/interactive_api/MXNet_landmarks/envoy/start_envoy.sh diff --git a/openfl-tutorials/interactive_api/MXNet_landmarks/workspace/MXNet_landmarks.ipynb b/openfl-tutorials/deprecated/interactive_api/MXNet_landmarks/workspace/MXNet_landmarks.ipynb similarity index 100% rename from openfl-tutorials/interactive_api/MXNet_landmarks/workspace/MXNet_landmarks.ipynb rename to openfl-tutorials/deprecated/interactive_api/MXNet_landmarks/workspace/MXNet_landmarks.ipynb diff --git a/openfl-tutorials/interactive_api/MXNet_landmarks/workspace/mxnet_adapter.py b/openfl-tutorials/deprecated/interactive_api/MXNet_landmarks/workspace/mxnet_adapter.py similarity index 100% rename from openfl-tutorials/interactive_api/MXNet_landmarks/workspace/mxnet_adapter.py rename to openfl-tutorials/deprecated/interactive_api/MXNet_landmarks/workspace/mxnet_adapter.py diff --git a/openfl-tutorials/interactive_api/MXNet_landmarks/workspace/requirements.txt b/openfl-tutorials/deprecated/interactive_api/MXNet_landmarks/workspace/requirements.txt similarity index 100% rename from openfl-tutorials/interactive_api/MXNet_landmarks/workspace/requirements.txt rename to openfl-tutorials/deprecated/interactive_api/MXNet_landmarks/workspace/requirements.txt diff --git a/openfl-tutorials/interactive_api/PyTorch_DogsCats_ViT/README.md b/openfl-tutorials/deprecated/interactive_api/PyTorch_DogsCats_ViT/README.md similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_DogsCats_ViT/README.md rename to openfl-tutorials/deprecated/interactive_api/PyTorch_DogsCats_ViT/README.md diff --git a/openfl-tutorials/interactive_api/PyTorch_DogsCats_ViT/director/director_config.yaml b/openfl-tutorials/deprecated/interactive_api/PyTorch_DogsCats_ViT/director/director_config.yaml similarity 
index 100% rename from openfl-tutorials/interactive_api/PyTorch_DogsCats_ViT/director/director_config.yaml rename to openfl-tutorials/deprecated/interactive_api/PyTorch_DogsCats_ViT/director/director_config.yaml diff --git a/openfl-tutorials/interactive_api/PyTorch_DogsCats_ViT/director/start_director.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_DogsCats_ViT/director/start_director.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_DogsCats_ViT/director/start_director.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_DogsCats_ViT/director/start_director.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_DogsCats_ViT/envoy/dogs_cats_shard_descriptor.py b/openfl-tutorials/deprecated/interactive_api/PyTorch_DogsCats_ViT/envoy/dogs_cats_shard_descriptor.py similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_DogsCats_ViT/envoy/dogs_cats_shard_descriptor.py rename to openfl-tutorials/deprecated/interactive_api/PyTorch_DogsCats_ViT/envoy/dogs_cats_shard_descriptor.py diff --git a/openfl-tutorials/interactive_api/PyTorch_DogsCats_ViT/envoy/envoy_config_one.yaml b/openfl-tutorials/deprecated/interactive_api/PyTorch_DogsCats_ViT/envoy/envoy_config_one.yaml similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_DogsCats_ViT/envoy/envoy_config_one.yaml rename to openfl-tutorials/deprecated/interactive_api/PyTorch_DogsCats_ViT/envoy/envoy_config_one.yaml diff --git a/openfl-tutorials/interactive_api/PyTorch_DogsCats_ViT/envoy/envoy_config_two.yaml b/openfl-tutorials/deprecated/interactive_api/PyTorch_DogsCats_ViT/envoy/envoy_config_two.yaml similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_DogsCats_ViT/envoy/envoy_config_two.yaml rename to openfl-tutorials/deprecated/interactive_api/PyTorch_DogsCats_ViT/envoy/envoy_config_two.yaml diff --git a/openfl-tutorials/interactive_api/PyTorch_DogsCats_ViT/envoy/sd_requirements.txt 
b/openfl-tutorials/deprecated/interactive_api/PyTorch_DogsCats_ViT/envoy/sd_requirements.txt similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_DogsCats_ViT/envoy/sd_requirements.txt rename to openfl-tutorials/deprecated/interactive_api/PyTorch_DogsCats_ViT/envoy/sd_requirements.txt diff --git a/openfl-tutorials/interactive_api/PyTorch_DogsCats_ViT/envoy/start_envoy.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_DogsCats_ViT/envoy/start_envoy.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_DogsCats_ViT/envoy/start_envoy.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_DogsCats_ViT/envoy/start_envoy.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_DogsCats_ViT/workspace/PyTorch_DogsCats_ViT.ipynb b/openfl-tutorials/deprecated/interactive_api/PyTorch_DogsCats_ViT/workspace/PyTorch_DogsCats_ViT.ipynb similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_DogsCats_ViT/workspace/PyTorch_DogsCats_ViT.ipynb rename to openfl-tutorials/deprecated/interactive_api/PyTorch_DogsCats_ViT/workspace/PyTorch_DogsCats_ViT.ipynb diff --git a/openfl-tutorials/interactive_api/PyTorch_DogsCats_ViT/workspace/requirements.txt b/openfl-tutorials/deprecated/interactive_api/PyTorch_DogsCats_ViT/workspace/requirements.txt similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_DogsCats_ViT/workspace/requirements.txt rename to openfl-tutorials/deprecated/interactive_api/PyTorch_DogsCats_ViT/workspace/requirements.txt diff --git a/openfl-tutorials/interactive_api/PyTorch_FedProx_MNIST/README.md b/openfl-tutorials/deprecated/interactive_api/PyTorch_FedProx_MNIST/README.md similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_FedProx_MNIST/README.md rename to openfl-tutorials/deprecated/interactive_api/PyTorch_FedProx_MNIST/README.md diff --git a/openfl-tutorials/interactive_api/PyTorch_FedProx_MNIST/director/director_config.yaml 
b/openfl-tutorials/deprecated/interactive_api/PyTorch_FedProx_MNIST/director/director_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_FedProx_MNIST/director/director_config.yaml rename to openfl-tutorials/deprecated/interactive_api/PyTorch_FedProx_MNIST/director/director_config.yaml diff --git a/openfl-tutorials/interactive_api/PyTorch_FedProx_MNIST/director/start_director.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_FedProx_MNIST/director/start_director.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_FedProx_MNIST/director/start_director.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_FedProx_MNIST/director/start_director.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_FedProx_MNIST/envoy/envoy_config.yaml b/openfl-tutorials/deprecated/interactive_api/PyTorch_FedProx_MNIST/envoy/envoy_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_FedProx_MNIST/envoy/envoy_config.yaml rename to openfl-tutorials/deprecated/interactive_api/PyTorch_FedProx_MNIST/envoy/envoy_config.yaml diff --git a/openfl-tutorials/interactive_api/PyTorch_FedProx_MNIST/envoy/medmnist_shard_descriptor.py b/openfl-tutorials/deprecated/interactive_api/PyTorch_FedProx_MNIST/envoy/medmnist_shard_descriptor.py similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_FedProx_MNIST/envoy/medmnist_shard_descriptor.py rename to openfl-tutorials/deprecated/interactive_api/PyTorch_FedProx_MNIST/envoy/medmnist_shard_descriptor.py diff --git a/openfl-tutorials/interactive_api/PyTorch_FedProx_MNIST/envoy/requirements.txt b/openfl-tutorials/deprecated/interactive_api/PyTorch_FedProx_MNIST/envoy/requirements.txt similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_FedProx_MNIST/envoy/requirements.txt rename to openfl-tutorials/deprecated/interactive_api/PyTorch_FedProx_MNIST/envoy/requirements.txt diff --git 
a/openfl-tutorials/interactive_api/PyTorch_FedProx_MNIST/envoy/start_envoy.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_FedProx_MNIST/envoy/start_envoy.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_FedProx_MNIST/envoy/start_envoy.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_FedProx_MNIST/envoy/start_envoy.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_FedProx_MNIST/workspace/Pytorch_FedProx_MedMNIST_2D.ipynb b/openfl-tutorials/deprecated/interactive_api/PyTorch_FedProx_MNIST/workspace/Pytorch_FedProx_MedMNIST_2D.ipynb similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_FedProx_MNIST/workspace/Pytorch_FedProx_MedMNIST_2D.ipynb rename to openfl-tutorials/deprecated/interactive_api/PyTorch_FedProx_MNIST/workspace/Pytorch_FedProx_MedMNIST_2D.ipynb diff --git a/openfl-tutorials/interactive_api/PyTorch_Histology/README.md b/openfl-tutorials/deprecated/interactive_api/PyTorch_Histology/README.md similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Histology/README.md rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Histology/README.md diff --git a/openfl-tutorials/interactive_api/PyTorch_Histology/director/director_config.yaml b/openfl-tutorials/deprecated/interactive_api/PyTorch_Histology/director/director_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Histology/director/director_config.yaml rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Histology/director/director_config.yaml diff --git a/openfl-tutorials/interactive_api/PyTorch_Histology/director/director_config_review_exp.yaml b/openfl-tutorials/deprecated/interactive_api/PyTorch_Histology/director/director_config_review_exp.yaml similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Histology/director/director_config_review_exp.yaml rename to 
openfl-tutorials/deprecated/interactive_api/PyTorch_Histology/director/director_config_review_exp.yaml diff --git a/openfl-tutorials/interactive_api/PyTorch_Histology/director/start_director.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_Histology/director/start_director.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Histology/director/start_director.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Histology/director/start_director.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_Histology/director/start_director_with_tls.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_Histology/director/start_director_with_tls.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Histology/director/start_director_with_tls.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Histology/director/start_director_with_tls.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_Histology/envoy/.gitignore b/openfl-tutorials/deprecated/interactive_api/PyTorch_Histology/envoy/.gitignore similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Histology/envoy/.gitignore rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Histology/envoy/.gitignore diff --git a/openfl-tutorials/interactive_api/PyTorch_Histology/envoy/envoy_config.yaml b/openfl-tutorials/deprecated/interactive_api/PyTorch_Histology/envoy/envoy_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Histology/envoy/envoy_config.yaml rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Histology/envoy/envoy_config.yaml diff --git a/openfl-tutorials/interactive_api/PyTorch_Histology/envoy/envoy_config_review_exp.yaml b/openfl-tutorials/deprecated/interactive_api/PyTorch_Histology/envoy/envoy_config_review_exp.yaml similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Histology/envoy/envoy_config_review_exp.yaml rename to 
openfl-tutorials/deprecated/interactive_api/PyTorch_Histology/envoy/envoy_config_review_exp.yaml diff --git a/openfl-tutorials/interactive_api/PyTorch_Histology/envoy/histology_shard_descriptor.py b/openfl-tutorials/deprecated/interactive_api/PyTorch_Histology/envoy/histology_shard_descriptor.py similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Histology/envoy/histology_shard_descriptor.py rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Histology/envoy/histology_shard_descriptor.py diff --git a/openfl-tutorials/interactive_api/PyTorch_Histology/envoy/requirements.txt b/openfl-tutorials/deprecated/interactive_api/PyTorch_Histology/envoy/requirements.txt similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Histology/envoy/requirements.txt rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Histology/envoy/requirements.txt diff --git a/openfl-tutorials/interactive_api/PyTorch_Histology/envoy/start_envoy.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_Histology/envoy/start_envoy.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Histology/envoy/start_envoy.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Histology/envoy/start_envoy.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_Histology/envoy/start_envoy_with_tls.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_Histology/envoy/start_envoy_with_tls.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Histology/envoy/start_envoy_with_tls.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Histology/envoy/start_envoy_with_tls.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_Histology/workspace/pytorch_histology.ipynb b/openfl-tutorials/deprecated/interactive_api/PyTorch_Histology/workspace/pytorch_histology.ipynb similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Histology/workspace/pytorch_histology.ipynb rename 
to openfl-tutorials/deprecated/interactive_api/PyTorch_Histology/workspace/pytorch_histology.ipynb diff --git a/openfl-tutorials/interactive_api/PyTorch_Histology_FedCurv/README.md b/openfl-tutorials/deprecated/interactive_api/PyTorch_Histology_FedCurv/README.md similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Histology_FedCurv/README.md rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Histology_FedCurv/README.md diff --git a/openfl-tutorials/interactive_api/PyTorch_Histology_FedCurv/director/director_config.yaml b/openfl-tutorials/deprecated/interactive_api/PyTorch_Histology_FedCurv/director/director_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Histology_FedCurv/director/director_config.yaml rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Histology_FedCurv/director/director_config.yaml diff --git a/openfl-tutorials/interactive_api/PyTorch_Histology_FedCurv/director/start_director.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_Histology_FedCurv/director/start_director.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Histology_FedCurv/director/start_director.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Histology_FedCurv/director/start_director.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_Histology_FedCurv/director/start_director_with_tls.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_Histology_FedCurv/director/start_director_with_tls.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Histology_FedCurv/director/start_director_with_tls.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Histology_FedCurv/director/start_director_with_tls.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_Histology_FedCurv/envoy/.gitignore b/openfl-tutorials/deprecated/interactive_api/PyTorch_Histology_FedCurv/envoy/.gitignore similarity index 100% rename from 
openfl-tutorials/interactive_api/PyTorch_Histology_FedCurv/envoy/.gitignore rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Histology_FedCurv/envoy/.gitignore diff --git a/openfl-tutorials/interactive_api/PyTorch_Histology_FedCurv/envoy/envoy_config.yaml b/openfl-tutorials/deprecated/interactive_api/PyTorch_Histology_FedCurv/envoy/envoy_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Histology_FedCurv/envoy/envoy_config.yaml rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Histology_FedCurv/envoy/envoy_config.yaml diff --git a/openfl-tutorials/interactive_api/PyTorch_Histology_FedCurv/envoy/histology_shard_descriptor.py b/openfl-tutorials/deprecated/interactive_api/PyTorch_Histology_FedCurv/envoy/histology_shard_descriptor.py similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Histology_FedCurv/envoy/histology_shard_descriptor.py rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Histology_FedCurv/envoy/histology_shard_descriptor.py diff --git a/openfl-tutorials/interactive_api/PyTorch_Histology_FedCurv/envoy/populate_envoys.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_Histology_FedCurv/envoy/populate_envoys.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Histology_FedCurv/envoy/populate_envoys.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Histology_FedCurv/envoy/populate_envoys.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_Histology_FedCurv/envoy/requirements.txt b/openfl-tutorials/deprecated/interactive_api/PyTorch_Histology_FedCurv/envoy/requirements.txt similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Histology_FedCurv/envoy/requirements.txt rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Histology_FedCurv/envoy/requirements.txt diff --git a/openfl-tutorials/interactive_api/PyTorch_Histology_FedCurv/envoy/start_envoy.sh 
b/openfl-tutorials/deprecated/interactive_api/PyTorch_Histology_FedCurv/envoy/start_envoy.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Histology_FedCurv/envoy/start_envoy.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Histology_FedCurv/envoy/start_envoy.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_Histology_FedCurv/envoy/start_envoy_with_tls.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_Histology_FedCurv/envoy/start_envoy_with_tls.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Histology_FedCurv/envoy/start_envoy_with_tls.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Histology_FedCurv/envoy/start_envoy_with_tls.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_Histology_FedCurv/envoy/start_envoys.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_Histology_FedCurv/envoy/start_envoys.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Histology_FedCurv/envoy/start_envoys.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Histology_FedCurv/envoy/start_envoys.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_Histology_FedCurv/workspace/.gitignore b/openfl-tutorials/deprecated/interactive_api/PyTorch_Histology_FedCurv/workspace/.gitignore similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Histology_FedCurv/workspace/.gitignore rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Histology_FedCurv/workspace/.gitignore diff --git a/openfl-tutorials/interactive_api/PyTorch_Histology_FedCurv/workspace/pytorch_histology.ipynb b/openfl-tutorials/deprecated/interactive_api/PyTorch_Histology_FedCurv/workspace/pytorch_histology.ipynb similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Histology_FedCurv/workspace/pytorch_histology.ipynb rename to 
openfl-tutorials/deprecated/interactive_api/PyTorch_Histology_FedCurv/workspace/pytorch_histology.ipynb diff --git a/openfl-tutorials/interactive_api/PyTorch_Huggingface_transformers_SUPERB/README.md b/openfl-tutorials/deprecated/interactive_api/PyTorch_Huggingface_transformers_SUPERB/README.md similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Huggingface_transformers_SUPERB/README.md rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Huggingface_transformers_SUPERB/README.md diff --git a/openfl-tutorials/interactive_api/PyTorch_Huggingface_transformers_SUPERB/director/director_config.yaml b/openfl-tutorials/deprecated/interactive_api/PyTorch_Huggingface_transformers_SUPERB/director/director_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Huggingface_transformers_SUPERB/director/director_config.yaml rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Huggingface_transformers_SUPERB/director/director_config.yaml diff --git a/openfl-tutorials/interactive_api/PyTorch_Huggingface_transformers_SUPERB/director/start_director.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_Huggingface_transformers_SUPERB/director/start_director.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Huggingface_transformers_SUPERB/director/start_director.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Huggingface_transformers_SUPERB/director/start_director.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_Huggingface_transformers_SUPERB/envoy/envoy_config.yaml b/openfl-tutorials/deprecated/interactive_api/PyTorch_Huggingface_transformers_SUPERB/envoy/envoy_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Huggingface_transformers_SUPERB/envoy/envoy_config.yaml rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Huggingface_transformers_SUPERB/envoy/envoy_config.yaml diff --git 
a/openfl-tutorials/interactive_api/PyTorch_Huggingface_transformers_SUPERB/envoy/sd_requirements.txt b/openfl-tutorials/deprecated/interactive_api/PyTorch_Huggingface_transformers_SUPERB/envoy/sd_requirements.txt similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Huggingface_transformers_SUPERB/envoy/sd_requirements.txt rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Huggingface_transformers_SUPERB/envoy/sd_requirements.txt diff --git a/openfl-tutorials/interactive_api/PyTorch_Huggingface_transformers_SUPERB/envoy/start_envoy.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_Huggingface_transformers_SUPERB/envoy/start_envoy.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Huggingface_transformers_SUPERB/envoy/start_envoy.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Huggingface_transformers_SUPERB/envoy/start_envoy.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_Huggingface_transformers_SUPERB/envoy/superb_shard_descriptor.py b/openfl-tutorials/deprecated/interactive_api/PyTorch_Huggingface_transformers_SUPERB/envoy/superb_shard_descriptor.py similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Huggingface_transformers_SUPERB/envoy/superb_shard_descriptor.py rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Huggingface_transformers_SUPERB/envoy/superb_shard_descriptor.py diff --git a/openfl-tutorials/interactive_api/PyTorch_Huggingface_transformers_SUPERB/workspace/PyTorch_Huggingface_transformers_SUPERB.ipynb b/openfl-tutorials/deprecated/interactive_api/PyTorch_Huggingface_transformers_SUPERB/workspace/PyTorch_Huggingface_transformers_SUPERB.ipynb similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Huggingface_transformers_SUPERB/workspace/PyTorch_Huggingface_transformers_SUPERB.ipynb rename to 
openfl-tutorials/deprecated/interactive_api/PyTorch_Huggingface_transformers_SUPERB/workspace/PyTorch_Huggingface_transformers_SUPERB.ipynb diff --git a/openfl-tutorials/interactive_api/PyTorch_Kvasir_UNet/README.md b/openfl-tutorials/deprecated/interactive_api/PyTorch_Kvasir_UNet/README.md similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Kvasir_UNet/README.md rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Kvasir_UNet/README.md diff --git a/openfl-tutorials/interactive_api/PyTorch_Kvasir_UNet/director/director_config.yaml b/openfl-tutorials/deprecated/interactive_api/PyTorch_Kvasir_UNet/director/director_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Kvasir_UNet/director/director_config.yaml rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Kvasir_UNet/director/director_config.yaml diff --git a/openfl-tutorials/interactive_api/PyTorch_Kvasir_UNet/director/start_director.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_Kvasir_UNet/director/start_director.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Kvasir_UNet/director/start_director.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Kvasir_UNet/director/start_director.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_Kvasir_UNet/director/start_director_with_tls.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_Kvasir_UNet/director/start_director_with_tls.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Kvasir_UNet/director/start_director_with_tls.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Kvasir_UNet/director/start_director_with_tls.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_Kvasir_UNet/envoy/envoy_config.yaml b/openfl-tutorials/deprecated/interactive_api/PyTorch_Kvasir_UNet/envoy/envoy_config.yaml similarity index 100% rename from 
openfl-tutorials/interactive_api/PyTorch_Kvasir_UNet/envoy/envoy_config.yaml rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Kvasir_UNet/envoy/envoy_config.yaml diff --git a/openfl-tutorials/interactive_api/PyTorch_Kvasir_UNet/envoy/envoy_config_no_gpu.yaml b/openfl-tutorials/deprecated/interactive_api/PyTorch_Kvasir_UNet/envoy/envoy_config_no_gpu.yaml similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Kvasir_UNet/envoy/envoy_config_no_gpu.yaml rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Kvasir_UNet/envoy/envoy_config_no_gpu.yaml diff --git a/openfl-tutorials/interactive_api/PyTorch_Kvasir_UNet/envoy/kvasir_shard_descriptor.py b/openfl-tutorials/deprecated/interactive_api/PyTorch_Kvasir_UNet/envoy/kvasir_shard_descriptor.py similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Kvasir_UNet/envoy/kvasir_shard_descriptor.py rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Kvasir_UNet/envoy/kvasir_shard_descriptor.py diff --git a/openfl-tutorials/interactive_api/PyTorch_Kvasir_UNet/envoy/kvasir_shard_descriptor_with_data_splitter.py b/openfl-tutorials/deprecated/interactive_api/PyTorch_Kvasir_UNet/envoy/kvasir_shard_descriptor_with_data_splitter.py similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Kvasir_UNet/envoy/kvasir_shard_descriptor_with_data_splitter.py rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Kvasir_UNet/envoy/kvasir_shard_descriptor_with_data_splitter.py diff --git a/openfl-tutorials/interactive_api/PyTorch_Kvasir_UNet/envoy/sd_requirements.txt b/openfl-tutorials/deprecated/interactive_api/PyTorch_Kvasir_UNet/envoy/sd_requirements.txt similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Kvasir_UNet/envoy/sd_requirements.txt rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Kvasir_UNet/envoy/sd_requirements.txt diff --git 
a/openfl-tutorials/interactive_api/PyTorch_Kvasir_UNet/envoy/start_envoy.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_Kvasir_UNet/envoy/start_envoy.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Kvasir_UNet/envoy/start_envoy.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Kvasir_UNet/envoy/start_envoy.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_Kvasir_UNet/envoy/start_envoy_with_tls.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_Kvasir_UNet/envoy/start_envoy_with_tls.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Kvasir_UNet/envoy/start_envoy_with_tls.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Kvasir_UNet/envoy/start_envoy_with_tls.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_Kvasir_UNet/workspace/PyTorch_Kvasir_UNet.ipynb b/openfl-tutorials/deprecated/interactive_api/PyTorch_Kvasir_UNet/workspace/PyTorch_Kvasir_UNet.ipynb similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Kvasir_UNet/workspace/PyTorch_Kvasir_UNet.ipynb rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Kvasir_UNet/workspace/PyTorch_Kvasir_UNet.ipynb diff --git a/openfl-tutorials/interactive_api/PyTorch_Kvasir_UNet/workspace/layers.py b/openfl-tutorials/deprecated/interactive_api/PyTorch_Kvasir_UNet/workspace/layers.py similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Kvasir_UNet/workspace/layers.py rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Kvasir_UNet/workspace/layers.py diff --git a/openfl-tutorials/interactive_api/PyTorch_Lightning_MNIST_GAN/README.md b/openfl-tutorials/deprecated/interactive_api/PyTorch_Lightning_MNIST_GAN/README.md similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Lightning_MNIST_GAN/README.md rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Lightning_MNIST_GAN/README.md diff --git 
a/openfl-tutorials/interactive_api/PyTorch_Lightning_MNIST_GAN/director/director_config.yaml b/openfl-tutorials/deprecated/interactive_api/PyTorch_Lightning_MNIST_GAN/director/director_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Lightning_MNIST_GAN/director/director_config.yaml rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Lightning_MNIST_GAN/director/director_config.yaml diff --git a/openfl-tutorials/interactive_api/PyTorch_Lightning_MNIST_GAN/director/start_director.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_Lightning_MNIST_GAN/director/start_director.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Lightning_MNIST_GAN/director/start_director.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Lightning_MNIST_GAN/director/start_director.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_Lightning_MNIST_GAN/envoy/envoy_config.yaml b/openfl-tutorials/deprecated/interactive_api/PyTorch_Lightning_MNIST_GAN/envoy/envoy_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Lightning_MNIST_GAN/envoy/envoy_config.yaml rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Lightning_MNIST_GAN/envoy/envoy_config.yaml diff --git a/openfl-tutorials/interactive_api/PyTorch_Lightning_MNIST_GAN/envoy/envoy_config_no_gpu.yaml b/openfl-tutorials/deprecated/interactive_api/PyTorch_Lightning_MNIST_GAN/envoy/envoy_config_no_gpu.yaml similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Lightning_MNIST_GAN/envoy/envoy_config_no_gpu.yaml rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Lightning_MNIST_GAN/envoy/envoy_config_no_gpu.yaml diff --git a/openfl-tutorials/interactive_api/PyTorch_Lightning_MNIST_GAN/envoy/mnist_shard_descriptor.py b/openfl-tutorials/deprecated/interactive_api/PyTorch_Lightning_MNIST_GAN/envoy/mnist_shard_descriptor.py similarity index 100% rename from 
openfl-tutorials/interactive_api/PyTorch_Lightning_MNIST_GAN/envoy/mnist_shard_descriptor.py rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Lightning_MNIST_GAN/envoy/mnist_shard_descriptor.py diff --git a/openfl-tutorials/interactive_api/PyTorch_Lightning_MNIST_GAN/envoy/sd_requirements.txt b/openfl-tutorials/deprecated/interactive_api/PyTorch_Lightning_MNIST_GAN/envoy/sd_requirements.txt similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Lightning_MNIST_GAN/envoy/sd_requirements.txt rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Lightning_MNIST_GAN/envoy/sd_requirements.txt diff --git a/openfl-tutorials/interactive_api/PyTorch_Lightning_MNIST_GAN/envoy/start_envoy.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_Lightning_MNIST_GAN/envoy/start_envoy.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Lightning_MNIST_GAN/envoy/start_envoy.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Lightning_MNIST_GAN/envoy/start_envoy.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_Lightning_MNIST_GAN/workspace/PyTorch_Lightning_GAN.ipynb b/openfl-tutorials/deprecated/interactive_api/PyTorch_Lightning_MNIST_GAN/workspace/PyTorch_Lightning_GAN.ipynb similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Lightning_MNIST_GAN/workspace/PyTorch_Lightning_GAN.ipynb rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Lightning_MNIST_GAN/workspace/PyTorch_Lightning_GAN.ipynb diff --git a/openfl-tutorials/interactive_api/PyTorch_Lightning_MNIST_GAN/workspace/plugin_for_multiple_optimizers.py b/openfl-tutorials/deprecated/interactive_api/PyTorch_Lightning_MNIST_GAN/workspace/plugin_for_multiple_optimizers.py similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Lightning_MNIST_GAN/workspace/plugin_for_multiple_optimizers.py rename to 
openfl-tutorials/deprecated/interactive_api/PyTorch_Lightning_MNIST_GAN/workspace/plugin_for_multiple_optimizers.py diff --git a/openfl-tutorials/interactive_api/PyTorch_LinearRegression/README.md b/openfl-tutorials/deprecated/interactive_api/PyTorch_LinearRegression/README.md similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_LinearRegression/README.md rename to openfl-tutorials/deprecated/interactive_api/PyTorch_LinearRegression/README.md diff --git a/openfl-tutorials/interactive_api/PyTorch_LinearRegression/director/director_config.yaml b/openfl-tutorials/deprecated/interactive_api/PyTorch_LinearRegression/director/director_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_LinearRegression/director/director_config.yaml rename to openfl-tutorials/deprecated/interactive_api/PyTorch_LinearRegression/director/director_config.yaml diff --git a/openfl-tutorials/interactive_api/PyTorch_LinearRegression/director/start_director.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_LinearRegression/director/start_director.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_LinearRegression/director/start_director.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_LinearRegression/director/start_director.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_LinearRegression/envoy/envoy_config.yaml b/openfl-tutorials/deprecated/interactive_api/PyTorch_LinearRegression/envoy/envoy_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_LinearRegression/envoy/envoy_config.yaml rename to openfl-tutorials/deprecated/interactive_api/PyTorch_LinearRegression/envoy/envoy_config.yaml diff --git a/openfl-tutorials/interactive_api/PyTorch_LinearRegression/envoy/regression_shard_descriptor.py b/openfl-tutorials/deprecated/interactive_api/PyTorch_LinearRegression/envoy/regression_shard_descriptor.py similarity index 100% rename from 
openfl-tutorials/interactive_api/PyTorch_LinearRegression/envoy/regression_shard_descriptor.py rename to openfl-tutorials/deprecated/interactive_api/PyTorch_LinearRegression/envoy/regression_shard_descriptor.py diff --git a/openfl-tutorials/interactive_api/PyTorch_LinearRegression/envoy/requirements.txt b/openfl-tutorials/deprecated/interactive_api/PyTorch_LinearRegression/envoy/requirements.txt similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_LinearRegression/envoy/requirements.txt rename to openfl-tutorials/deprecated/interactive_api/PyTorch_LinearRegression/envoy/requirements.txt diff --git a/openfl-tutorials/interactive_api/PyTorch_LinearRegression/envoy/start_envoy.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_LinearRegression/envoy/start_envoy.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_LinearRegression/envoy/start_envoy.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_LinearRegression/envoy/start_envoy.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_LinearRegression/workspace/requirements.txt b/openfl-tutorials/deprecated/interactive_api/PyTorch_LinearRegression/workspace/requirements.txt similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_LinearRegression/workspace/requirements.txt rename to openfl-tutorials/deprecated/interactive_api/PyTorch_LinearRegression/workspace/requirements.txt diff --git a/openfl-tutorials/interactive_api/PyTorch_LinearRegression/workspace/torch_linear_regression.ipynb b/openfl-tutorials/deprecated/interactive_api/PyTorch_LinearRegression/workspace/torch_linear_regression.ipynb similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_LinearRegression/workspace/torch_linear_regression.ipynb rename to openfl-tutorials/deprecated/interactive_api/PyTorch_LinearRegression/workspace/torch_linear_regression.ipynb diff --git a/openfl-tutorials/interactive_api/PyTorch_MVTec_PatchSVDD/README.md 
b/openfl-tutorials/deprecated/interactive_api/PyTorch_MVTec_PatchSVDD/README.md similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MVTec_PatchSVDD/README.md rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MVTec_PatchSVDD/README.md diff --git a/openfl-tutorials/interactive_api/PyTorch_MVTec_PatchSVDD/director/director_config.yaml b/openfl-tutorials/deprecated/interactive_api/PyTorch_MVTec_PatchSVDD/director/director_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MVTec_PatchSVDD/director/director_config.yaml rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MVTec_PatchSVDD/director/director_config.yaml diff --git a/openfl-tutorials/interactive_api/PyTorch_MVTec_PatchSVDD/director/start_director.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_MVTec_PatchSVDD/director/start_director.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MVTec_PatchSVDD/director/start_director.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MVTec_PatchSVDD/director/start_director.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_MVTec_PatchSVDD/director/start_director_with_tls.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_MVTec_PatchSVDD/director/start_director_with_tls.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MVTec_PatchSVDD/director/start_director_with_tls.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MVTec_PatchSVDD/director/start_director_with_tls.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_MVTec_PatchSVDD/envoy/envoy_config.yaml b/openfl-tutorials/deprecated/interactive_api/PyTorch_MVTec_PatchSVDD/envoy/envoy_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MVTec_PatchSVDD/envoy/envoy_config.yaml rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MVTec_PatchSVDD/envoy/envoy_config.yaml diff --git 
a/openfl-tutorials/interactive_api/PyTorch_MVTec_PatchSVDD/envoy/mvtec_shard_descriptor.py b/openfl-tutorials/deprecated/interactive_api/PyTorch_MVTec_PatchSVDD/envoy/mvtec_shard_descriptor.py similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MVTec_PatchSVDD/envoy/mvtec_shard_descriptor.py rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MVTec_PatchSVDD/envoy/mvtec_shard_descriptor.py diff --git a/openfl-tutorials/interactive_api/PyTorch_MVTec_PatchSVDD/envoy/sd_requirements.txt b/openfl-tutorials/deprecated/interactive_api/PyTorch_MVTec_PatchSVDD/envoy/sd_requirements.txt similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MVTec_PatchSVDD/envoy/sd_requirements.txt rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MVTec_PatchSVDD/envoy/sd_requirements.txt diff --git a/openfl-tutorials/interactive_api/PyTorch_MVTec_PatchSVDD/envoy/start_envoy.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_MVTec_PatchSVDD/envoy/start_envoy.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MVTec_PatchSVDD/envoy/start_envoy.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MVTec_PatchSVDD/envoy/start_envoy.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_MVTec_PatchSVDD/envoy/start_envoy_with_tls.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_MVTec_PatchSVDD/envoy/start_envoy_with_tls.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MVTec_PatchSVDD/envoy/start_envoy_with_tls.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MVTec_PatchSVDD/envoy/start_envoy_with_tls.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_MVTec_PatchSVDD/workspace/PatchSVDD_with_Director.ipynb b/openfl-tutorials/deprecated/interactive_api/PyTorch_MVTec_PatchSVDD/workspace/PatchSVDD_with_Director.ipynb similarity index 100% rename from 
openfl-tutorials/interactive_api/PyTorch_MVTec_PatchSVDD/workspace/PatchSVDD_with_Director.ipynb rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MVTec_PatchSVDD/workspace/PatchSVDD_with_Director.ipynb diff --git a/openfl-tutorials/interactive_api/PyTorch_MVTec_PatchSVDD/workspace/data_transf.py b/openfl-tutorials/deprecated/interactive_api/PyTorch_MVTec_PatchSVDD/workspace/data_transf.py similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MVTec_PatchSVDD/workspace/data_transf.py rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MVTec_PatchSVDD/workspace/data_transf.py diff --git a/openfl-tutorials/interactive_api/PyTorch_MVTec_PatchSVDD/workspace/inspection.py b/openfl-tutorials/deprecated/interactive_api/PyTorch_MVTec_PatchSVDD/workspace/inspection.py similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MVTec_PatchSVDD/workspace/inspection.py rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MVTec_PatchSVDD/workspace/inspection.py diff --git a/openfl-tutorials/interactive_api/PyTorch_MVTec_PatchSVDD/workspace/utils.py b/openfl-tutorials/deprecated/interactive_api/PyTorch_MVTec_PatchSVDD/workspace/utils.py similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MVTec_PatchSVDD/workspace/utils.py rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MVTec_PatchSVDD/workspace/utils.py diff --git a/openfl-tutorials/interactive_api/PyTorch_Market_Re-ID/README.md b/openfl-tutorials/deprecated/interactive_api/PyTorch_Market_Re-ID/README.md similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Market_Re-ID/README.md rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Market_Re-ID/README.md diff --git a/openfl-tutorials/interactive_api/PyTorch_Market_Re-ID/director/director_config.yaml b/openfl-tutorials/deprecated/interactive_api/PyTorch_Market_Re-ID/director/director_config.yaml similarity index 100% rename from 
openfl-tutorials/interactive_api/PyTorch_Market_Re-ID/director/director_config.yaml rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Market_Re-ID/director/director_config.yaml diff --git a/openfl-tutorials/interactive_api/PyTorch_Market_Re-ID/director/start_director.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_Market_Re-ID/director/start_director.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Market_Re-ID/director/start_director.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Market_Re-ID/director/start_director.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_Market_Re-ID/director/start_director_with_tls.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_Market_Re-ID/director/start_director_with_tls.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Market_Re-ID/director/start_director_with_tls.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Market_Re-ID/director/start_director_with_tls.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_Market_Re-ID/envoy/envoy_config_one.yaml b/openfl-tutorials/deprecated/interactive_api/PyTorch_Market_Re-ID/envoy/envoy_config_one.yaml similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Market_Re-ID/envoy/envoy_config_one.yaml rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Market_Re-ID/envoy/envoy_config_one.yaml diff --git a/openfl-tutorials/interactive_api/PyTorch_Market_Re-ID/envoy/envoy_config_two.yaml b/openfl-tutorials/deprecated/interactive_api/PyTorch_Market_Re-ID/envoy/envoy_config_two.yaml similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Market_Re-ID/envoy/envoy_config_two.yaml rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Market_Re-ID/envoy/envoy_config_two.yaml diff --git a/openfl-tutorials/interactive_api/PyTorch_Market_Re-ID/envoy/market_shard_descriptor.py 
b/openfl-tutorials/deprecated/interactive_api/PyTorch_Market_Re-ID/envoy/market_shard_descriptor.py similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Market_Re-ID/envoy/market_shard_descriptor.py rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Market_Re-ID/envoy/market_shard_descriptor.py diff --git a/openfl-tutorials/interactive_api/PyTorch_Market_Re-ID/envoy/requirements.txt b/openfl-tutorials/deprecated/interactive_api/PyTorch_Market_Re-ID/envoy/requirements.txt similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Market_Re-ID/envoy/requirements.txt rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Market_Re-ID/envoy/requirements.txt diff --git a/openfl-tutorials/interactive_api/PyTorch_Market_Re-ID/envoy/start_envoy.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_Market_Re-ID/envoy/start_envoy.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Market_Re-ID/envoy/start_envoy.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Market_Re-ID/envoy/start_envoy.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_Market_Re-ID/envoy/start_envoy_with_tls.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_Market_Re-ID/envoy/start_envoy_with_tls.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Market_Re-ID/envoy/start_envoy_with_tls.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Market_Re-ID/envoy/start_envoy_with_tls.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_Market_Re-ID/workspace/PyTorch_Market_Re-ID.ipynb b/openfl-tutorials/deprecated/interactive_api/PyTorch_Market_Re-ID/workspace/PyTorch_Market_Re-ID.ipynb similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Market_Re-ID/workspace/PyTorch_Market_Re-ID.ipynb rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Market_Re-ID/workspace/PyTorch_Market_Re-ID.ipynb diff --git 
a/openfl-tutorials/interactive_api/PyTorch_Market_Re-ID/workspace/losses.py b/openfl-tutorials/deprecated/interactive_api/PyTorch_Market_Re-ID/workspace/losses.py similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Market_Re-ID/workspace/losses.py rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Market_Re-ID/workspace/losses.py diff --git a/openfl-tutorials/interactive_api/PyTorch_Market_Re-ID/workspace/requirements.txt b/openfl-tutorials/deprecated/interactive_api/PyTorch_Market_Re-ID/workspace/requirements.txt similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Market_Re-ID/workspace/requirements.txt rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Market_Re-ID/workspace/requirements.txt diff --git a/openfl-tutorials/interactive_api/PyTorch_Market_Re-ID/workspace/tools.py b/openfl-tutorials/deprecated/interactive_api/PyTorch_Market_Re-ID/workspace/tools.py similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Market_Re-ID/workspace/tools.py rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Market_Re-ID/workspace/tools.py diff --git a/openfl-tutorials/interactive_api/PyTorch_Market_Re-ID/workspace/transforms.py b/openfl-tutorials/deprecated/interactive_api/PyTorch_Market_Re-ID/workspace/transforms.py similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_Market_Re-ID/workspace/transforms.py rename to openfl-tutorials/deprecated/interactive_api/PyTorch_Market_Re-ID/workspace/transforms.py diff --git a/openfl-tutorials/interactive_api/PyTorch_MedMNIST_2D/README.md b/openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_2D/README.md similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MedMNIST_2D/README.md rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_2D/README.md diff --git a/openfl-tutorials/interactive_api/PyTorch_MedMNIST_2D/director/director_config.yaml 
b/openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_2D/director/director_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MedMNIST_2D/director/director_config.yaml rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_2D/director/director_config.yaml diff --git a/openfl-tutorials/interactive_api/PyTorch_MedMNIST_2D/director/start_director.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_2D/director/start_director.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MedMNIST_2D/director/start_director.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_2D/director/start_director.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_MedMNIST_2D/envoy/envoy_config.yaml b/openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_2D/envoy/envoy_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MedMNIST_2D/envoy/envoy_config.yaml rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_2D/envoy/envoy_config.yaml diff --git a/openfl-tutorials/interactive_api/PyTorch_MedMNIST_2D/envoy/medmnist_shard_descriptor.py b/openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_2D/envoy/medmnist_shard_descriptor.py similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MedMNIST_2D/envoy/medmnist_shard_descriptor.py rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_2D/envoy/medmnist_shard_descriptor.py diff --git a/openfl-tutorials/interactive_api/PyTorch_MedMNIST_2D/envoy/requirements.txt b/openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_2D/envoy/requirements.txt similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MedMNIST_2D/envoy/requirements.txt rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_2D/envoy/requirements.txt diff --git 
a/openfl-tutorials/interactive_api/PyTorch_MedMNIST_2D/envoy/start_envoy.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_2D/envoy/start_envoy.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MedMNIST_2D/envoy/start_envoy.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_2D/envoy/start_envoy.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_MedMNIST_2D/workspace/Pytorch_MedMNIST_2D.ipynb b/openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_2D/workspace/Pytorch_MedMNIST_2D.ipynb similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MedMNIST_2D/workspace/Pytorch_MedMNIST_2D.ipynb rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_2D/workspace/Pytorch_MedMNIST_2D.ipynb diff --git a/openfl-tutorials/interactive_api/PyTorch_MedMNIST_3D/README.md b/openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_3D/README.md similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MedMNIST_3D/README.md rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_3D/README.md diff --git a/openfl-tutorials/interactive_api/PyTorch_MedMNIST_3D/director/director_config.yaml b/openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_3D/director/director_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MedMNIST_3D/director/director_config.yaml rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_3D/director/director_config.yaml diff --git a/openfl-tutorials/interactive_api/PyTorch_MedMNIST_3D/director/start_director.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_3D/director/start_director.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MedMNIST_3D/director/start_director.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_3D/director/start_director.sh diff --git 
a/openfl-tutorials/interactive_api/PyTorch_MedMNIST_3D/envoy/envoy_config.yaml b/openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_3D/envoy/envoy_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MedMNIST_3D/envoy/envoy_config.yaml rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_3D/envoy/envoy_config.yaml diff --git a/openfl-tutorials/interactive_api/PyTorch_MedMNIST_3D/envoy/medmnist_shard_descriptor.py b/openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_3D/envoy/medmnist_shard_descriptor.py similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MedMNIST_3D/envoy/medmnist_shard_descriptor.py rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_3D/envoy/medmnist_shard_descriptor.py diff --git a/openfl-tutorials/interactive_api/PyTorch_MedMNIST_3D/envoy/requirements.txt b/openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_3D/envoy/requirements.txt similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MedMNIST_3D/envoy/requirements.txt rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_3D/envoy/requirements.txt diff --git a/openfl-tutorials/interactive_api/PyTorch_MedMNIST_3D/envoy/start_envoy.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_3D/envoy/start_envoy.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MedMNIST_3D/envoy/start_envoy.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_3D/envoy/start_envoy.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_MedMNIST_3D/workspace/Pytorch_MedMNIST_3D.ipynb b/openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_3D/workspace/Pytorch_MedMNIST_3D.ipynb similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MedMNIST_3D/workspace/Pytorch_MedMNIST_3D.ipynb rename to 
openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_3D/workspace/Pytorch_MedMNIST_3D.ipynb diff --git a/openfl-tutorials/interactive_api/PyTorch_MedMNIST_3D/workspace/wspace_utils/__init__.py b/openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_3D/workspace/wspace_utils/__init__.py similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MedMNIST_3D/workspace/wspace_utils/__init__.py rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_3D/workspace/wspace_utils/__init__.py diff --git a/openfl-tutorials/interactive_api/PyTorch_MedMNIST_3D/workspace/wspace_utils/batchnorm.py b/openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_3D/workspace/wspace_utils/batchnorm.py similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MedMNIST_3D/workspace/wspace_utils/batchnorm.py rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_3D/workspace/wspace_utils/batchnorm.py diff --git a/openfl-tutorials/interactive_api/PyTorch_MedMNIST_3D/workspace/wspace_utils/comm.py b/openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_3D/workspace/wspace_utils/comm.py similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MedMNIST_3D/workspace/wspace_utils/comm.py rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_3D/workspace/wspace_utils/comm.py diff --git a/openfl-tutorials/interactive_api/PyTorch_MedMNIST_3D/workspace/wspace_utils/replicate.py b/openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_3D/workspace/wspace_utils/replicate.py similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MedMNIST_3D/workspace/wspace_utils/replicate.py rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_3D/workspace/wspace_utils/replicate.py diff --git a/openfl-tutorials/interactive_api/PyTorch_MedMNIST_3D/workspace/wspace_utils/utils.py 
b/openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_3D/workspace/wspace_utils/utils.py similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_MedMNIST_3D/workspace/wspace_utils/utils.py rename to openfl-tutorials/deprecated/interactive_api/PyTorch_MedMNIST_3D/workspace/wspace_utils/utils.py diff --git a/openfl-tutorials/interactive_api/PyTorch_TinyImageNet/README.md b/openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet/README.md similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_TinyImageNet/README.md rename to openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet/README.md diff --git a/openfl-tutorials/interactive_api/PyTorch_TinyImageNet/director/director_config.yaml b/openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet/director/director_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_TinyImageNet/director/director_config.yaml rename to openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet/director/director_config.yaml diff --git a/openfl-tutorials/interactive_api/PyTorch_TinyImageNet/director/start_director.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet/director/start_director.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_TinyImageNet/director/start_director.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet/director/start_director.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_TinyImageNet/director/start_director_with_tls.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet/director/start_director_with_tls.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_TinyImageNet/director/start_director_with_tls.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet/director/start_director_with_tls.sh diff --git 
a/openfl-tutorials/interactive_api/PyTorch_TinyImageNet/envoy/envoy_config.yaml b/openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet/envoy/envoy_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_TinyImageNet/envoy/envoy_config.yaml rename to openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet/envoy/envoy_config.yaml diff --git a/openfl-tutorials/interactive_api/PyTorch_TinyImageNet/envoy/requirements.txt b/openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet/envoy/requirements.txt similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_TinyImageNet/envoy/requirements.txt rename to openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet/envoy/requirements.txt diff --git a/openfl-tutorials/interactive_api/PyTorch_TinyImageNet/envoy/start_envoy.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet/envoy/start_envoy.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_TinyImageNet/envoy/start_envoy.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet/envoy/start_envoy.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_TinyImageNet/envoy/start_envoy_with_tls.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet/envoy/start_envoy_with_tls.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_TinyImageNet/envoy/start_envoy_with_tls.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet/envoy/start_envoy_with_tls.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_TinyImageNet/envoy/tinyimagenet_shard_descriptor.py b/openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet/envoy/tinyimagenet_shard_descriptor.py similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_TinyImageNet/envoy/tinyimagenet_shard_descriptor.py rename to 
openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet/envoy/tinyimagenet_shard_descriptor.py diff --git a/openfl-tutorials/interactive_api/PyTorch_TinyImageNet/workspace/non-federated_case.ipynb b/openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet/workspace/non-federated_case.ipynb similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_TinyImageNet/workspace/non-federated_case.ipynb rename to openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet/workspace/non-federated_case.ipynb diff --git a/openfl-tutorials/interactive_api/PyTorch_TinyImageNet/workspace/pytorch_tinyimagenet.ipynb b/openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet/workspace/pytorch_tinyimagenet.ipynb similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_TinyImageNet/workspace/pytorch_tinyimagenet.ipynb rename to openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet/workspace/pytorch_tinyimagenet.ipynb diff --git a/openfl-tutorials/interactive_api/PyTorch_TinyImageNet/workspace/requirements.txt b/openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet/workspace/requirements.txt similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_TinyImageNet/workspace/requirements.txt rename to openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet/workspace/requirements.txt diff --git a/openfl-tutorials/interactive_api/PyTorch_TinyImageNet_XPU/README.md b/openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet_XPU/README.md similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_TinyImageNet_XPU/README.md rename to openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet_XPU/README.md diff --git a/openfl-tutorials/interactive_api/PyTorch_TinyImageNet_XPU/director/director_config.yaml b/openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet_XPU/director/director_config.yaml similarity index 100% rename from 
openfl-tutorials/interactive_api/PyTorch_TinyImageNet_XPU/director/director_config.yaml rename to openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet_XPU/director/director_config.yaml diff --git a/openfl-tutorials/interactive_api/PyTorch_TinyImageNet_XPU/director/start_director.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet_XPU/director/start_director.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_TinyImageNet_XPU/director/start_director.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet_XPU/director/start_director.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_TinyImageNet_XPU/director/start_director_with_tls.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet_XPU/director/start_director_with_tls.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_TinyImageNet_XPU/director/start_director_with_tls.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet_XPU/director/start_director_with_tls.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_TinyImageNet_XPU/envoy/envoy_config.yaml b/openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet_XPU/envoy/envoy_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_TinyImageNet_XPU/envoy/envoy_config.yaml rename to openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet_XPU/envoy/envoy_config.yaml diff --git a/openfl-tutorials/interactive_api/PyTorch_TinyImageNet_XPU/envoy/requirements.txt b/openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet_XPU/envoy/requirements.txt similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_TinyImageNet_XPU/envoy/requirements.txt rename to openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet_XPU/envoy/requirements.txt diff --git a/openfl-tutorials/interactive_api/PyTorch_TinyImageNet_XPU/envoy/start_envoy.sh 
b/openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet_XPU/envoy/start_envoy.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_TinyImageNet_XPU/envoy/start_envoy.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet_XPU/envoy/start_envoy.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_TinyImageNet_XPU/envoy/start_envoy_with_tls.sh b/openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet_XPU/envoy/start_envoy_with_tls.sh similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_TinyImageNet_XPU/envoy/start_envoy_with_tls.sh rename to openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet_XPU/envoy/start_envoy_with_tls.sh diff --git a/openfl-tutorials/interactive_api/PyTorch_TinyImageNet_XPU/envoy/tinyimagenet_shard_descriptor.py b/openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet_XPU/envoy/tinyimagenet_shard_descriptor.py similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_TinyImageNet_XPU/envoy/tinyimagenet_shard_descriptor.py rename to openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet_XPU/envoy/tinyimagenet_shard_descriptor.py diff --git a/openfl-tutorials/interactive_api/PyTorch_TinyImageNet_XPU/workspace/pytorch_tinyimagenet_XPU.ipynb b/openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet_XPU/workspace/pytorch_tinyimagenet_XPU.ipynb similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_TinyImageNet_XPU/workspace/pytorch_tinyimagenet_XPU.ipynb rename to openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet_XPU/workspace/pytorch_tinyimagenet_XPU.ipynb diff --git a/openfl-tutorials/interactive_api/PyTorch_TinyImageNet_XPU/workspace/requirements.txt b/openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet_XPU/workspace/requirements.txt similarity index 100% rename from openfl-tutorials/interactive_api/PyTorch_TinyImageNet_XPU/workspace/requirements.txt 
rename to openfl-tutorials/deprecated/interactive_api/PyTorch_TinyImageNet_XPU/workspace/requirements.txt diff --git a/openfl-tutorials/interactive_api/README.md b/openfl-tutorials/deprecated/interactive_api/README.md similarity index 100% rename from openfl-tutorials/interactive_api/README.md rename to openfl-tutorials/deprecated/interactive_api/README.md diff --git a/openfl-tutorials/interactive_api/Tensorflow_CIFAR_tfdata/README.md b/openfl-tutorials/deprecated/interactive_api/Tensorflow_CIFAR_tfdata/README.md similarity index 100% rename from openfl-tutorials/interactive_api/Tensorflow_CIFAR_tfdata/README.md rename to openfl-tutorials/deprecated/interactive_api/Tensorflow_CIFAR_tfdata/README.md diff --git a/openfl-tutorials/interactive_api/Tensorflow_CIFAR_tfdata/director/director_config.yaml b/openfl-tutorials/deprecated/interactive_api/Tensorflow_CIFAR_tfdata/director/director_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/Tensorflow_CIFAR_tfdata/director/director_config.yaml rename to openfl-tutorials/deprecated/interactive_api/Tensorflow_CIFAR_tfdata/director/director_config.yaml diff --git a/openfl-tutorials/interactive_api/Tensorflow_CIFAR_tfdata/director/start_director.sh b/openfl-tutorials/deprecated/interactive_api/Tensorflow_CIFAR_tfdata/director/start_director.sh similarity index 100% rename from openfl-tutorials/interactive_api/Tensorflow_CIFAR_tfdata/director/start_director.sh rename to openfl-tutorials/deprecated/interactive_api/Tensorflow_CIFAR_tfdata/director/start_director.sh diff --git a/openfl-tutorials/interactive_api/Tensorflow_CIFAR_tfdata/envoy/cifar10_shard_descriptor.py b/openfl-tutorials/deprecated/interactive_api/Tensorflow_CIFAR_tfdata/envoy/cifar10_shard_descriptor.py similarity index 100% rename from openfl-tutorials/interactive_api/Tensorflow_CIFAR_tfdata/envoy/cifar10_shard_descriptor.py rename to openfl-tutorials/deprecated/interactive_api/Tensorflow_CIFAR_tfdata/envoy/cifar10_shard_descriptor.py 
diff --git a/openfl-tutorials/interactive_api/Tensorflow_CIFAR_tfdata/envoy/envoy_config_one.yaml b/openfl-tutorials/deprecated/interactive_api/Tensorflow_CIFAR_tfdata/envoy/envoy_config_one.yaml similarity index 100% rename from openfl-tutorials/interactive_api/Tensorflow_CIFAR_tfdata/envoy/envoy_config_one.yaml rename to openfl-tutorials/deprecated/interactive_api/Tensorflow_CIFAR_tfdata/envoy/envoy_config_one.yaml diff --git a/openfl-tutorials/interactive_api/Tensorflow_CIFAR_tfdata/envoy/envoy_config_two.yaml b/openfl-tutorials/deprecated/interactive_api/Tensorflow_CIFAR_tfdata/envoy/envoy_config_two.yaml similarity index 100% rename from openfl-tutorials/interactive_api/Tensorflow_CIFAR_tfdata/envoy/envoy_config_two.yaml rename to openfl-tutorials/deprecated/interactive_api/Tensorflow_CIFAR_tfdata/envoy/envoy_config_two.yaml diff --git a/openfl-tutorials/interactive_api/Tensorflow_CIFAR_tfdata/envoy/start_envoy.sh b/openfl-tutorials/deprecated/interactive_api/Tensorflow_CIFAR_tfdata/envoy/start_envoy.sh similarity index 100% rename from openfl-tutorials/interactive_api/Tensorflow_CIFAR_tfdata/envoy/start_envoy.sh rename to openfl-tutorials/deprecated/interactive_api/Tensorflow_CIFAR_tfdata/envoy/start_envoy.sh diff --git a/openfl-tutorials/interactive_api/Tensorflow_CIFAR_tfdata/workspace/Tensorflow_CIFAR.ipynb b/openfl-tutorials/deprecated/interactive_api/Tensorflow_CIFAR_tfdata/workspace/Tensorflow_CIFAR.ipynb similarity index 100% rename from openfl-tutorials/interactive_api/Tensorflow_CIFAR_tfdata/workspace/Tensorflow_CIFAR.ipynb rename to openfl-tutorials/deprecated/interactive_api/Tensorflow_CIFAR_tfdata/workspace/Tensorflow_CIFAR.ipynb diff --git a/openfl-tutorials/interactive_api/Tensorflow_MNIST/README.md b/openfl-tutorials/deprecated/interactive_api/Tensorflow_MNIST/README.md similarity index 100% rename from openfl-tutorials/interactive_api/Tensorflow_MNIST/README.md rename to openfl-tutorials/deprecated/interactive_api/Tensorflow_MNIST/README.md 
diff --git a/openfl-tutorials/interactive_api/Tensorflow_MNIST/director/director_config.yaml b/openfl-tutorials/deprecated/interactive_api/Tensorflow_MNIST/director/director_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/Tensorflow_MNIST/director/director_config.yaml rename to openfl-tutorials/deprecated/interactive_api/Tensorflow_MNIST/director/director_config.yaml diff --git a/openfl-tutorials/interactive_api/Tensorflow_MNIST/director/start_director.sh b/openfl-tutorials/deprecated/interactive_api/Tensorflow_MNIST/director/start_director.sh similarity index 100% rename from openfl-tutorials/interactive_api/Tensorflow_MNIST/director/start_director.sh rename to openfl-tutorials/deprecated/interactive_api/Tensorflow_MNIST/director/start_director.sh diff --git a/openfl-tutorials/interactive_api/Tensorflow_MNIST/director/start_director_with_tls.sh b/openfl-tutorials/deprecated/interactive_api/Tensorflow_MNIST/director/start_director_with_tls.sh similarity index 100% rename from openfl-tutorials/interactive_api/Tensorflow_MNIST/director/start_director_with_tls.sh rename to openfl-tutorials/deprecated/interactive_api/Tensorflow_MNIST/director/start_director_with_tls.sh diff --git a/openfl-tutorials/interactive_api/Tensorflow_MNIST/envoy/envoy_config_one.yaml b/openfl-tutorials/deprecated/interactive_api/Tensorflow_MNIST/envoy/envoy_config_one.yaml similarity index 100% rename from openfl-tutorials/interactive_api/Tensorflow_MNIST/envoy/envoy_config_one.yaml rename to openfl-tutorials/deprecated/interactive_api/Tensorflow_MNIST/envoy/envoy_config_one.yaml diff --git a/openfl-tutorials/interactive_api/Tensorflow_MNIST/envoy/envoy_config_two.yaml b/openfl-tutorials/deprecated/interactive_api/Tensorflow_MNIST/envoy/envoy_config_two.yaml similarity index 100% rename from openfl-tutorials/interactive_api/Tensorflow_MNIST/envoy/envoy_config_two.yaml rename to 
openfl-tutorials/deprecated/interactive_api/Tensorflow_MNIST/envoy/envoy_config_two.yaml diff --git a/openfl-tutorials/interactive_api/Tensorflow_MNIST/envoy/mnist_shard_descriptor.py b/openfl-tutorials/deprecated/interactive_api/Tensorflow_MNIST/envoy/mnist_shard_descriptor.py similarity index 100% rename from openfl-tutorials/interactive_api/Tensorflow_MNIST/envoy/mnist_shard_descriptor.py rename to openfl-tutorials/deprecated/interactive_api/Tensorflow_MNIST/envoy/mnist_shard_descriptor.py diff --git a/openfl-tutorials/interactive_api/Tensorflow_MNIST/envoy/start_envoy.sh b/openfl-tutorials/deprecated/interactive_api/Tensorflow_MNIST/envoy/start_envoy.sh similarity index 100% rename from openfl-tutorials/interactive_api/Tensorflow_MNIST/envoy/start_envoy.sh rename to openfl-tutorials/deprecated/interactive_api/Tensorflow_MNIST/envoy/start_envoy.sh diff --git a/openfl-tutorials/interactive_api/Tensorflow_MNIST/envoy/start_envoy_with_tls.sh b/openfl-tutorials/deprecated/interactive_api/Tensorflow_MNIST/envoy/start_envoy_with_tls.sh similarity index 100% rename from openfl-tutorials/interactive_api/Tensorflow_MNIST/envoy/start_envoy_with_tls.sh rename to openfl-tutorials/deprecated/interactive_api/Tensorflow_MNIST/envoy/start_envoy_with_tls.sh diff --git a/openfl-tutorials/interactive_api/Tensorflow_MNIST/workspace/Tensorflow_MNIST.ipynb b/openfl-tutorials/deprecated/interactive_api/Tensorflow_MNIST/workspace/Tensorflow_MNIST.ipynb similarity index 100% rename from openfl-tutorials/interactive_api/Tensorflow_MNIST/workspace/Tensorflow_MNIST.ipynb rename to openfl-tutorials/deprecated/interactive_api/Tensorflow_MNIST/workspace/Tensorflow_MNIST.ipynb diff --git a/openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/README.md b/openfl-tutorials/deprecated/interactive_api/Tensorflow_Word_Prediction/README.md similarity index 100% rename from openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/README.md rename to 
openfl-tutorials/deprecated/interactive_api/Tensorflow_Word_Prediction/README.md diff --git a/openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/director/director_config.yaml b/openfl-tutorials/deprecated/interactive_api/Tensorflow_Word_Prediction/director/director_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/director/director_config.yaml rename to openfl-tutorials/deprecated/interactive_api/Tensorflow_Word_Prediction/director/director_config.yaml diff --git a/openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/director/start_director.sh b/openfl-tutorials/deprecated/interactive_api/Tensorflow_Word_Prediction/director/start_director.sh similarity index 100% rename from openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/director/start_director.sh rename to openfl-tutorials/deprecated/interactive_api/Tensorflow_Word_Prediction/director/start_director.sh diff --git a/openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/director/start_director_with_tls.sh b/openfl-tutorials/deprecated/interactive_api/Tensorflow_Word_Prediction/director/start_director_with_tls.sh similarity index 100% rename from openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/director/start_director_with_tls.sh rename to openfl-tutorials/deprecated/interactive_api/Tensorflow_Word_Prediction/director/start_director_with_tls.sh diff --git a/openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/envoy/envoy_config_one.yaml b/openfl-tutorials/deprecated/interactive_api/Tensorflow_Word_Prediction/envoy/envoy_config_one.yaml similarity index 100% rename from openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/envoy/envoy_config_one.yaml rename to openfl-tutorials/deprecated/interactive_api/Tensorflow_Word_Prediction/envoy/envoy_config_one.yaml diff --git a/openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/envoy/envoy_config_three.yaml 
b/openfl-tutorials/deprecated/interactive_api/Tensorflow_Word_Prediction/envoy/envoy_config_three.yaml similarity index 100% rename from openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/envoy/envoy_config_three.yaml rename to openfl-tutorials/deprecated/interactive_api/Tensorflow_Word_Prediction/envoy/envoy_config_three.yaml diff --git a/openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/envoy/envoy_config_two.yaml b/openfl-tutorials/deprecated/interactive_api/Tensorflow_Word_Prediction/envoy/envoy_config_two.yaml similarity index 100% rename from openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/envoy/envoy_config_two.yaml rename to openfl-tutorials/deprecated/interactive_api/Tensorflow_Word_Prediction/envoy/envoy_config_two.yaml diff --git a/openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/envoy/sd_requirements.txt b/openfl-tutorials/deprecated/interactive_api/Tensorflow_Word_Prediction/envoy/sd_requirements.txt similarity index 100% rename from openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/envoy/sd_requirements.txt rename to openfl-tutorials/deprecated/interactive_api/Tensorflow_Word_Prediction/envoy/sd_requirements.txt diff --git a/openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/envoy/shard_descriptor.py b/openfl-tutorials/deprecated/interactive_api/Tensorflow_Word_Prediction/envoy/shard_descriptor.py similarity index 100% rename from openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/envoy/shard_descriptor.py rename to openfl-tutorials/deprecated/interactive_api/Tensorflow_Word_Prediction/envoy/shard_descriptor.py diff --git a/openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/envoy/start_envoy.sh b/openfl-tutorials/deprecated/interactive_api/Tensorflow_Word_Prediction/envoy/start_envoy.sh similarity index 100% rename from openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/envoy/start_envoy.sh rename to 
openfl-tutorials/deprecated/interactive_api/Tensorflow_Word_Prediction/envoy/start_envoy.sh diff --git a/openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/envoy/start_envoy_with_tls.sh b/openfl-tutorials/deprecated/interactive_api/Tensorflow_Word_Prediction/envoy/start_envoy_with_tls.sh similarity index 100% rename from openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/envoy/start_envoy_with_tls.sh rename to openfl-tutorials/deprecated/interactive_api/Tensorflow_Word_Prediction/envoy/start_envoy_with_tls.sh diff --git a/openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/workspace/Tensorflow_Word_Prediction.ipynb b/openfl-tutorials/deprecated/interactive_api/Tensorflow_Word_Prediction/workspace/Tensorflow_Word_Prediction.ipynb similarity index 100% rename from openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/workspace/Tensorflow_Word_Prediction.ipynb rename to openfl-tutorials/deprecated/interactive_api/Tensorflow_Word_Prediction/workspace/Tensorflow_Word_Prediction.ipynb diff --git a/openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/workspace/requirements.txt b/openfl-tutorials/deprecated/interactive_api/Tensorflow_Word_Prediction/workspace/requirements.txt similarity index 100% rename from openfl-tutorials/interactive_api/Tensorflow_Word_Prediction/workspace/requirements.txt rename to openfl-tutorials/deprecated/interactive_api/Tensorflow_Word_Prediction/workspace/requirements.txt diff --git a/openfl-tutorials/interactive_api/jax_linear_regression/README.md b/openfl-tutorials/deprecated/interactive_api/jax_linear_regression/README.md similarity index 100% rename from openfl-tutorials/interactive_api/jax_linear_regression/README.md rename to openfl-tutorials/deprecated/interactive_api/jax_linear_regression/README.md diff --git a/openfl-tutorials/interactive_api/jax_linear_regression/director/director_config.yaml b/openfl-tutorials/deprecated/interactive_api/jax_linear_regression/director/director_config.yaml 
similarity index 100% rename from openfl-tutorials/interactive_api/jax_linear_regression/director/director_config.yaml rename to openfl-tutorials/deprecated/interactive_api/jax_linear_regression/director/director_config.yaml diff --git a/openfl-tutorials/interactive_api/jax_linear_regression/director/start_director.sh b/openfl-tutorials/deprecated/interactive_api/jax_linear_regression/director/start_director.sh similarity index 100% rename from openfl-tutorials/interactive_api/jax_linear_regression/director/start_director.sh rename to openfl-tutorials/deprecated/interactive_api/jax_linear_regression/director/start_director.sh diff --git a/openfl-tutorials/interactive_api/jax_linear_regression/envoy/envoy_config_1.yaml b/openfl-tutorials/deprecated/interactive_api/jax_linear_regression/envoy/envoy_config_1.yaml similarity index 100% rename from openfl-tutorials/interactive_api/jax_linear_regression/envoy/envoy_config_1.yaml rename to openfl-tutorials/deprecated/interactive_api/jax_linear_regression/envoy/envoy_config_1.yaml diff --git a/openfl-tutorials/interactive_api/jax_linear_regression/envoy/regression_shard_descriptor.py b/openfl-tutorials/deprecated/interactive_api/jax_linear_regression/envoy/regression_shard_descriptor.py similarity index 100% rename from openfl-tutorials/interactive_api/jax_linear_regression/envoy/regression_shard_descriptor.py rename to openfl-tutorials/deprecated/interactive_api/jax_linear_regression/envoy/regression_shard_descriptor.py diff --git a/openfl-tutorials/interactive_api/jax_linear_regression/envoy/requirements.txt b/openfl-tutorials/deprecated/interactive_api/jax_linear_regression/envoy/requirements.txt similarity index 100% rename from openfl-tutorials/interactive_api/jax_linear_regression/envoy/requirements.txt rename to openfl-tutorials/deprecated/interactive_api/jax_linear_regression/envoy/requirements.txt diff --git a/openfl-tutorials/interactive_api/jax_linear_regression/envoy/start_envoy.sh 
b/openfl-tutorials/deprecated/interactive_api/jax_linear_regression/envoy/start_envoy.sh similarity index 100% rename from openfl-tutorials/interactive_api/jax_linear_regression/envoy/start_envoy.sh rename to openfl-tutorials/deprecated/interactive_api/jax_linear_regression/envoy/start_envoy.sh diff --git a/openfl-tutorials/interactive_api/jax_linear_regression/workspace/JAX_linear_regression.ipynb b/openfl-tutorials/deprecated/interactive_api/jax_linear_regression/workspace/JAX_linear_regression.ipynb similarity index 100% rename from openfl-tutorials/interactive_api/jax_linear_regression/workspace/JAX_linear_regression.ipynb rename to openfl-tutorials/deprecated/interactive_api/jax_linear_regression/workspace/JAX_linear_regression.ipynb diff --git a/openfl-tutorials/interactive_api/jax_linear_regression/workspace/custom_adapter.py b/openfl-tutorials/deprecated/interactive_api/jax_linear_regression/workspace/custom_adapter.py similarity index 100% rename from openfl-tutorials/interactive_api/jax_linear_regression/workspace/custom_adapter.py rename to openfl-tutorials/deprecated/interactive_api/jax_linear_regression/workspace/custom_adapter.py diff --git a/openfl-tutorials/interactive_api/numpy_linear_regression/README.md b/openfl-tutorials/deprecated/interactive_api/numpy_linear_regression/README.md similarity index 100% rename from openfl-tutorials/interactive_api/numpy_linear_regression/README.md rename to openfl-tutorials/deprecated/interactive_api/numpy_linear_regression/README.md diff --git a/openfl-tutorials/interactive_api/numpy_linear_regression/director/director_config.yaml b/openfl-tutorials/deprecated/interactive_api/numpy_linear_regression/director/director_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/numpy_linear_regression/director/director_config.yaml rename to openfl-tutorials/deprecated/interactive_api/numpy_linear_regression/director/director_config.yaml diff --git 
a/openfl-tutorials/interactive_api/numpy_linear_regression/envoy/envoy_config.yaml b/openfl-tutorials/deprecated/interactive_api/numpy_linear_regression/envoy/envoy_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/numpy_linear_regression/envoy/envoy_config.yaml rename to openfl-tutorials/deprecated/interactive_api/numpy_linear_regression/envoy/envoy_config.yaml diff --git a/openfl-tutorials/interactive_api/numpy_linear_regression/envoy/linreg_shard_descriptor.py b/openfl-tutorials/deprecated/interactive_api/numpy_linear_regression/envoy/linreg_shard_descriptor.py similarity index 100% rename from openfl-tutorials/interactive_api/numpy_linear_regression/envoy/linreg_shard_descriptor.py rename to openfl-tutorials/deprecated/interactive_api/numpy_linear_regression/envoy/linreg_shard_descriptor.py diff --git a/openfl-tutorials/interactive_api/numpy_linear_regression/envoy/requirements.txt b/openfl-tutorials/deprecated/interactive_api/numpy_linear_regression/envoy/requirements.txt similarity index 100% rename from openfl-tutorials/interactive_api/numpy_linear_regression/envoy/requirements.txt rename to openfl-tutorials/deprecated/interactive_api/numpy_linear_regression/envoy/requirements.txt diff --git a/openfl-tutorials/interactive_api/numpy_linear_regression/workspace/LinReg.ipynb b/openfl-tutorials/deprecated/interactive_api/numpy_linear_regression/workspace/LinReg.ipynb similarity index 100% rename from openfl-tutorials/interactive_api/numpy_linear_regression/workspace/LinReg.ipynb rename to openfl-tutorials/deprecated/interactive_api/numpy_linear_regression/workspace/LinReg.ipynb diff --git a/openfl-tutorials/interactive_api/numpy_linear_regression/workspace/SingleNotebook.ipynb b/openfl-tutorials/deprecated/interactive_api/numpy_linear_regression/workspace/SingleNotebook.ipynb similarity index 100% rename from openfl-tutorials/interactive_api/numpy_linear_regression/workspace/SingleNotebook.ipynb rename to 
openfl-tutorials/deprecated/interactive_api/numpy_linear_regression/workspace/SingleNotebook.ipynb diff --git a/openfl-tutorials/interactive_api/numpy_linear_regression/workspace/custom_adapter.py b/openfl-tutorials/deprecated/interactive_api/numpy_linear_regression/workspace/custom_adapter.py similarity index 100% rename from openfl-tutorials/interactive_api/numpy_linear_regression/workspace/custom_adapter.py rename to openfl-tutorials/deprecated/interactive_api/numpy_linear_regression/workspace/custom_adapter.py diff --git a/openfl-tutorials/interactive_api/numpy_linear_regression/workspace/requirements.txt b/openfl-tutorials/deprecated/interactive_api/numpy_linear_regression/workspace/requirements.txt similarity index 100% rename from openfl-tutorials/interactive_api/numpy_linear_regression/workspace/requirements.txt rename to openfl-tutorials/deprecated/interactive_api/numpy_linear_regression/workspace/requirements.txt diff --git a/openfl-tutorials/interactive_api/numpy_linear_regression/workspace/start_federation.ipynb b/openfl-tutorials/deprecated/interactive_api/numpy_linear_regression/workspace/start_federation.ipynb similarity index 100% rename from openfl-tutorials/interactive_api/numpy_linear_regression/workspace/start_federation.ipynb rename to openfl-tutorials/deprecated/interactive_api/numpy_linear_regression/workspace/start_federation.ipynb diff --git a/openfl-tutorials/interactive_api/scikit_learn_linear_regression/README.md b/openfl-tutorials/deprecated/interactive_api/scikit_learn_linear_regression/README.md similarity index 100% rename from openfl-tutorials/interactive_api/scikit_learn_linear_regression/README.md rename to openfl-tutorials/deprecated/interactive_api/scikit_learn_linear_regression/README.md diff --git a/openfl-tutorials/interactive_api/scikit_learn_linear_regression/director/director_config.yaml b/openfl-tutorials/deprecated/interactive_api/scikit_learn_linear_regression/director/director_config.yaml similarity index 100% rename 
from openfl-tutorials/interactive_api/scikit_learn_linear_regression/director/director_config.yaml rename to openfl-tutorials/deprecated/interactive_api/scikit_learn_linear_regression/director/director_config.yaml diff --git a/openfl-tutorials/interactive_api/scikit_learn_linear_regression/director/start_director.sh b/openfl-tutorials/deprecated/interactive_api/scikit_learn_linear_regression/director/start_director.sh similarity index 100% rename from openfl-tutorials/interactive_api/scikit_learn_linear_regression/director/start_director.sh rename to openfl-tutorials/deprecated/interactive_api/scikit_learn_linear_regression/director/start_director.sh diff --git a/openfl-tutorials/interactive_api/scikit_learn_linear_regression/envoy/envoy_config.yaml b/openfl-tutorials/deprecated/interactive_api/scikit_learn_linear_regression/envoy/envoy_config.yaml similarity index 100% rename from openfl-tutorials/interactive_api/scikit_learn_linear_regression/envoy/envoy_config.yaml rename to openfl-tutorials/deprecated/interactive_api/scikit_learn_linear_regression/envoy/envoy_config.yaml diff --git a/openfl-tutorials/interactive_api/scikit_learn_linear_regression/envoy/linreg_shard_descriptor.py b/openfl-tutorials/deprecated/interactive_api/scikit_learn_linear_regression/envoy/linreg_shard_descriptor.py similarity index 100% rename from openfl-tutorials/interactive_api/scikit_learn_linear_regression/envoy/linreg_shard_descriptor.py rename to openfl-tutorials/deprecated/interactive_api/scikit_learn_linear_regression/envoy/linreg_shard_descriptor.py diff --git a/openfl-tutorials/interactive_api/scikit_learn_linear_regression/envoy/requirements.txt b/openfl-tutorials/deprecated/interactive_api/scikit_learn_linear_regression/envoy/requirements.txt similarity index 100% rename from openfl-tutorials/interactive_api/scikit_learn_linear_regression/envoy/requirements.txt rename to openfl-tutorials/deprecated/interactive_api/scikit_learn_linear_regression/envoy/requirements.txt diff 
--git a/openfl-tutorials/interactive_api/scikit_learn_linear_regression/envoy/start_envoy.sh b/openfl-tutorials/deprecated/interactive_api/scikit_learn_linear_regression/envoy/start_envoy.sh similarity index 100% rename from openfl-tutorials/interactive_api/scikit_learn_linear_regression/envoy/start_envoy.sh rename to openfl-tutorials/deprecated/interactive_api/scikit_learn_linear_regression/envoy/start_envoy.sh diff --git a/openfl-tutorials/interactive_api/scikit_learn_linear_regression/workspace/custom_adapter.py b/openfl-tutorials/deprecated/interactive_api/scikit_learn_linear_regression/workspace/custom_adapter.py similarity index 100% rename from openfl-tutorials/interactive_api/scikit_learn_linear_regression/workspace/custom_adapter.py rename to openfl-tutorials/deprecated/interactive_api/scikit_learn_linear_regression/workspace/custom_adapter.py diff --git a/openfl-tutorials/interactive_api/scikit_learn_linear_regression/workspace/requirements.txt b/openfl-tutorials/deprecated/interactive_api/scikit_learn_linear_regression/workspace/requirements.txt similarity index 100% rename from openfl-tutorials/interactive_api/scikit_learn_linear_regression/workspace/requirements.txt rename to openfl-tutorials/deprecated/interactive_api/scikit_learn_linear_regression/workspace/requirements.txt diff --git a/openfl-tutorials/interactive_api/scikit_learn_linear_regression/workspace/scikit_learn_linear_regression.ipynb b/openfl-tutorials/deprecated/interactive_api/scikit_learn_linear_regression/workspace/scikit_learn_linear_regression.ipynb similarity index 100% rename from openfl-tutorials/interactive_api/scikit_learn_linear_regression/workspace/scikit_learn_linear_regression.ipynb rename to openfl-tutorials/deprecated/interactive_api/scikit_learn_linear_regression/workspace/scikit_learn_linear_regression.ipynb From 6044e30b29a35bf4f5c3d8ae3c6f909151be1d90 Mon Sep 17 00:00:00 2001 From: noopur Date: Thu, 14 Nov 2024 04:53:35 +0000 Subject: [PATCH 46/62] Renamed the 
workflow Signed-off-by: noopur --- ..._e2e_non_tls_client_auth.yml => task_runner_e2e_wo_mtls.yml} | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename .github/workflows/{task_runner_e2e_non_tls_client_auth.yml => task_runner_e2e_wo_mtls.yml} (98%) diff --git a/.github/workflows/task_runner_e2e_non_tls_client_auth.yml b/.github/workflows/task_runner_e2e_wo_mtls.yml similarity index 98% rename from .github/workflows/task_runner_e2e_non_tls_client_auth.yml rename to .github/workflows/task_runner_e2e_wo_mtls.yml index 2694b6014e..e8f1515645 100644 --- a/.github/workflows/task_runner_e2e_non_tls_client_auth.yml +++ b/.github/workflows/task_runner_e2e_wo_mtls.yml @@ -2,7 +2,7 @@ # Workflow to run Task Runner end to end tests with non TLS client auth # Authors - Noopur, Payal Chaurasiya #--------------------------------------------------------------------------- -name: Task Runner E2E With Non-TLS Client Auth +name: Task Runner E2E W/o mTLS on: workflow_dispatch: From 2aa9394ccf361493c4ef272c059580fd59a4585f Mon Sep 17 00:00:00 2001 From: noopur Date: Thu, 14 Nov 2024 06:16:09 +0000 Subject: [PATCH 47/62] Use model name from test function name Signed-off-by: noopur --- .github/workflows/task_runner_e2e_wo_mtls.yml | 2 +- tests/end_to_end/README.md | 19 ++- tests/end_to_end/conftest.py | 113 +++++++++--------- .../test_suites/task_runner_tests.py | 42 +++---- tests/end_to_end/utils/conftest_helper.py | 2 +- 5 files changed, 94 insertions(+), 84 deletions(-) diff --git a/.github/workflows/task_runner_e2e_wo_mtls.yml b/.github/workflows/task_runner_e2e_wo_mtls.yml index e8f1515645..c78e75ca0f 100644 --- a/.github/workflows/task_runner_e2e_wo_mtls.yml +++ b/.github/workflows/task_runner_e2e_wo_mtls.yml @@ -28,7 +28,7 @@ env: jobs: test_run: - name: tr + name: tr_wo_mtls runs-on: ubuntu-22.04 timeout-minutes: 120 # 2 hours strategy: diff --git a/tests/end_to_end/README.md b/tests/end_to_end/README.md index 3971b67986..ae725a170f 100644 --- a/tests/end_to_end/README.md 
+++ b/tests/end_to_end/README.md @@ -36,15 +36,24 @@ pip install -r test-requirements.txt To run a specific test case, use below command: ```sh -python -m pytest tests/end_to_end/test_suites/ -k -s +python -m pytest -s tests/end_to_end/test_suites/ -k ``` ** -s will ensure all the logs are printed on screen. Ignore, if not required. -To modify the number of collaborators, rounds to train and/or model name, use below parameters: -1. --num_collaborators -2. --num_rounds -3. --model_name +Below parameters are available for modification: + +1. --num_collaborators - to modify the number of collaborators +2. --num_rounds - to modify the number of rounds to train +3. --model_name - to use a specific model +4. --disable_tls - to disable TLS communication (by default it is enabled) +5. --disable_client_auth - to disable the client authentication (by default it is enabled) + +For example, to run Task runner with - torch_cnn_mnist model, 3 collaborators, 5 rounds and non-TLS scenario: + +```sh +python -m pytest -s tests/end_to_end/test_suites/task_runner_tests.py --num_rounds 5 --num_collaborators 3 --model_name torch_cnn_mnist --disable_tls +``` ### Output Structure diff --git a/tests/end_to_end/conftest.py b/tests/end_to_end/conftest.py index 12e16f1229..efe9febbc6 100644 --- a/tests/end_to_end/conftest.py +++ b/tests/end_to_end/conftest.py @@ -50,7 +50,6 @@ def pytest_addoption(parser): "--model_name", action="store", type=str, - default=constants.DEFAULT_MODEL_NAME, help="Model name", ) parser.addoption( @@ -209,7 +208,7 @@ def pytest_sessionfinish(session, exitstatus): log.debug(f"Cleared .pytest_cache directory at {cache_dir}") -@pytest.fixture(scope="module") +@pytest.fixture(scope="function") def fx_federation(request, pytestconfig): """ Fixture for federation. This fixture is used to create the model owner, aggregator, and collaborators. 
@@ -221,14 +220,15 @@ def fx_federation(request, pytestconfig): Returns: federation_fixture: Named tuple containing the objects for model owner, aggregator, and collaborators - Note: As this is a module level fixture, thus no import is required at test level. + Note: As this is a function level fixture, thus no import is required at test level. """ collaborators = [] agg_domain_name = "localhost" # Parse the command line arguments args = parse_arguments() - model_name = args.model_name + # Use the model name from the test case name if not provided as a command line argument + model_name = args.model_name if args.model_name else request.node.name.split("test_")[1] results_dir = args.results_dir or pytestconfig.getini("results_dir") num_collaborators = args.num_collaborators num_rounds = args.num_rounds @@ -249,6 +249,7 @@ def fx_federation(request, pytestconfig): raise ValueError(f"Invalid model name: {model_name}") workspace_name = f"workspace_{model_name}" + log.info(f"Workspace name is: {workspace_name}") # Create model owner object and the workspace for the model model_owner = participants.ModelOwner(workspace_name, model_name) @@ -259,55 +260,55 @@ def fx_federation(request, pytestconfig): log.error(f"Failed to create the workspace: {e}") raise e - # Modify the plan - try: - model_owner.modify_plan(new_rounds=num_rounds, num_collaborators=num_collaborators, disable_tls=disable_tls) - except Exception as e: - log.error(f"Failed to modify the plan: {e}") - raise e - - # For TLS enabled (default) scenario: when the workspace is certified, the collaborators are registered as well - # For TLS disabled scenario: collaborators need to be registered explicitly - if args.disable_tls: - log.info("Disabling TLS for communication") - model_owner.register_collaborators(num_collaborators) - else: - log.info("Enabling TLS for communication") - try: - model_owner.certify_workspace() - except Exception as e: - log.error(f"Failed to certify the workspace: {e}") - raise e - - # 
Initialize the plan - try: - model_owner.initialize_plan(agg_domain_name=agg_domain_name) - except Exception as e: - log.error(f"Failed to initialize the plan: {e}") - raise e - - # Create the objects for aggregator and collaborators - aggregator = participants.Aggregator( - agg_domain_name=agg_domain_name, workspace_path=workspace_path - ) - - for i in range(num_collaborators): - collaborator = participants.Collaborator( - collaborator_name=f"collaborator{i+1}", - data_directory_path=i + 1, - workspace_path=workspace_path, - ) - collaborator.create_collaborator() - collaborators.append(collaborator) - - # Return the federation fixture - return federation_fixture( - model_owner=model_owner, - aggregator=aggregator, - collaborators=collaborators, - model_name=model_name, - disable_client_auth=disable_client_auth, - disable_tls=disable_tls, - workspace_path=workspace_path, - results_dir=results_dir, - ) + # # Modify the plan + # try: + # model_owner.modify_plan(new_rounds=num_rounds, num_collaborators=num_collaborators, disable_tls=disable_tls) + # except Exception as e: + # log.error(f"Failed to modify the plan: {e}") + # raise e + + # # For TLS enabled (default) scenario: when the workspace is certified, the collaborators are registered as well + # # For TLS disabled scenario: collaborators need to be registered explicitly + # if args.disable_tls: + # log.info("Disabling TLS for communication") + # model_owner.register_collaborators(num_collaborators) + # else: + # log.info("Enabling TLS for communication") + # try: + # model_owner.certify_workspace() + # except Exception as e: + # log.error(f"Failed to certify the workspace: {e}") + # raise e + + # # Initialize the plan + # try: + # model_owner.initialize_plan(agg_domain_name=agg_domain_name) + # except Exception as e: + # log.error(f"Failed to initialize the plan: {e}") + # raise e + + # # Create the objects for aggregator and collaborators + # aggregator = participants.Aggregator( + # 
agg_domain_name=agg_domain_name, workspace_path=workspace_path + # ) + + # for i in range(num_collaborators): + # collaborator = participants.Collaborator( + # collaborator_name=f"collaborator{i+1}", + # data_directory_path=i + 1, + # workspace_path=workspace_path, + # ) + # collaborator.create_collaborator() + # collaborators.append(collaborator) + + # # Return the federation fixture + # return federation_fixture( + # model_owner=model_owner, + # aggregator=aggregator, + # collaborators=collaborators, + # model_name=model_name, + # disable_client_auth=disable_client_auth, + # disable_tls=disable_tls, + # workspace_path=workspace_path, + # results_dir=results_dir, + # ) diff --git a/tests/end_to_end/test_suites/task_runner_tests.py b/tests/end_to_end/test_suites/task_runner_tests.py index 371fee8f08..383c09231f 100644 --- a/tests/end_to_end/test_suites/task_runner_tests.py +++ b/tests/end_to_end/test_suites/task_runner_tests.py @@ -16,30 +16,30 @@ def test_torch_cnn_mnist(fx_federation): """ log.info("Testing torch_cnn_mnist model") - # Setup PKI for trusted communication within the federation - if not fx_federation.disable_tls: - assert fed_helper.setup_pki(fx_federation), "Failed to setup PKI for trusted communication" + # # Setup PKI for trusted communication within the federation + # if not fx_federation.disable_tls: + # assert fed_helper.setup_pki(fx_federation), "Failed to setup PKI for trusted communication" - # Start the federation - results = fed_helper.run_federation(fx_federation) + # # Start the federation + # results = fed_helper.run_federation(fx_federation) - # Verify the completion of the federation run - assert fed_helper.verify_federation_run_completion(fx_federation, results), "Federation completion failed" + # # Verify the completion of the federation run + # assert fed_helper.verify_federation_run_completion(fx_federation, results), "Federation completion failed" @pytest.mark.keras_cnn_mnist def test_keras_cnn_mnist(fx_federation): 
log.info("Testing keras_cnn_mnist model") - # Setup PKI for trusted communication within the federation - if not fx_federation.disable_tls: - assert fed_helper.setup_pki(fx_federation), "Failed to setup PKI for trusted communication" + # # Setup PKI for trusted communication within the federation + # if not fx_federation.disable_tls: + # assert fed_helper.setup_pki(fx_federation), "Failed to setup PKI for trusted communication" - # Start the federation - results = fed_helper.run_federation(fx_federation) + # # Start the federation + # results = fed_helper.run_federation(fx_federation) - # Verify the completion of the federation run - assert fed_helper.verify_federation_run_completion(fx_federation, results), "Federation completion failed" + # # Verify the completion of the federation run + # assert fed_helper.verify_federation_run_completion(fx_federation, results), "Federation completion failed" @pytest.mark.torch_cnn_histology @@ -49,12 +49,12 @@ def test_torch_cnn_histology(fx_federation): """ log.info("Testing torch_cnn_histology model") - # Setup PKI for trusted communication within the federation - if not fx_federation.disable_tls: - assert fed_helper.setup_pki(fx_federation), "Failed to setup PKI for trusted communication" + # # Setup PKI for trusted communication within the federation + # if not fx_federation.disable_tls: + # assert fed_helper.setup_pki(fx_federation), "Failed to setup PKI for trusted communication" - # Start the federation - results = fed_helper.run_federation(fx_federation) + # # Start the federation + # results = fed_helper.run_federation(fx_federation) - # Verify the completion of the federation run - assert fed_helper.verify_federation_run_completion(fx_federation, results), "Federation completion failed" + # # Verify the completion of the federation run + # assert fed_helper.verify_federation_run_completion(fx_federation, results), "Federation completion failed" diff --git a/tests/end_to_end/utils/conftest_helper.py 
b/tests/end_to_end/utils/conftest_helper.py index 92a2395a22..b8d70fa7ba 100644 --- a/tests/end_to_end/utils/conftest_helper.py +++ b/tests/end_to_end/utils/conftest_helper.py @@ -29,7 +29,7 @@ def parse_arguments(): parser.add_argument("--results_dir", type=str, required=False, default="results", help="Directory to store the results") parser.add_argument("--num_collaborators", type=int, default=2, help="Number of collaborators") parser.add_argument("--num_rounds", type=int, default=5, help="Number of rounds to train") - parser.add_argument("--model_name", type=str, default="torch_cnn_mnist", help="Model name") + parser.add_argument("--model_name", type=str, help="Model name") parser.add_argument("--disable_client_auth", action="store_true", help="Disable client authentication") parser.add_argument("--disable_tls", action="store_true", help="Disable TLS for communication") args = parser.parse_known_args()[0] From 1b5c5139a831d98f3417ce731ee0d61b59d949ee Mon Sep 17 00:00:00 2001 From: noopur Date: Thu, 14 Nov 2024 06:24:20 +0000 Subject: [PATCH 48/62] Remove testing code Signed-off-by: noopur --- tests/end_to_end/conftest.py | 110 +++++++++--------- tests/end_to_end/models/participants.py | 36 +++--- tests/end_to_end/test_suites/sample_tests.py | 4 +- .../test_suites/task_runner_tests.py | 42 +++---- 4 files changed, 99 insertions(+), 93 deletions(-) diff --git a/tests/end_to_end/conftest.py b/tests/end_to_end/conftest.py index efe9febbc6..193609034e 100644 --- a/tests/end_to_end/conftest.py +++ b/tests/end_to_end/conftest.py @@ -249,66 +249,68 @@ def fx_federation(request, pytestconfig): raise ValueError(f"Invalid model name: {model_name}") workspace_name = f"workspace_{model_name}" - log.info(f"Workspace name is: {workspace_name}") # Create model owner object and the workspace for the model model_owner = participants.ModelOwner(workspace_name, model_name) - try: workspace_path = model_owner.create_workspace(results_dir=results_dir) except Exception as e: 
log.error(f"Failed to create the workspace: {e}") raise e - # # Modify the plan - # try: - # model_owner.modify_plan(new_rounds=num_rounds, num_collaborators=num_collaborators, disable_tls=disable_tls) - # except Exception as e: - # log.error(f"Failed to modify the plan: {e}") - # raise e - - # # For TLS enabled (default) scenario: when the workspace is certified, the collaborators are registered as well - # # For TLS disabled scenario: collaborators need to be registered explicitly - # if args.disable_tls: - # log.info("Disabling TLS for communication") - # model_owner.register_collaborators(num_collaborators) - # else: - # log.info("Enabling TLS for communication") - # try: - # model_owner.certify_workspace() - # except Exception as e: - # log.error(f"Failed to certify the workspace: {e}") - # raise e - - # # Initialize the plan - # try: - # model_owner.initialize_plan(agg_domain_name=agg_domain_name) - # except Exception as e: - # log.error(f"Failed to initialize the plan: {e}") - # raise e - - # # Create the objects for aggregator and collaborators - # aggregator = participants.Aggregator( - # agg_domain_name=agg_domain_name, workspace_path=workspace_path - # ) - - # for i in range(num_collaborators): - # collaborator = participants.Collaborator( - # collaborator_name=f"collaborator{i+1}", - # data_directory_path=i + 1, - # workspace_path=workspace_path, - # ) - # collaborator.create_collaborator() - # collaborators.append(collaborator) - - # # Return the federation fixture - # return federation_fixture( - # model_owner=model_owner, - # aggregator=aggregator, - # collaborators=collaborators, - # model_name=model_name, - # disable_client_auth=disable_client_auth, - # disable_tls=disable_tls, - # workspace_path=workspace_path, - # results_dir=results_dir, - # ) + # Modify the plan + try: + model_owner.modify_plan(new_rounds=num_rounds, num_collaborators=num_collaborators, disable_tls=disable_tls) + except Exception as e: + log.error(f"Failed to modify the plan: 
{e}") + raise e + + # For TLS enabled (default) scenario: when the workspace is certified, the collaborators are registered as well + # For TLS disabled scenario: collaborators need to be registered explicitly + if args.disable_tls: + log.info("Disabling TLS for communication") + try: + model_owner.register_collaborators(num_collaborators) + except Exception as e: + log.error(f"Failed to register the collaborators: {e}") + raise e + else: + log.info("Enabling TLS for communication") + try: + model_owner.certify_workspace() + except Exception as e: + log.error(f"Failed to certify the workspace: {e}") + raise e + + # Initialize the plan + try: + model_owner.initialize_plan(agg_domain_name=agg_domain_name) + except Exception as e: + log.error(f"Failed to initialize the plan: {e}") + raise e + + # Create the objects for aggregator and collaborators + aggregator = participants.Aggregator( + agg_domain_name=agg_domain_name, workspace_path=workspace_path + ) + + for i in range(num_collaborators): + collaborator = participants.Collaborator( + collaborator_name=f"collaborator{i+1}", + data_directory_path=i + 1, + workspace_path=workspace_path, + ) + collaborator.create_collaborator() + collaborators.append(collaborator) + + # Return the federation fixture + return federation_fixture( + model_owner=model_owner, + aggregator=aggregator, + collaborators=collaborators, + model_name=model_name, + disable_client_auth=disable_client_auth, + disable_tls=disable_tls, + workspace_path=workspace_path, + results_dir=results_dir, + ) diff --git a/tests/end_to_end/models/participants.py b/tests/end_to_end/models/participants.py index 7c49a2a7ac..0ef22d8f28 100644 --- a/tests/end_to_end/models/participants.py +++ b/tests/end_to_end/models/participants.py @@ -190,27 +190,31 @@ def register_collaborators(self, num_collaborators=None): bool: True if successful, else False """ self.cols_path = os.path.join(self.workspace_path, "plan", "cols.yaml") - log.info(f"Registering the collaborators in 
{self.cols_path}") - # Open the file and modify the entries + log.info(f"Registering the collaborators..") self.num_collaborators = num_collaborators if num_collaborators else self.num_collaborators - # Straightforward writing to the yaml file is not recommended here - # As the file might contain spaces and tabs which can cause issues - with open(self.cols_path, "r", encoding="utf-8") as f: - doc = yaml.load(f, Loader=yaml.FullLoader) + try: + # Straightforward writing to the yaml file is not recommended here + # As the file might contain spaces and tabs which can cause issues + with open(self.cols_path, "r", encoding="utf-8") as f: + doc = yaml.load(f, Loader=yaml.FullLoader) - if "collaborators" not in doc.keys() or not doc["collaborators"]: - doc["collaborators"] = [] # Create empty list + if "collaborators" not in doc.keys() or not doc["collaborators"]: + doc["collaborators"] = [] # Create empty list - for i in range(num_collaborators): - col_name = "collaborator" + str(i+1) - doc["collaborators"].append(col_name) - with open(self.cols_path, "w", encoding="utf-8") as f: - yaml.dump(doc, f) + for i in range(num_collaborators): + col_name = "collaborator" + str(i+1) + doc["collaborators"].append(col_name) + with open(self.cols_path, "w", encoding="utf-8") as f: + yaml.dump(doc, f) - log.info( - f"Modified the plan to train the model for collaborators {self.num_collaborators} and {self.rounds_to_train} rounds" - ) + log.info( + f"Successfully registered collaborators in {self.cols_path}" + ) + except Exception as e: + log.error(f"Failed to register the collaborators: {e}") + raise e + return True def certify_aggregator(self, agg_domain_name): """ diff --git a/tests/end_to_end/test_suites/sample_tests.py b/tests/end_to_end/test_suites/sample_tests.py index 7c528277e8..a27bf76cbf 100644 --- a/tests/end_to_end/test_suites/sample_tests.py +++ b/tests/end_to_end/test_suites/sample_tests.py @@ -19,8 +19,8 @@ # 7. 
Start the federation using aggregator and given no of collaborators. # 8. Verify the completion of the federation run. -@pytest.mark.sample_model -def test_sample_model(fx_federation): +@pytest.mark.sample_model_name +def test_sample_model_name(fx_federation): """ Add a proper docstring here. """ diff --git a/tests/end_to_end/test_suites/task_runner_tests.py b/tests/end_to_end/test_suites/task_runner_tests.py index 383c09231f..371fee8f08 100644 --- a/tests/end_to_end/test_suites/task_runner_tests.py +++ b/tests/end_to_end/test_suites/task_runner_tests.py @@ -16,30 +16,30 @@ def test_torch_cnn_mnist(fx_federation): """ log.info("Testing torch_cnn_mnist model") - # # Setup PKI for trusted communication within the federation - # if not fx_federation.disable_tls: - # assert fed_helper.setup_pki(fx_federation), "Failed to setup PKI for trusted communication" + # Setup PKI for trusted communication within the federation + if not fx_federation.disable_tls: + assert fed_helper.setup_pki(fx_federation), "Failed to setup PKI for trusted communication" - # # Start the federation - # results = fed_helper.run_federation(fx_federation) + # Start the federation + results = fed_helper.run_federation(fx_federation) - # # Verify the completion of the federation run - # assert fed_helper.verify_federation_run_completion(fx_federation, results), "Federation completion failed" + # Verify the completion of the federation run + assert fed_helper.verify_federation_run_completion(fx_federation, results), "Federation completion failed" @pytest.mark.keras_cnn_mnist def test_keras_cnn_mnist(fx_federation): log.info("Testing keras_cnn_mnist model") - # # Setup PKI for trusted communication within the federation - # if not fx_federation.disable_tls: - # assert fed_helper.setup_pki(fx_federation), "Failed to setup PKI for trusted communication" + # Setup PKI for trusted communication within the federation + if not fx_federation.disable_tls: + assert fed_helper.setup_pki(fx_federation), "Failed 
to setup PKI for trusted communication" - # # Start the federation - # results = fed_helper.run_federation(fx_federation) + # Start the federation + results = fed_helper.run_federation(fx_federation) - # # Verify the completion of the federation run - # assert fed_helper.verify_federation_run_completion(fx_federation, results), "Federation completion failed" + # Verify the completion of the federation run + assert fed_helper.verify_federation_run_completion(fx_federation, results), "Federation completion failed" @pytest.mark.torch_cnn_histology @@ -49,12 +49,12 @@ def test_torch_cnn_histology(fx_federation): """ log.info("Testing torch_cnn_histology model") - # # Setup PKI for trusted communication within the federation - # if not fx_federation.disable_tls: - # assert fed_helper.setup_pki(fx_federation), "Failed to setup PKI for trusted communication" + # Setup PKI for trusted communication within the federation + if not fx_federation.disable_tls: + assert fed_helper.setup_pki(fx_federation), "Failed to setup PKI for trusted communication" - # # Start the federation - # results = fed_helper.run_federation(fx_federation) + # Start the federation + results = fed_helper.run_federation(fx_federation) - # # Verify the completion of the federation run - # assert fed_helper.verify_federation_run_completion(fx_federation, results), "Federation completion failed" + # Verify the completion of the federation run + assert fed_helper.verify_federation_run_completion(fx_federation, results), "Federation completion failed" From e0e95f97ee6e456945b5119cca3e60daf6ffadf7 Mon Sep 17 00:00:00 2001 From: "Shah, Karan" Date: Thu, 14 Nov 2024 13:06:20 +0530 Subject: [PATCH 49/62] Cleanup manifest and makefile; less hardcode Signed-off-by: Shah, Karan --- openfl-docker/gramine_app/Makefile | 12 +++++-- .../gramine_app/fx.manifest.template | 36 +++++++++++-------- 2 files changed, 32 insertions(+), 16 deletions(-) diff --git a/openfl-docker/gramine_app/Makefile 
b/openfl-docker/gramine_app/Makefile index dbe4d1ce66..4dbc8ef142 100644 --- a/openfl-docker/gramine_app/Makefile +++ b/openfl-docker/gramine_app/Makefile @@ -1,8 +1,16 @@ # Copyright (C) 2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -VENV_ROOT ?= /opt/venv -WORKSPACE_ROOT ?= /workspace +# ------------------------------------ +# Makefile for Gramine application within a container +# Usage: +# 1. Activate the python venv. +# 2. Provide paths VENV_ROOT and WORKSPACE_ROOT. +# 3. make SGX=0/1 [SGX_SIGNER_KEY=] +# ------------------------------------ +VENV_ROOT ?= $(shell dirname $(shell dirname $(shell which python))) +WORKSPACE_ROOT ?= $(shell pwd) ARCH_LIBDIR ?= /lib/$(shell $(CC) -dumpmachine) +SGX_SIGNER_KEY ?= /key.pem ifeq ($(DEBUG),1) GRAMINE_LOG_LEVEL = debug diff --git a/openfl-docker/gramine_app/fx.manifest.template b/openfl-docker/gramine_app/fx.manifest.template index 276a8d1268..93ad6c0d01 100755 --- a/openfl-docker/gramine_app/fx.manifest.template +++ b/openfl-docker/gramine_app/fx.manifest.template @@ -1,8 +1,9 @@ # Copyright (C) 2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -# ================================== -# OpenFL Enclave for Gramine-SGX -# ================================== +# ------------------------------------- +# Enclave Manifest for OpenFL TaskRunner API. +# This defines the configuration for the Gramine loader to run a Python application. 
+# ------------------------------------- libos.entrypoint = "{{ entrypoint }}" loader.entrypoint = "file:{{ gramine.libos }}" @@ -14,28 +15,35 @@ loader.insecure__use_host_env = true loader.env.LD_LIBRARY_PATH = "{{ venv_root }}:{{ arch_libdir }}:/usr/{{ arch_libdir }}:/lib:/usr/lib" loader.env.SSL_CERT_DIR = "/etc/ssl/certs" -# URI - path on host -# PATH - pointer inside gramine +# Filesystem configuration within Gramine LibOS fs.start_dir = "{{ workspace_root }}" fs.mounts = [ + # System mounts (URI: path on host, PATH: pointer inside gramine) { uri = "file:{{ gramine.runtimedir() }}", path = "/lib" }, { uri = "file:{{ arch_libdir }}", path = "{{ arch_libdir }}" }, - { uri = "file:/usr", path = "/usr" }, { uri = "file:/etc/ssl/certs", path = "/etc/ssl/certs" }, + { uri = "file:/usr", path = "/usr" }, + { type = "tmpfs", path = "/tmp" }, + # User-defined mounts specific to the application. { uri = "file:{{ workspace_root }}", path = "{{ workspace_root }}" }, { uri = "file:{{ venv_root }}", path = "{{ venv_root }}" }, - { type = "tmpfs", path = "/tmp" }, ] -sgx.debug = false -sgx.preheat_enclave = false -sgx.enclave_size = "16G" - +# System configuration sys.stack.size = "4M" +sys.brk.max_size = "1M" sys.enable_sigterm_injection = true sys.enable_extra_runtime_domain_names_conf = true -# sys.brk.max_size = "1M" +# SGX configuration +sgx.debug = false +sgx.enclave_size = "16G" +sgx.preheat_enclave = false +sgx.remote_attestation = "dcap" +sgx.max_threads = 512 + +# List of trusted files, that are hashed and signed by the enclave. +# If these files change after signing of an enclave, application cannot run. sgx.trusted_files = [ "file:{{ gramine.libos }}", "file:{{ entrypoint }}", @@ -52,6 +60,8 @@ sgx.trusted_files = [ "file:{{ workspace_root }}/src/", ] +# List of allowed files that SGX enclave does NOT verify with signatures. +# One should be conservative as to which files are allowed, these can be modified by enclave. 
sgx.allowed_files = [ "file:{{ workspace_root }}/save", "file:{{ workspace_root }}/logs", @@ -61,5 +71,3 @@ sgx.allowed_files = [ "file:{{ workspace_root }}/plan/data.yaml", "file:{{ workspace_root }}/plan/plan.yaml", ] -sgx.remote_attestation = "dcap" -sgx.max_threads = 512 From fe914554ed1f05d02cadb252fd2ffd9ebc918322 Mon Sep 17 00:00:00 2001 From: "Shah, Karan" Date: Thu, 14 Nov 2024 13:09:41 +0530 Subject: [PATCH 50/62] Make venv default Signed-off-by: Shah, Karan --- openfl-docker/Dockerfile.base | 19 +++++++++++-------- openfl-docker/Dockerfile.workspace | 20 +++++++++++++++----- 2 files changed, 26 insertions(+), 13 deletions(-) diff --git a/openfl-docker/Dockerfile.base b/openfl-docker/Dockerfile.base index 139767b544..f58d83747f 100644 --- a/openfl-docker/Dockerfile.base +++ b/openfl-docker/Dockerfile.base @@ -18,12 +18,18 @@ RUN --mount=type=cache,id=apt-dev,target=/var/cache/apt \ curl \ python3-pip \ python3.10-dev \ + python3.10-venv \ ca-certificates \ build-essential \ --no-install-recommends && \ apt-get purge -y linux-libc-dev && \ rm -rf /var/lib/apt/lists/* +# Create a python virtual environment. +RUN python3.10 -m venv /opt/venv && \ + /opt/venv/bin/pip install --no-cache-dir --upgrade pip setuptools wheel +ENV PATH=/opt/venv/bin:$PATH + # Install Gramine RUN --mount=type=cache,id=apt-dev,target=/var/cache/apt \ curl -fsSLo /usr/share/keyrings/gramine-keyring.gpg https://packages.gramineproject.io/gramine-keyring.gpg && \ @@ -36,17 +42,14 @@ RUN --mount=type=cache,id=apt-dev,target=/var/cache/apt \ apt-get install -y gramine --no-install-recommends && \ rm -rf /var/lib/apt/lists/* +# Install OpenFL. +ARG OPENFL_REVISION=https://github.com/securefederatedai/openfl.git@v1.6 +RUN pip install --no-cache-dir git+${OPENFL_REVISION} && \ + INSTALL_SOURCES=yes /opt/venv/lib/python3.10/site-packages/openfl-docker/licenses.sh + # Create an unprivileged user. 
RUN groupadd -g 1001 default && \ useradd -m -u 1001 -g default user USER user -WORKDIR /home/user -ENV PATH=/home/user/.local/bin:$PATH - -# Install OpenFL. -ARG OPENFL_REVISION=https://github.com/securefederatedai/openfl.git@v1.6 -RUN pip install --no-cache-dir -U pip setuptools wheel && \ - pip install --no-cache-dir git+${OPENFL_REVISION} && \ - INSTALL_SOURCES=yes /home/user/.local/lib/python3.10/site-packages/openfl-docker/licenses.sh CMD ["/bin/bash"] diff --git a/openfl-docker/Dockerfile.workspace b/openfl-docker/Dockerfile.workspace index 08446663c6..3b644b741a 100644 --- a/openfl-docker/Dockerfile.workspace +++ b/openfl-docker/Dockerfile.workspace @@ -6,13 +6,23 @@ ARG BASE_IMAGE=openfl:latest FROM ${BASE_IMAGE} +USER root SHELL ["/bin/bash", "-o", "pipefail", "-c"] -USER user +# Import workspace +WORKDIR / ARG WORKSPACE_NAME -COPY ${WORKSPACE_NAME}.zip . -RUN fx workspace import --archive ${WORKSPACE_NAME}.zip && \ - pip install --no-cache-dir -r ${WORKSPACE_NAME}/requirements.txt +COPY ${WORKSPACE_NAME}.zip /workspace.zip +RUN fx workspace import --archive /workspace.zip && \ + pip install --no-cache-dir -r /workspace/requirements.txt + +# Build enclaves +WORKDIR /workspace +RUN --mount=type=secret,id=signer-key,dst=/key.pem \ + cp -r /opt/venv/lib/python3.10/site-packages/openfl-docker/gramine_app/* /workspace/ && \ + mkdir /mrenclave && \ + make SGX=1 SGX_SIGNER_KEY=/key.pem >> /mrenclave/fx && \ + echo "$(cat /mrenclave/fx)" -WORKDIR /home/user/${WORKSPACE_NAME} +USER user CMD ["/bin/bash"] \ No newline at end of file From 80e34ec3ad690507d9f21d9e58943f89bd267107 Mon Sep 17 00:00:00 2001 From: "Shah, Karan" Date: Thu, 14 Nov 2024 13:53:25 +0530 Subject: [PATCH 51/62] Remove --sgx-ready flag Signed-off-by: Shah, Karan --- openfl-docker/gramine_app/fx.manifest.template | 2 +- openfl/interface/workspace.py | 15 ++------------- 2 files changed, 3 insertions(+), 14 deletions(-) diff --git a/openfl-docker/gramine_app/fx.manifest.template 
b/openfl-docker/gramine_app/fx.manifest.template index 93ad6c0d01..928dff0f56 100755 --- a/openfl-docker/gramine_app/fx.manifest.template +++ b/openfl-docker/gramine_app/fx.manifest.template @@ -6,7 +6,7 @@ # ------------------------------------- libos.entrypoint = "{{ entrypoint }}" -loader.entrypoint = "file:{{ gramine.libos }}" +loader.entrypoint.uri = "file:{{ gramine.libos }}" loader.log_level = "{{ log_level }}" loader.insecure__use_cmdline_argv = true diff --git a/openfl/interface/workspace.py b/openfl/interface/workspace.py index 32c264a6a8..2da2b1bc04 100644 --- a/openfl/interface/workspace.py +++ b/openfl/interface/workspace.py @@ -389,21 +389,12 @@ def export_() -> str: default=False, help="If set, rebuilds docker images with `--no-cache` option.", ) -@option( - "--sgx-ready", - is_flag=True, - default=False, - help="If set, builds an SGX-enabled OpenFL enclave.", -) @option( "--enclave-key", "enclave_key", type=str, required=False, - help=( - "Path to an enclave signing key. If not provided, a new key will be generated. " - "This option is only valid when `--sgx-ready` is set." - ), + help="Path to an enclave signing key. If not provided, a new key will be generated. 
", ) @option( "--revision", @@ -417,9 +408,7 @@ def export_() -> str: ), ) @pass_context -def dockerize_( - context, save: bool, rebuild: bool, sgx_ready: bool, enclave_key: str, revision: str -): +def dockerize_(context, save: bool, rebuild: bool, enclave_key: str, revision: str): """Package current workspace as a Docker image.""" # Docker build options From b773f9b631a23411c1bf0d2897521c66f7c294a3 Mon Sep 17 00:00:00 2001 From: noopur Date: Thu, 14 Nov 2024 08:48:26 +0000 Subject: [PATCH 52/62] Job name change Signed-off-by: noopur --- .github/workflows/task_runner_e2e_wo_mtls.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/task_runner_e2e_wo_mtls.yml b/.github/workflows/task_runner_e2e_wo_mtls.yml index c78e75ca0f..90b07fe4aa 100644 --- a/.github/workflows/task_runner_e2e_wo_mtls.yml +++ b/.github/workflows/task_runner_e2e_wo_mtls.yml @@ -28,7 +28,7 @@ env: jobs: test_run: - name: tr_wo_mtls + name: tr_non_tls runs-on: ubuntu-22.04 timeout-minutes: 120 # 2 hours strategy: @@ -65,7 +65,7 @@ jobs: pip install . 
pip install -r test-requirements.txt - - name: Run Task Runner E2E tests + - name: Run Task Runner E2E tests without TLS id: run_task_runner_tests run: | python -m pytest -s tests/end_to_end/test_suites/task_runner_tests.py -m ${{ env.MODEL_NAME }} --num_rounds $NUM_ROUNDS --num_collaborators $NUM_COLLABORATORS --model_name ${{ env.MODEL_NAME }} --disable_tls From 5952ef0d9fc30689f3d9df459f661d6e0c91a1f9 Mon Sep 17 00:00:00 2001 From: "Shah, Karan" Date: Thu, 14 Nov 2024 14:21:36 +0530 Subject: [PATCH 53/62] Store enclave measurement in file Signed-off-by: Shah, Karan --- openfl-docker/Dockerfile.workspace | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/openfl-docker/Dockerfile.workspace b/openfl-docker/Dockerfile.workspace index 3b644b741a..0165f557c1 100644 --- a/openfl-docker/Dockerfile.workspace +++ b/openfl-docker/Dockerfile.workspace @@ -1,7 +1,12 @@ # Copyright (C) 2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # ------------------------------------ -# Workspace Image +# Gramine-ready Workspace Image +# Usage: +# $> docker build . 
-t openfl-workspace -f Dockerfile.workspace \ +# [--build-arg BASE_IMAGE=openfl:latest] \ +# [--build-arg WORKSPACE_NAME=WORKSPACE_NAME] \ +# [--secret id=signer-key,src=signer-key.pem] # ------------------------------------ ARG BASE_IMAGE=openfl:latest FROM ${BASE_IMAGE} @@ -20,9 +25,9 @@ RUN fx workspace import --archive /workspace.zip && \ WORKDIR /workspace RUN --mount=type=secret,id=signer-key,dst=/key.pem \ cp -r /opt/venv/lib/python3.10/site-packages/openfl-docker/gramine_app/* /workspace/ && \ - mkdir /mrenclave && \ - make SGX=1 SGX_SIGNER_KEY=/key.pem >> /mrenclave/fx && \ - echo "$(cat /mrenclave/fx)" + make SGX=1 SGX_SIGNER_KEY=/key.pem >> fx.mr_enclave && \ + echo "$(cat fx.mr_enclave)" && \ + chown -R user /workspace USER user CMD ["/bin/bash"] \ No newline at end of file From f76875e1e4e5ea7ed00906da632a823dce6756ac Mon Sep 17 00:00:00 2001 From: "Shah, Karan" Date: Thu, 14 Nov 2024 14:27:09 +0530 Subject: [PATCH 54/62] Rename dockerization.yml to tr_docker_native.yml Signed-off-by: Shah, Karan --- .github/workflows/{dockerization.yml => tr_docker_native.yml} | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename .github/workflows/{dockerization.yml => tr_docker_native.yml} (98%) diff --git a/.github/workflows/dockerization.yml b/.github/workflows/tr_docker_native.yml similarity index 98% rename from .github/workflows/dockerization.yml rename to .github/workflows/tr_docker_native.yml index 81d29f5f2a..b333b54059 100644 --- a/.github/workflows/dockerization.yml +++ b/.github/workflows/tr_docker_native.yml @@ -1,5 +1,5 @@ # Tests an FL experiment in a Dockerized environment. 
-name: Dockerization +name: Task Runner API (Docker/native) on: pull_request: From 31c209a397945b6b01b87a647d98893a8d0205ac Mon Sep 17 00:00:00 2001 From: "Shah, Karan" Date: Thu, 14 Nov 2024 14:27:45 +0530 Subject: [PATCH 55/62] Add TaskRunner Gramine-Direct CI E2E test Signed-off-by: Shah, Karan --- .../workflows/tr_docker_gramine_direct.yml | 97 +++++++++++++++++++ 1 file changed, 97 insertions(+) create mode 100644 .github/workflows/tr_docker_gramine_direct.yml diff --git a/.github/workflows/tr_docker_gramine_direct.yml b/.github/workflows/tr_docker_gramine_direct.yml new file mode 100644 index 0000000000..c415f9055f --- /dev/null +++ b/.github/workflows/tr_docker_gramine_direct.yml @@ -0,0 +1,97 @@ +# Tests an FL experiment in a Dockerized environment. +name: Task Runner API (Docker/gramine-direct) + +on: + pull_request: + branches: [ develop ] + types: [opened, synchronize, reopened, ready_for_review] + +permissions: + contents: read + +jobs: + build: + if: github.event.pull_request.draft == false + runs-on: ubuntu-latest + timeout-minutes: 10 + + steps: + - uses: actions/checkout@v3 + - name: Set up Python 3.8 + uses: actions/setup-python@v3 + with: + python-version: "3.8" + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install . 
+ + - name: Create workspace image + run: | + fx workspace create --prefix example_workspace --template keras_cnn_mnist + cd example_workspace + fx plan initialize -a localhost + fx workspace dockerize --save --revision https://github.com/${GITHUB_REPOSITORY}.git@${{ github.event.pull_request.head.sha }} + + - name: Create certificate authority for workspace + run: | + cd example_workspace + fx workspace certify + + - name: Create signed cert for collaborator + run: | + cd example_workspace + fx collaborator create -d 1 -n charlie --silent + fx collaborator generate-cert-request -n charlie --silent + fx collaborator certify --request-pkg col_charlie_to_agg_cert_request.zip --silent + + # Pack the collaborator's private key, signed cert, and data.yaml into a tarball + tarfiles="plan/data.yaml agg_to_col_charlie_signed_cert.zip" + for entry in cert/client/*; do + if [[ "$entry" == *.key ]]; then + tarfiles="$tarfiles $entry" + fi + done + + tar -cf cert_col_charlie.tar $tarfiles + + # Clean up + rm -f $tarfiles + rm -f col_charlie_to_agg_cert_request.zip + + - name: Create signed cert for aggregator + run: | + cd example_workspace + fx aggregator generate-cert-request --fqdn localhost + fx aggregator certify --fqdn localhost --silent + + # Pack all files that aggregator needs to start training + tar -cf cert_agg.tar plan cert save + + # Remove the directories after archiving + rm -rf plan cert save + + - name: Load workspace image + run: | + cd example_workspace + docker load -i example_workspace.tar + + - name: Run aggregator and collaborator + run: | + cd example_workspace + + set -x + docker run --rm \ + --network host \ + --security-opt seccomp=unconfined \ + --mount type=bind,source=./cert_agg.tar,target=/certs.tar \ + --env KERAS_HOME=/tmp \ + example_workspace bash -c "tar -xf /certs.tar && gramine-direct fx aggregator start" & + + # TODO: Run with two collaborators instead. 
+ docker run --rm \ + --network host \ + --security-opt seccomp=unconfined \ + --mount type=bind,source=./cert_col_charlie.tar,target=/certs.tar \ + --env KERAS_HOME=/tmp \ + example_workspace bash -c "tar -xf /certs.tar && fx collaborator certify --import agg_to_col_charlie_signed_cert.zip && gramine-direct fx collaborator start -n charlie" \ No newline at end of file From ff1619cfd3b7de32d3adcfc8ccd08ea711880b48 Mon Sep 17 00:00:00 2001 From: "Shah, Karan" Date: Thu, 14 Nov 2024 14:36:05 +0530 Subject: [PATCH 56/62] Update command help Signed-off-by: Shah, Karan --- openfl/interface/workspace.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/openfl/interface/workspace.py b/openfl/interface/workspace.py index 2da2b1bc04..b138ad67db 100644 --- a/openfl/interface/workspace.py +++ b/openfl/interface/workspace.py @@ -394,7 +394,11 @@ def export_() -> str: "enclave_key", type=str, required=False, - help="Path to an enclave signing key. If not provided, a new key will be generated. ", + help=( + "Path to an enclave signing key. If not provided, a key will be auto-generated in the workspace. " + "Note that this command builds a TEE-ready image, key is NOT packaged along with the image. " + "You have the flexibility to not run inside a TEE later." 
+ ), ) @option( "--revision", @@ -409,7 +413,7 @@ def export_() -> str: ) @pass_context def dockerize_(context, save: bool, rebuild: bool, enclave_key: str, revision: str): - """Package current workspace as a Docker image.""" + """Package current workspace as a TEE-ready Docker image.""" # Docker build options options = [] From 12756b12c198560c4af8f2998d98dea24c4d572a Mon Sep 17 00:00:00 2001 From: "Shah, Karan" Date: Thu, 14 Nov 2024 14:40:54 +0530 Subject: [PATCH 57/62] Rename tests Signed-off-by: Shah, Karan --- .github/workflows/tr_docker_gramine_direct.yml | 2 +- .github/workflows/tr_docker_native.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tr_docker_gramine_direct.yml b/.github/workflows/tr_docker_gramine_direct.yml index c415f9055f..d8f7480ea1 100644 --- a/.github/workflows/tr_docker_gramine_direct.yml +++ b/.github/workflows/tr_docker_gramine_direct.yml @@ -1,5 +1,5 @@ # Tests an FL experiment in a Dockerized environment. -name: Task Runner API (Docker/gramine-direct) +name: TaskRunner (docker/gramine-direct) on: pull_request: diff --git a/.github/workflows/tr_docker_native.yml b/.github/workflows/tr_docker_native.yml index b333b54059..899fcd8296 100644 --- a/.github/workflows/tr_docker_native.yml +++ b/.github/workflows/tr_docker_native.yml @@ -1,5 +1,5 @@ # Tests an FL experiment in a Dockerized environment. 
-name: Task Runner API (Docker/native) +name: TaskRunner (docker/native) on: pull_request: From e4139ae0c2246b54ca297e7a690b20154fa8617b Mon Sep 17 00:00:00 2001 From: "Shah, Karan" Date: Thu, 14 Nov 2024 15:44:58 +0530 Subject: [PATCH 58/62] Add README.md Signed-off-by: Shah, Karan --- openfl-docker/README.md | 89 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 89 insertions(+) create mode 100644 openfl-docker/README.md diff --git a/openfl-docker/README.md b/openfl-docker/README.md new file mode 100644 index 0000000000..da8540770d --- /dev/null +++ b/openfl-docker/README.md @@ -0,0 +1,89 @@ +# Using OpenFL within a Container + +OpenFL can be used within a container for simulating Federated Learning experiments, or to deploy real-world experiments within Trusted Execution Environments (TEEs). + +## Base Image + +To develop or simulate experiments within a container, build the base image (or pull one from docker hub). + +```shell +# Pull latest stable base image +$> docker pull intel/openfl + +# Or, build a base image from the latest source code +$> docker build . -t openfl -f Dockerfile.base \ + --build-arg OPENFL_REVISION=https://github.com/securefederatedai/openfl.git@develop +``` + +Run the container: +```shell +user@vm:~/openfl$ docker run -it --rm openfl:latest bash +user@7b40624c207a:/$ fx +OpenFL - Open Federated Learning + +BASH COMPLETE ACTIVATION + +Run in terminal: + _FX_COMPLETE=bash_source fx > ~/.fx-autocomplete.sh + source ~/.fx-autocomplete.sh +If ~/.fx-autocomplete.sh has already exist: + source ~/.fx-autocomplete.sh + +CORRECT USAGE + +fx [options] [command] [subcommand] [args] +``` + +## Deployment +This section assumes familiarity with the [TaskRunner API](https://openfl.readthedocs.io/en/latest/about/features_index/taskrunner.html#running-the-task-runner). + +### Building a workspace image +OpenFL supports [Gramine-based](https://gramine.readthedocs.io/en/stable/) TEEs that run within SGX. 
+ +To build a TEE-ready workspace image, run the following command from an existing workspace directory. Ensure PKI setup and plan confirmations are done before this step. + +```shell +# Optional, generate an enclave signing key (auto-generated otherwise) +user@vm:~/example_workspace$ openssl genrsa -out key.pem -3 3072 +user@vm:~/example_workspace$ fx workspace dockerize --enclave-key ./key.pem --save +``` +This command builds the base image and a TEE-ready workspace image. Refer to `fx workspace dockerize --help` for more details. + +A signed docker image named `example_workspace.tar` will be saved in the workspace. This image (along with respective PKI certificates) can be shared across participating entities. + +### Running without a TEE +Using native `fx` command within the image will run the experiment without TEEs. + +```shell +# Aggregator +docker run --rm \ + --network host \ + --mount type=bind,source=./certs.tar,target=/certs.tar \ + example_workspace bash -c "fx aggregator start ..." + +# Collaborator(s) +docker run --rm \ + --network host \ + --mount type=bind,source=./certs.tar,target=/certs.tar \ + example_workspace bash -c "fx collaborator start ..." +``` + +### Running within a TEE +To run `fx` within a TEE, mount SGX device and AESMD volumes. In addition, prefix the `fx` command with `gramine-sgx` directive. +```shell +# Aggregator +docker run --rm \ + --network host \ + --device=/dev/sgx_enclave \ + -v /var/run/aesmd/aesm.socket:/var/run/aesmd/aesm.socket \ + --mount type=bind,source=./certs.tar,target=/certs.tar \ + example_workspace bash -c "gramine-sgx fx aggregator start ..." + +# Collaborator(s) +docker run --rm \ + --network host \ + --device=/dev/sgx_enclave \ + -v /var/run/aesmd/aesm.socket:/var/run/aesmd/aesm.socket \ + --mount type=bind,source=./certs.tar,target=/certs.tar \ + example_workspace bash -c "gramine-sgx fx collaborator start ..." 
+``` \ No newline at end of file From 6cb5d053b24e6576da0cd1d404ca43734743cdeb Mon Sep 17 00:00:00 2001 From: noopur Date: Thu, 14 Nov 2024 13:06:09 +0000 Subject: [PATCH 59/62] Single workflow for TLS and non TLS both Signed-off-by: noopur --- .github/workflows/task_runner_e2e.yml | 104 ++++++++++-------- .github/workflows/task_runner_e2e_wo_mtls.yml | 93 ---------------- tests/end_to_end/conftest.py | 7 +- tests/end_to_end/models/participants.py | 4 +- 4 files changed, 65 insertions(+), 143 deletions(-) delete mode 100644 .github/workflows/task_runner_e2e_wo_mtls.yml diff --git a/.github/workflows/task_runner_e2e.yml b/.github/workflows/task_runner_e2e.yml index 7f7f904aa3..ec0310dce7 100644 --- a/.github/workflows/task_runner_e2e.yml +++ b/.github/workflows/task_runner_e2e.yml @@ -1,3 +1,4 @@ +--- #--------------------------------------------------------------------------- # Workflow to run Task Runner end to end tests # Authors - Noopur, Payal Chaurasiya @@ -6,16 +7,16 @@ name: Task Runner E2E on: schedule: - - cron: '0 0 * * *' # Run every day at midnight + - cron: "0 0 * * *" # Run every day at midnight workflow_dispatch: inputs: num_rounds: - description: 'Number of rounds to train' + description: "Number of rounds to train" required: false default: "5" type: string num_collaborators: - description: 'Number of collaborators' + description: "Number of collaborators" required: false default: "2" type: string @@ -29,7 +30,7 @@ env: NUM_COLLABORATORS: ${{ inputs.num_collaborators || '2' }} jobs: - test_run: + test: name: tr runs-on: ubuntu-22.04 timeout-minutes: 120 # 2 hours @@ -37,8 +38,9 @@ jobs: matrix: # There are open issues for some of the models, so excluding them for now: # model_name: [ "torch_cnn_mnist", "keras_cnn_mnist", "torch_cnn_histology" ] - model_name: [ "torch_cnn_mnist", "keras_cnn_mnist" ] - python_version: [ "3.8", "3.9", "3.10" ] + model_name: ["torch_cnn_mnist", "keras_cnn_mnist"] + python_version: ["3.8", "3.9", "3.10"] + tls: [True, 
False] fail-fast: false # do not immediately fail if one of the combinations fail env: @@ -46,50 +48,58 @@ jobs: PYTHON_VERSION: ${{ matrix.python_version }} steps: - - name: Checkout OpenFL repository - id: checkout_openfl - uses: actions/checkout@v4.1.1 - with: - fetch-depth: 2 # needed for detecting changes - submodules: "true" - token: ${{ secrets.GITHUB_TOKEN }} + - name: Checkout OpenFL repository + id: checkout_openfl + uses: actions/checkout@v4.1.1 + with: + fetch-depth: 2 # needed for detecting changes + submodules: "true" + token: ${{ secrets.GITHUB_TOKEN }} - - name: Set up Python - id: setup_python - uses: actions/setup-python@v3 - with: - python-version: ${{ env.PYTHON_VERSION }} + - name: Set up Python + id: setup_python + uses: actions/setup-python@v3 + with: + python-version: ${{ env.PYTHON_VERSION }} - - name: Install dependencies - id: install_dependencies - run: | - python -m pip install --upgrade pip - pip install . - pip install -r test-requirements.txt + - name: Install dependencies + id: install_dependencies + run: | + python -m pip install --upgrade pip + pip install . 
+ pip install -r test-requirements.txt - - name: Run Task Runner E2E tests - id: run_task_runner_tests - run: | - python -m pytest -s tests/end_to_end/test_suites/task_runner_tests.py -m ${{ env.MODEL_NAME }} --num_rounds $NUM_ROUNDS --num_collaborators $NUM_COLLABORATORS --model_name ${{ env.MODEL_NAME }} - echo "Task runner end to end test run completed" + - name: Run Task Runner E2E tests + if: ${{ matrix.tls == 'true' }} + id: run_task_runner_tests_tls + run: | + python -m pytest -s tests/end_to_end/test_suites/task_runner_tests.py -m ${{ env.MODEL_NAME }} --num_rounds $NUM_ROUNDS --num_collaborators $NUM_COLLABORATORS --model_name ${{ env.MODEL_NAME }} + echo "Task runner end to end test run completed" - - name: Print test summary # Print the test summary only if the tests were run - id: print_test_summary - if: steps.run_task_runner_tests.outcome == 'success' || steps.run_task_runner_tests.outcome == 'failure' - run: | - export PYTHONPATH="$PYTHONPATH:." - python tests/end_to_end/utils/summary_helper.py - echo "Test summary printed" + - name: Run Task Runner E2E tests without TLS + if: ${{ matrix.tls == 'false' }} + id: run_task_runner_tests_non_tls + run: | + python -m pytest -s tests/end_to_end/test_suites/task_runner_tests.py -m ${{ env.MODEL_NAME }} --num_rounds $NUM_ROUNDS --num_collaborators $NUM_COLLABORATORS --model_name ${{ env.MODEL_NAME }} --disable_tls + echo "Task runner end to end test run completed" - - name: Tar files # Tar the test results only if the tests were run - id: tar_files - if: steps.run_task_runner_tests.outcome == 'success' || steps.run_task_runner_tests.outcome == 'failure' - run: tar -cvf result.tar results + - name: Print test summary # Print the test summary only if the tests were run + id: print_test_summary + if: steps.run_task_runner_tests.outcome == 'success' || steps.run_task_runner_tests.outcome == 'failure' || steps.run_task_runner_tests_non_tls.outcome == 'success' || steps.run_task_runner_tests_non_tls.outcome == 
'failure' + run: | + export PYTHONPATH="$PYTHONPATH:." + python tests/end_to_end/utils/summary_helper.py + echo "Test summary printed" - - name: Upload Artifacts # Upload the test results only if the tar was created - id: upload_artifacts - uses: actions/upload-artifact@v4 - if: steps.tar_files.outcome == 'success' - with: - name: task_runner_${{ env.MODEL_NAME }}_python${{ env.PYTHON_VERSION }}_${{ github.run_id }} - path: result.tar + - name: Tar files # Tar the test results only if the tests were run + id: tar_files + if: steps.run_task_runner_tests.outcome == 'success' || steps.run_task_runner_tests.outcome == 'failure' || steps.run_task_runner_tests_non_tls.outcome == 'success' || steps.run_task_runner_tests_non_tls.outcome == 'failure' + run: tar -cvf result.tar results + + - name: Upload Artifacts # Upload the test results only if the tar was created + id: upload_artifacts + uses: actions/upload-artifact@v4 + if: steps.tar_files.outcome == 'success' + with: + name: task_runner_tls_${{ matrix.tls }}_${{ env.MODEL_NAME }}_python${{ env.PYTHON_VERSION }}_${{ github.run_id }} + path: result.tar diff --git a/.github/workflows/task_runner_e2e_wo_mtls.yml b/.github/workflows/task_runner_e2e_wo_mtls.yml deleted file mode 100644 index 90b07fe4aa..0000000000 --- a/.github/workflows/task_runner_e2e_wo_mtls.yml +++ /dev/null @@ -1,93 +0,0 @@ -#--------------------------------------------------------------------------- -# Workflow to run Task Runner end to end tests with non TLS client auth -# Authors - Noopur, Payal Chaurasiya -#--------------------------------------------------------------------------- -name: Task Runner E2E W/o mTLS - -on: - workflow_dispatch: - inputs: - num_rounds: - description: 'Number of rounds to train' - required: false - default: "5" - type: string - num_collaborators: - description: 'Number of collaborators' - required: false - default: "2" - type: string - -permissions: - contents: read - -# Environment variables common for all the jobs 
-env: - NUM_ROUNDS: ${{ inputs.num_rounds || '5' }} - NUM_COLLABORATORS: ${{ inputs.num_collaborators || '2' }} - -jobs: - test_run: - name: tr_non_tls - runs-on: ubuntu-22.04 - timeout-minutes: 120 # 2 hours - strategy: - matrix: - # There are open issues for some of the models, so excluding them for now: - # model_name: [ "torch_cnn_mnist", "keras_cnn_mnist", "torch_cnn_histology" ] - model_name: [ "torch_cnn_mnist" ] - python_version: [ "3.10" ] - fail-fast: false # do not immediately fail if one of the combinations fail - - env: - MODEL_NAME: ${{ matrix.model_name }} - PYTHON_VERSION: ${{ matrix.python_version }} - - steps: - - name: Checkout OpenFL repository - id: checkout_openfl - uses: actions/checkout@v4.1.1 - with: - fetch-depth: 2 # needed for detecting changes - submodules: "true" - token: ${{ secrets.GITHUB_TOKEN }} - - - name: Set up Python - id: setup_python - uses: actions/setup-python@v3 - with: - python-version: ${{ env.PYTHON_VERSION }} - - - name: Install dependencies - id: install_dependencies - run: | - python -m pip install --upgrade pip - pip install . - pip install -r test-requirements.txt - - - name: Run Task Runner E2E tests without TLS - id: run_task_runner_tests - run: | - python -m pytest -s tests/end_to_end/test_suites/task_runner_tests.py -m ${{ env.MODEL_NAME }} --num_rounds $NUM_ROUNDS --num_collaborators $NUM_COLLABORATORS --model_name ${{ env.MODEL_NAME }} --disable_tls - echo "Task runner end to end test run completed" - - - name: Print test summary # Print the test summary only if the tests were run - id: print_test_summary - if: steps.run_task_runner_tests.outcome == 'success' || steps.run_task_runner_tests.outcome == 'failure' - run: | - export PYTHONPATH="$PYTHONPATH:." 
- python tests/end_to_end/utils/summary_helper.py - echo "Test summary printed" - - - name: Tar files # Tar the test results only if the tests were run - id: tar_files - if: steps.run_task_runner_tests.outcome == 'success' || steps.run_task_runner_tests.outcome == 'failure' - run: tar -cvf result.tar results - - - name: Upload Artifacts # Upload the test results only if the tar was created - id: upload_artifacts - uses: actions/upload-artifact@v4 - if: steps.tar_files.outcome == 'success' - with: - name: task_runner_${{ env.MODEL_NAME }}_python${{ env.PYTHON_VERSION }}_${{ github.run_id }} - path: result.tar diff --git a/tests/end_to_end/conftest.py b/tests/end_to_end/conftest.py index 193609034e..d2c9c20f89 100644 --- a/tests/end_to_end/conftest.py +++ b/tests/end_to_end/conftest.py @@ -260,7 +260,12 @@ def fx_federation(request, pytestconfig): # Modify the plan try: - model_owner.modify_plan(new_rounds=num_rounds, num_collaborators=num_collaborators, disable_tls=disable_tls) + model_owner.modify_plan( + new_rounds=num_rounds, + num_collaborators=num_collaborators, + disable_client_auth=disable_client_auth, + disable_tls=disable_tls, + ) except Exception as e: log.error(f"Failed to modify the plan: {e}") raise e diff --git a/tests/end_to_end/models/participants.py b/tests/end_to_end/models/participants.py index 0ef22d8f28..5bde7f39ec 100644 --- a/tests/end_to_end/models/participants.py +++ b/tests/end_to_end/models/participants.py @@ -133,8 +133,8 @@ def modify_plan(self, new_rounds=None, num_collaborators=None, disable_client_au data["aggregator"]["settings"]["rounds_to_train"] = int(self.rounds_to_train) data["data_loader"]["settings"]["collaborator_count"] = int(self.num_collaborators) - data["network"]["settings"]["disable_client_auth"] = True if disable_client_auth else False - data["network"]["settings"]["tls"] = False if disable_tls else True + data["network"]["settings"]["disable_client_auth"] = disable_client_auth + data["network"]["settings"]["tls"] = 
not disable_tls with open(self.plan_path, "w+") as write_file: yaml.dump(data, write_file) From ca00053ed053398bc31c4a848fb7f981ea435564 Mon Sep 17 00:00:00 2001 From: noopur Date: Thu, 14 Nov 2024 13:09:42 +0000 Subject: [PATCH 60/62] Single workflow for TLS and non TLS both Signed-off-by: noopur --- .github/workflows/task_runner_e2e.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/task_runner_e2e.yml b/.github/workflows/task_runner_e2e.yml index ec0310dce7..fdcacee360 100644 --- a/.github/workflows/task_runner_e2e.yml +++ b/.github/workflows/task_runner_e2e.yml @@ -68,16 +68,17 @@ jobs: python -m pip install --upgrade pip pip install . pip install -r test-requirements.txt + echo ${{ matrix.tls }} - name: Run Task Runner E2E tests - if: ${{ matrix.tls == 'true' }} + if: matrix.tls == 'true' id: run_task_runner_tests_tls run: | python -m pytest -s tests/end_to_end/test_suites/task_runner_tests.py -m ${{ env.MODEL_NAME }} --num_rounds $NUM_ROUNDS --num_collaborators $NUM_COLLABORATORS --model_name ${{ env.MODEL_NAME }} echo "Task runner end to end test run completed" - name: Run Task Runner E2E tests without TLS - if: ${{ matrix.tls == 'false' }} + if: matrix.tls == 'false' id: run_task_runner_tests_non_tls run: | python -m pytest -s tests/end_to_end/test_suites/task_runner_tests.py -m ${{ env.MODEL_NAME }} --num_rounds $NUM_ROUNDS --num_collaborators $NUM_COLLABORATORS --model_name ${{ env.MODEL_NAME }} --disable_tls From f15e9c50e71dceb208eb5d4c794bbc73ed4c7010 Mon Sep 17 00:00:00 2001 From: noopur Date: Thu, 14 Nov 2024 13:36:40 +0000 Subject: [PATCH 61/62] Single workflow for TLS and non TLS both Signed-off-by: noopur --- .github/workflows/task_runner_e2e.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/task_runner_e2e.yml b/.github/workflows/task_runner_e2e.yml index fdcacee360..1d74a6640b 100644 --- a/.github/workflows/task_runner_e2e.yml +++ 
b/.github/workflows/task_runner_e2e.yml @@ -38,8 +38,8 @@ jobs: matrix: # There are open issues for some of the models, so excluding them for now: # model_name: [ "torch_cnn_mnist", "keras_cnn_mnist", "torch_cnn_histology" ] - model_name: ["torch_cnn_mnist", "keras_cnn_mnist"] - python_version: ["3.8", "3.9", "3.10"] + model_name: ["torch_cnn_mnist"] + python_version: ["3.8"] tls: [True, False] fail-fast: false # do not immediately fail if one of the combinations fail @@ -71,14 +71,14 @@ jobs: echo ${{ matrix.tls }} - name: Run Task Runner E2E tests - if: matrix.tls == 'true' + if: matrix.tls == true id: run_task_runner_tests_tls run: | python -m pytest -s tests/end_to_end/test_suites/task_runner_tests.py -m ${{ env.MODEL_NAME }} --num_rounds $NUM_ROUNDS --num_collaborators $NUM_COLLABORATORS --model_name ${{ env.MODEL_NAME }} echo "Task runner end to end test run completed" - name: Run Task Runner E2E tests without TLS - if: matrix.tls == 'false' + if: matrix.tls == false id: run_task_runner_tests_non_tls run: | python -m pytest -s tests/end_to_end/test_suites/task_runner_tests.py -m ${{ env.MODEL_NAME }} --num_rounds $NUM_ROUNDS --num_collaborators $NUM_COLLABORATORS --model_name ${{ env.MODEL_NAME }} --disable_tls From 0591cc24d9bf7690e798bd25f9b10cb57cd1a6e8 Mon Sep 17 00:00:00 2001 From: noopur Date: Thu, 14 Nov 2024 13:55:18 +0000 Subject: [PATCH 62/62] Non TLS with single model and 3.10 python Signed-off-by: noopur --- .github/workflows/task_runner_e2e.yml | 83 ++++++++++++++++++++++----- 1 file changed, 69 insertions(+), 14 deletions(-) diff --git a/.github/workflows/task_runner_e2e.yml b/.github/workflows/task_runner_e2e.yml index 1d74a6640b..9603db81cf 100644 --- a/.github/workflows/task_runner_e2e.yml +++ b/.github/workflows/task_runner_e2e.yml @@ -31,16 +31,15 @@ env: jobs: test: - name: tr + name: tr_tls runs-on: ubuntu-22.04 timeout-minutes: 120 # 2 hours strategy: matrix: # There are open issues for some of the models, so excluding them for now: # 
model_name: [ "torch_cnn_mnist", "keras_cnn_mnist", "torch_cnn_histology" ] - model_name: ["torch_cnn_mnist"] - python_version: ["3.8"] - tls: [True, False] + model_name: ["torch_cnn_mnist", "keras_cnn_mnist"] + python_version: ["3.8", "3.9", "3.10"] fail-fast: false # do not immediately fail if one of the combinations fail env: @@ -68,25 +67,81 @@ jobs: python -m pip install --upgrade pip pip install . pip install -r test-requirements.txt - echo ${{ matrix.tls }} - - name: Run Task Runner E2E tests - if: matrix.tls == true - id: run_task_runner_tests_tls + - name: Run Task Runner E2E tests with TLS + id: run_tests run: | python -m pytest -s tests/end_to_end/test_suites/task_runner_tests.py -m ${{ env.MODEL_NAME }} --num_rounds $NUM_ROUNDS --num_collaborators $NUM_COLLABORATORS --model_name ${{ env.MODEL_NAME }} echo "Task runner end to end test run completed" + - name: Print test summary # Print the test summary only if the tests were run + id: print_test_summary + if: steps.run_tests.outcome == 'success' || steps.run_tests.outcome == 'failure' + run: | + export PYTHONPATH="$PYTHONPATH:." 
+ python tests/end_to_end/utils/summary_helper.py + echo "Test summary printed" + + - name: Tar files # Tar the test results only if the tests were run + id: tar_files + if: steps.run_tests.outcome == 'success' || steps.run_tests.outcome == 'failure' + run: tar -cvf result.tar results + + - name: Upload Artifacts # Upload the test results only if the tar was created + id: upload_artifacts + uses: actions/upload-artifact@v4 + if: steps.tar_files.outcome == 'success' + with: + name: task_runner_tls_${{ env.MODEL_NAME }}_python${{ env.PYTHON_VERSION }}_${{ github.run_id }} + path: result.tar + + test_with_non_tls: + name: tr_non_tls + runs-on: ubuntu-22.04 + timeout-minutes: 120 # 2 hours + strategy: + matrix: + # Testing non TLS scenario only for torch_cnn_mnist model and python 3.10 + # If required, this can be extended to other models and python versions + model_name: ["torch_cnn_mnist"] + python_version: ["3.10"] + fail-fast: false # do not immediately fail if one of the combinations fail + + env: + MODEL_NAME: ${{ matrix.model_name }} + PYTHON_VERSION: ${{ matrix.python_version }} + + steps: + - name: Checkout OpenFL repository + id: checkout_openfl + uses: actions/checkout@v4.1.1 + with: + fetch-depth: 2 # needed for detecting changes + submodules: "true" + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Set up Python + id: setup_python + uses: actions/setup-python@v3 + with: + python-version: ${{ env.PYTHON_VERSION }} + + - name: Install dependencies + id: install_dependencies + run: | + python -m pip install --upgrade pip + pip install . 
+ pip install -r test-requirements.txt + - name: Run Task Runner E2E tests without TLS - if: matrix.tls == false - id: run_task_runner_tests_non_tls + id: run_tests run: | - python -m pytest -s tests/end_to_end/test_suites/task_runner_tests.py -m ${{ env.MODEL_NAME }} --num_rounds $NUM_ROUNDS --num_collaborators $NUM_COLLABORATORS --model_name ${{ env.MODEL_NAME }} --disable_tls + python -m pytest -s tests/end_to_end/test_suites/task_runner_tests.py -m ${{ env.MODEL_NAME }} --num_rounds $NUM_ROUNDS --num_collaborators $NUM_COLLABORATORS --disable_tls echo "Task runner end to end test run completed" - name: Print test summary # Print the test summary only if the tests were run id: print_test_summary - if: steps.run_task_runner_tests.outcome == 'success' || steps.run_task_runner_tests.outcome == 'failure' || steps.run_task_runner_tests_non_tls.outcome == 'success' || steps.run_task_runner_tests_non_tls.outcome == 'failure' + if: steps.run_tests.outcome == 'success' || steps.run_tests.outcome == 'failure' run: | export PYTHONPATH="$PYTHONPATH:." python tests/end_to_end/utils/summary_helper.py @@ -94,7 +149,7 @@ jobs: - name: Tar files # Tar the test results only if the tests were run id: tar_files - if: steps.run_task_runner_tests.outcome == 'success' || steps.run_task_runner_tests.outcome == 'failure' || steps.run_task_runner_tests_non_tls.outcome == 'success' || steps.run_task_runner_tests_non_tls.outcome == 'failure' + if: steps.run_tests.outcome == 'success' || steps.run_tests.outcome == 'failure' run: tar -cvf result.tar results - name: Upload Artifacts # Upload the test results only if the tar was created @@ -102,5 +157,5 @@ jobs: uses: actions/upload-artifact@v4 if: steps.tar_files.outcome == 'success' with: - name: task_runner_tls_${{ matrix.tls }}_${{ env.MODEL_NAME }}_python${{ env.PYTHON_VERSION }}_${{ github.run_id }} + name: task_runner_non_tls_${{ env.MODEL_NAME }}_python${{ env.PYTHON_VERSION }}_${{ github.run_id }} path: result.tar