#---------------------------------------------------------------------------
# Workflow to run Task Runner end to end tests
# Authors - Noopur, Payal Chaurasiya
#---------------------------------------------------------------------------
name: Task Runner E2E

on:
  schedule:
    - cron: "0 0 * * *" # Run every day at midnight
  workflow_dispatch:
    inputs:
      num_rounds:
        description: "Number of rounds to train"
        required: false
        default: "5"
        type: string
      num_collaborators:
        description: "Number of collaborators"
        required: false
        default: "2"
        type: string

permissions:
  contents: read

# Environment variables common for all the jobs
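# Note: scheduled runs carry no workflow_dispatch inputs, so the '||'
# fallbacks below supply the same defaults ("5" and "2") as the inputs above.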
env:
  NUM_ROUNDS: ${{ inputs.num_rounds || '5' }}
  NUM_COLLABORATORS: ${{ inputs.num_collaborators || '2' }}

jobs:
  test:
    name: tr_tls
    runs-on: ubuntu-22.04
    timeout-minutes: 120 # 2 hours
    strategy:
      matrix:
        # There are open issues for some of the models, so excluding them for now:
        # model_name: [ "torch_cnn_mnist", "keras_cnn_mnist", "torch_cnn_histology" ]
        model_name: ["torch_cnn_mnist", "keras_cnn_mnist"]
        python_version: ["3.8", "3.9", "3.10"]
      fail-fast: false # do not immediately fail if one of the combinations fails
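    # The matrix above fans out into six independent jobs (2 models x 3
    # Python versions), each with its own MODEL_NAME and PYTHON_VERSION.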
    env:
      MODEL_NAME: ${{ matrix.model_name }}
      PYTHON_VERSION: ${{ matrix.python_version }}
    steps:
      - name: Checkout OpenFL repository
        id: checkout_openfl
        uses: actions/checkout@v4
        with:
          fetch-depth: 2 # needed for detecting changes
          submodules: "true"
          token: ${{ secrets.GITHUB_TOKEN }}
      - name: Set up Python
        id: setup_python
        uses: actions/setup-python@v3
        with:
          python-version: ${{ env.PYTHON_VERSION }}
      - name: Install dependencies
        id: install_dependencies
        run: |
          python -m pip install --upgrade pip
          pip install .
          pip install -r test-requirements.txt
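      # Each model's tests carry a pytest marker matching the model name, so
      # '-m' below selects only that model's suite; --num_rounds,
      # --num_collaborators and --model_name are custom options (presumably
      # registered in the suite's conftest).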
      - name: Run Task Runner E2E tests with TLS
        id: run_tests
        run: |
          python -m pytest -s tests/end_to_end/test_suites/task_runner_tests.py \
            -m ${{ env.MODEL_NAME }} --num_rounds $NUM_ROUNDS \
            --num_collaborators $NUM_COLLABORATORS --model_name ${{ env.MODEL_NAME }}
          echo "Task runner end to end test run completed"
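      # 'outcome' reports a step's raw result; combined with always(), the
      # post-processing steps below run whether the tests passed or failed,
      # but not when they were skipped or cancelled.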
      - name: Print test summary # Print the test summary only if the tests were run
        id: print_test_summary
        if: ${{ always() && (steps.run_tests.outcome == 'success' || steps.run_tests.outcome == 'failure') }}
        run: |
          export PYTHONPATH="$PYTHONPATH:."
          python tests/end_to_end/utils/summary_helper.py
          echo "Test summary printed"
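      # The e2e suite is expected to write its logs and reports under
      # ./results; that directory is archived and uploaded below.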
      - name: Tar files # Tar the test results only if the tests were run
        id: tar_files
        if: ${{ always() && (steps.run_tests.outcome == 'success' || steps.run_tests.outcome == 'failure') }}
        run: tar -cvf result.tar results
      - name: Upload Artifacts # Upload the test results only if the tar was created
        id: upload_artifacts
        uses: actions/upload-artifact@v4
        if: ${{ always() && steps.tar_files.outcome == 'success' }}
        with:
          name: task_runner_tls_${{ env.MODEL_NAME }}_python${{ env.PYTHON_VERSION }}_${{ github.run_id }}
          path: result.tar
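
  # Same flow as the TLS job above, but the tests run with TLS disabled.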
  test_with_non_tls:
    name: tr_non_tls
    runs-on: ubuntu-22.04
    timeout-minutes: 120 # 2 hours
    strategy:
      matrix:
        # Test the non-TLS scenario only for the torch_cnn_mnist model and Python 3.10.
        # If required, this can be extended to other models and Python versions.
        model_name: ["torch_cnn_mnist"]
        python_version: ["3.10"]
      fail-fast: false # do not immediately fail if one of the combinations fails
    env:
      MODEL_NAME: ${{ matrix.model_name }}
      PYTHON_VERSION: ${{ matrix.python_version }}
    steps:
      - name: Checkout OpenFL repository
        id: checkout_openfl
        uses: actions/checkout@v4
        with:
          fetch-depth: 2 # needed for detecting changes
          submodules: "true"
          token: ${{ secrets.GITHUB_TOKEN }}
      - name: Set up Python
        id: setup_python
        uses: actions/setup-python@v3
        with:
          python-version: ${{ env.PYTHON_VERSION }}
      - name: Install dependencies
        id: install_dependencies
        run: |
          python -m pip install --upgrade pip
          pip install .
          pip install -r test-requirements.txt
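      # Identical to the TLS job's invocation, except the suite runs with
      # --disable_tls and without the --model_name override.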
      - name: Run Task Runner E2E tests without TLS
        id: run_tests
        run: |
          python -m pytest -s tests/end_to_end/test_suites/task_runner_tests.py \
            -m ${{ env.MODEL_NAME }} --num_rounds $NUM_ROUNDS \
            --num_collaborators $NUM_COLLABORATORS --disable_tls
          echo "Task runner end to end test run completed"
      - name: Print test summary # Print the test summary only if the tests were run
        id: print_test_summary
        if: ${{ always() && (steps.run_tests.outcome == 'success' || steps.run_tests.outcome == 'failure') }}
        run: |
          export PYTHONPATH="$PYTHONPATH:."
          python tests/end_to_end/utils/summary_helper.py
          echo "Test summary printed"
      - name: Tar files # Tar the test results only if the tests were run
        id: tar_files
        if: ${{ always() && (steps.run_tests.outcome == 'success' || steps.run_tests.outcome == 'failure') }}
        run: tar -cvf result.tar results
      - name: Upload Artifacts # Upload the test results only if the tar was created
        id: upload_artifacts
        uses: actions/upload-artifact@v4
        if: ${{ always() && steps.tar_files.outcome == 'success' }}
        with:
          name: task_runner_non_tls_${{ env.MODEL_NAME }}_python${{ env.PYTHON_VERSION }}_${{ github.run_id }}
          path: result.tar