Skip to content

Commit

Permalink
Merge branch 'develop' into dockerize_gramine
Browse files Browse the repository at this point in the history
  • Loading branch information
MasterSkepticista authored Nov 14, 2024
2 parents 12756b1 + 4aba7ed commit d5f3f0a
Show file tree
Hide file tree
Showing 7 changed files with 144 additions and 88 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/task_runner_e2e.yml
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ env:

jobs:
test_run:
name: test
name: tr
runs-on: ubuntu-22.04
timeout-minutes: 120 # 2 hours
strategy:
Expand Down Expand Up @@ -78,7 +78,7 @@ jobs:
if: steps.run_task_runner_tests.outcome == 'success' || steps.run_task_runner_tests.outcome == 'failure'
run: |
export PYTHONPATH="$PYTHONPATH:."
python tests/end_to_end/utils/xml_helper.py
python tests/end_to_end/utils/summary_helper.py
echo "Test summary printed"
- name: Tar files # Tar the test results only if the tests were run
Expand Down
14 changes: 10 additions & 4 deletions .github/workflows/trivy.yml
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ jobs:
permissions:
contents: read # for actions/checkout to fetch code
security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status
actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status
name: Build
runs-on: ubuntu-22.04
steps:
Expand All @@ -23,11 +23,13 @@ jobs:
docker build --pull -t docker.io/securefederatedai/openfl:${{ github.sha }} -f openfl-docker/Dockerfile.base .
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@0.24.0
uses: aquasecurity/trivy-action@0.28.0
with:
image-ref: 'docker.io/securefederatedai/openfl:${{ github.sha }}'
format: 'sarif'
output: 'trivy-results.sarif'
env:
TRIVY_DB_REPOSITORY: 'ghcr.io/aquasecurity/trivy-db,public.ecr.aws/aquasecurity/trivy-db'

- name: Upload Trivy scan results to GitHub Security tab
uses: github/codeql-action/upload-sarif@v2
Expand Down Expand Up @@ -56,7 +58,7 @@ jobs:
path: trivy-code-results.json

- name: Run Trivy vulnerability scanner for Docker image (JSON Output)
uses: aquasecurity/trivy-action@0.24.0
uses: aquasecurity/trivy-action@0.28.0
with:
image-ref: 'docker.io/securefederatedai/openfl:${{ github.sha }}'
format: 'json'
Expand All @@ -65,6 +67,8 @@ jobs:
ignore-unfixed: true
vuln-type: 'os,library'
severity: 'CRITICAL,HIGH,MEDIUM,LOW'
env:
TRIVY_DB_REPOSITORY: 'ghcr.io/aquasecurity/trivy-db,public.ecr.aws/aquasecurity/trivy-db'

- name: Upload Docker Vulnerability Scan
uses: actions/upload-artifact@v3
Expand All @@ -90,7 +94,7 @@ jobs:
path: trivy-code-spdx-results.json

- name: Run Trivy vulnerability scanner for Docker image (SPDX-JSON Output)
uses: aquasecurity/trivy-action@0.24.0
uses: aquasecurity/trivy-action@0.28.0
with:
image-ref: 'docker.io/securefederatedai/openfl:${{ github.sha }}'
format: 'spdx-json'
Expand All @@ -99,6 +103,8 @@ jobs:
ignore-unfixed: true
vuln-type: 'os,library'
severity: 'CRITICAL,HIGH,MEDIUM,LOW'
env:
TRIVY_DB_REPOSITORY: 'ghcr.io/aquasecurity/trivy-db,public.ecr.aws/aquasecurity/trivy-db'

- name: Upload Docker Vulnerability Scan
uses: actions/upload-artifact@v3
Expand Down
5 changes: 4 additions & 1 deletion openfl/federated/data/loader_gandlf.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,10 @@ def __init__(self, data_path, feature_shape):
data_path (str): The path to the directory containing the data.
feature_shape (tuple): The shape of an example feature array.
"""
self.train_csv = data_path + "/train.csv"
if "inference" in data_path:
self.train_csv = None
else:
self.train_csv = data_path + "/train.csv"
self.val_csv = data_path + "/valid.csv"
self.train_dataloader = None
self.val_dataloader = None
Expand Down
7 changes: 2 additions & 5 deletions tests/end_to_end/models/participants.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@
# SPDX-License-Identifier: Apache-2.0

import os
from datetime import datetime
import yaml
import logging

Expand Down Expand Up @@ -292,8 +291,7 @@ def start(self):
"""
try:
log.info(f"Starting {self.name}")
curr_time = datetime.now().strftime("%Y%m%d_%H%M%S")
filename = f"{self.name}_{curr_time}.log"
filename = f"{self.name}.log"
res_file = os.path.join(os.getcwd(), self.workspace_path, filename)
bg_file = open(res_file, "w", buffering=1)

Expand Down Expand Up @@ -412,8 +410,7 @@ def start(self):
"""
try:
log.info(f"Starting {self.collaborator_name}")
curr_time = datetime.now().strftime("%Y%m%d_%H%M%S")
filename = f"{self.collaborator_name}_{curr_time}.log"
filename = f"{self.collaborator_name}.log"
res_file = os.path.join(os.getcwd(), self.workspace_path, filename)
bg_file = open(res_file, "w", buffering=1)

Expand Down
3 changes: 2 additions & 1 deletion tests/end_to_end/utils/federation_helper.py
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,8 @@ def verify_federation_run_completion(fed_obj, results):
for i, participant in enumerate(fed_obj.collaborators + [fed_obj.aggregator])
]

# Result will contain a list of tuple of replica and operator objects.
# Result will contain a list of boolean values for all the participants.
# True - successful completion, False - failed/incomplete
results = [f.result() for f in futures]
log.info(f"Results: {results}")

Expand Down
124 changes: 124 additions & 0 deletions tests/end_to_end/utils/summary_helper.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,124 @@
# Copyright 2020-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import ast
import os
import xml.etree.ElementTree as ET

from lxml import etree

# Initialize a lenient XML parser (recover=True tolerates malformed report files).
# NOTE(review): results/results.xml is parsed at import time, so importing this
# module outside the CI results directory fails — confirm this is intentional.
parser = etree.XMLParser(recover=True, encoding='utf-8')
tree = ET.parse("results/results.xml", parser=parser)

# Get the root element (<testsuites>); read by get_testcase_result() below.
testsuites = tree.getroot()


def get_aggregated_accuracy(agg_log_file):
"""
Get the aggregated accuracy from aggregator logs
Args:
agg_log_file: the aggregator log file
Returns:
agg_accuracy: the aggregated accuracy
"""
if not os.path.exists(agg_log_file):
print(f"Aggregator log file {agg_log_file} not found. Cannot get aggregated accuracy")
return "Not Found"

# Example line(s) containing spaces and special characters:
"""
METRIC {'metric_origin': 'aggregator', 'task_name': 'aggregated_model_validation', 'metric_name': 'accuracy', 'metric_value': aggregator.py:933
0.15911591053009033, 'round': 0}
"""
try:
with open(agg_log_file, 'r') as f:
for line in f:
if "metric_origin" in line and "aggregator" in line and "aggregated_model_validation" in line:
line = line.split("aggregator.py:")[0].strip()
# If the line does not contain closing bracket "}", then concatenate the next line
reqd_line = line if "}" in line else line + next(f).strip()
agg_accuracy = eval(reqd_line.split("METRIC")[1].strip('"'))["metric_value"]
return agg_accuracy

except Exception as e:
# Do not fail the test if the accuracy cannot be fetched
print(f"Error while reading aggregator log file: {e}")
return "Not Found"


def get_test_status(result):
    """
    Map a JUnit XML result sub-element to a test verdict.
    Args:
        result: the result sub-element of a testcase
            (e.g. <failure>, <error>, <skipped>)
    Returns:
        str: "FAILED", "SKIPPED" or "PASSED"
    """
    if "failure" in result.tag or "error" in result.tag:
        return "FAILED"
    if "skipped" in result.tag:
        return "SKIPPED"
    return "PASSED"


def get_testcase_result(testsuites_root=None):
    """
    Collect per-testcase results from the JUnit XML report.
    Args:
        testsuites_root: optional <testsuites> root element to read from.
            Defaults to the module-level `testsuites` parsed from
            results/results.xml (kept for backward compatibility).
    Returns:
        list[dict]: one dict per testcase with "name", "time" and "result" keys.
    """
    root = testsuites if testsuites_root is None else testsuites_root
    database_list = []
    # Iterate over each testsuite in testsuites
    for testsuite in root:
        # Populate testcase details in a dictionary
        for testcase in testsuite:
            database_dict = {}
            if testcase.attrib.get("name"):
                database_dict["name"] = testcase.attrib.get("name")
                database_dict["time"] = testcase.attrib.get("time")

            # Successful test won't have any result/subtag
            if len(testcase) == 0:
                database_dict["result"] = "PASSED"

            # Otherwise the verdict comes from the child result element(s);
            # the last child wins when several are present.
            for result in testcase:
                database_dict["result"] = get_test_status(result)

            # Append the dictionary to database_list
            database_list.append(database_dict)

    return database_list


if __name__ == "__main__":
    """
    Main function to get the test case results and aggregator logs
    And write the results to GitHub step summary
    """
    result = get_testcase_result()

    num_cols = os.getenv("NUM_COLLABORATORS")
    num_rounds = os.getenv("NUM_ROUNDS")
    model_name = os.getenv("MODEL_NAME")

    # Pre-initialize so the summary table can still be written when MODEL_NAME
    # is unset (the original referenced agg_accuracy only set in the else
    # branch, raising NameError in that case).
    agg_accuracy = "Not Found"
    if not model_name:
        print("MODEL_NAME is not set, cannot find out aggregator logs")
    else:
        workspace_name = "workspace_" + model_name
        agg_log_file = os.path.join("results", workspace_name, "aggregator.log")
        agg_accuracy = get_aggregated_accuracy(agg_log_file)

    # Write the results to GitHub step summary
    with open(os.getenv('GITHUB_STEP_SUMMARY'), 'a') as fh:
        # DO NOT change the print statements
        print("| Name | Time (in seconds) | Result | Collaborators | Rounds to train | Score (if applicable) |", file=fh)
        print("| ------------- | ------------- | ------------- | ------------- | ------------- | ------------- |", file=fh)
        for item in result:
            print(f"| {item['name']} | {item['time']} | {item['result']} | {num_cols} | {num_rounds} | {agg_accuracy} |", file=fh)
75 changes: 0 additions & 75 deletions tests/end_to_end/utils/xml_helper.py

This file was deleted.

0 comments on commit d5f3f0a

Please sign in to comment.