Skip to content

Commit

Permalink
Merge branch 'develop' into karansh1/dockerize_refactor
Browse files Browse the repository at this point in the history
  • Loading branch information
MasterSkepticista authored Oct 28, 2024
2 parents 5a9cf18 + 1b051b0 commit d978708
Show file tree
Hide file tree
Showing 20 changed files with 75 additions and 21 deletions.
3 changes: 2 additions & 1 deletion .github/workflows/pytest_coverage.yml
Original file line number Diff line number Diff line change
@@ -1,11 +1,12 @@
# This workflow will install Python dependencies, run tests and lint with a single version of Python
# This workflow will run code coverage
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions

name: Pytest and code coverage

on:
pull_request:
branches: [ develop ]
workflow_dispatch:

permissions:
contents: read
Expand Down
48 changes: 48 additions & 0 deletions .github/workflows/workflow_interface_101_mnist.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
#---------------------------------------------------------------------------
# Workflow to run 101 MNIST Notebook
# Authors - Noopur, Payal Chaurasiya
#---------------------------------------------------------------------------
name: Workflow Interface 101 MNIST Notebook

on:
pull_request:
branches: [ develop ]

workflow_dispatch:

permissions:
contents: read

jobs:
run_notebook:
runs-on: ubuntu-22.04
steps:
- name: Checkout OpenFL repository
uses: actions/[email protected]
with:
fetch-depth: 2 # needed for detecting changes
submodules: "true"
token: ${{ secrets.GITHUB_TOKEN }}

- name: Set up Python
uses: actions/setup-python@v3
with:
python-version: "3.10"

- name: Install Jupyter Lab Package
run: pip install jupyterlab

- name: Run Notebook
run: |
jupyter nbconvert --execute --to notebook ./openfl-tutorials/experimental/101_MNIST.ipynb
echo "Notebook run completed"
- name: Tar files
run: tar -cvf notebook.tar ./openfl-tutorials/experimental/101_MNIST.nbconvert.ipynb

- name: Upload Artifacts
uses: actions/upload-artifact@v4
if: ${{ always() }} # collect artifacts regardless of failures
with:
name: wf_interface_101_mnist_${{ github.run_id }}
path: notebook.tar
8 changes: 4 additions & 4 deletions docs/about/features_index/taskrunner.rst
Original file line number Diff line number Diff line change
Expand Up @@ -303,11 +303,11 @@ Setting Up the Certificate Authority
.. note::

You can override the apparent FQDN of the system by setting an FQDN environment variable before creating the certificate.
You can override the apparent FQDN by setting it explicitly via the :code:`--fqdn` parameter.

.. code-block:: console
$ fx aggregator generate-cert-request export FQDN=x.x.x.x
$ fx aggregator generate-cert-request --fqdn AFQDN
If you omit the :code:`--fqdn` parameter, then :code:`fx` will automatically use the FQDN of the current node, assuming the node has been correctly configured with a static address.

Expand All @@ -324,11 +324,11 @@ Setting Up the Certificate Authority
.. note::

You can override the apparent FQDN of the system by setting an FQDN environment variable (:code:`export FQDN=x.x.x.x`) before signing the certificate.
You can override the apparent FQDN of the system by setting an FQDN environment variable (:code:`export FQDN=AFQDN`) before signing the certificate.

.. code-block:: console
$ fx aggregator certify export FQDN=x.x.x.x
$ fx aggregator certify --fqdn AFQDN
5. This node now has a signed security certificate as the aggregator for this new federation. You should have the following files.

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -67,12 +67,17 @@
"metadata": {},
"outputs": [],
"source": [
"# Below code will display the print statement output on screen as well\n",
"import sys\n",
"sys.stdout = open('/dev/stdout', 'w')\n",
"\n",
"!pip install git+https://github.com/securefederatedai/openfl.git\n",
"!pip install -r workflow_interface_requirements.txt\n",
"!pip install torch\n",
"!pip install torchvision\n",
"!pip install -U ipywidgets\n",
"\n",
"# Uncomment this if running in Google Colab\n",
"# Uncomment this if running in Google Colab and set USERNAME if running in docker container.\n",
"# !pip install -r https://raw.githubusercontent.com/intel/openfl/develop/openfl-tutorials/experimental/workflow_interface_requirements.txt\n",
"# import os\n",
"# os.environ[\"USERNAME\"] = \"colab\""
Expand Down Expand Up @@ -386,9 +391,8 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "86b3dd2e",
"id": "9a7cc8f7",
"metadata": {},
"source": [
"Now that the flow has completed, let's get the final model and accuracy"
Expand Down Expand Up @@ -685,7 +689,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.19"
"version": "3.11.5"
}
},
"nbformat": 4,
Expand Down
2 changes: 1 addition & 1 deletion openfl-workspace/gandlf_seg_test/requirements.txt
Original file line number Diff line number Diff line change
@@ -1 +1 @@
onnx==1.16.0
onnx==1.17.0
9 changes: 5 additions & 4 deletions openfl/component/aggregator/aggregator.py
Original file line number Diff line number Diff line change
Expand Up @@ -968,16 +968,17 @@ def _end_of_round_check(self):

# Once all of the task results have been processed
self._end_of_round_check_done[self.round_number] = True

# Save the latest model
self.logger.info("Saving round %s model...", self.round_number)
self._save_model(self.round_number, self.last_state_path)

self.round_number += 1
# resetting stragglers for task for a new round
self.stragglers = []
# resetting collaborators_done for next round
self.collaborators_done = []

# Save the latest model
self.logger.info("Saving round %s model...", self.round_number)
self._save_model(self.round_number, self.last_state_path)

# TODO This needs to be fixed!
if self._time_to_quit():
self.logger.info("Experiment Completed. Cleaning up...")
Expand Down
10 changes: 3 additions & 7 deletions openfl/experimental/utilities/metaflow_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,9 +66,9 @@ def __init__(self, name):

def __enter__(self):
lock_id = hashlib.new(
"md5", self.name.encode("utf8"), usedforsecurity=False
"sha256", self.name.encode("utf8"), usedforsecurity=False
).hexdigest() # nosec
# MD5sum used for concurrency purposes, not security
# Using SHA-256 to address security warning
self.fp = open(f"/tmp/.lock-{lock_id}.lck", "wb")
fcntl.flock(self.fp.fileno(), fcntl.LOCK_EX)

Expand Down Expand Up @@ -345,11 +345,7 @@ def save_artifacts(self, artifacts_iter, force_v4=False, len_hint=0):

def pickle_iter():
for name, obj in artifacts_iter:
do_v4 = (
force_v4 and force_v4
if isinstance(force_v4, bool)
else force_v4.get(name, False)
)
do_v4 = force_v4 if isinstance(force_v4, bool) else force_v4.get(name, False)
if do_v4:
encode_type = "gzip+pickle-v4"
if encode_type not in self._encodings:
Expand Down
4 changes: 4 additions & 0 deletions openfl/interface/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
"""CLI module."""
import logging
import os
import re
import sys
import time
import warnings
Expand Down Expand Up @@ -181,6 +182,9 @@ def cli(context, log_level, no_warnings):
# This will be overridden later with user selected debugging level
disable_warnings()
log_file = os.getenv("LOG_FILE")
# Validate log_file using allow list approach
if log_file and not re.match(r"^[\w\-.]+$", log_file):
raise ValueError("Invalid log file path")
setup_logging(log_level, log_file)
sys.stdout.reconfigure(encoding="utf-8")

Expand Down

0 comments on commit d978708

Please sign in to comment.