change opt treatment and device policy to enum values #374

Open · wants to merge 10 commits into base: develop
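This PR replaces the string-valued opt_treatment and device_assignment_policy arguments with the OptTreatment and DevicePolicy enums from openfl.utilities.enum_types, updating the tutorial notebooks and the Collaborator class accordingly. The snippet below is a minimal usage sketch assembled from the tutorial diffs in this PR; fl_experiment, MI, TI, and fed_dataset stand in for the experiment, ModelInterface, TaskInterface, and data loader objects created earlier in each notebook and are not defined here.

# Minimal usage sketch of the enum-based API introduced by this PR.
# Assumes fl_experiment, MI, TI, and fed_dataset were created earlier in the notebook.
from openfl.utilities.enum_types import DevicePolicy
from openfl.utilities.enum_types import OptTreatment

# Before this PR, both arguments were plain strings:
#   opt_treatment='CONTINUE_GLOBAL', device_assignment_policy='CUDA_PREFERRED'
fl_experiment.start(
    model_provider=MI,
    task_keeper=TI,
    data_loader=fed_dataset,
    rounds_to_train=5,
    opt_treatment=OptTreatment.CONTINUE_GLOBAL,
    device_assignment_policy=DevicePolicy.CUDA_PREFERRED,
)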
16 changes: 6 additions & 10 deletions openfl-tutorials/Federated_FedProx_Keras_MNIST_Tutorial.ipynb
@@ -10,9 +10,7 @@
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"metadata": {},
"outputs": [],
"source": [
"#Install Tensorflow and MNIST dataset if not installed\n",
@@ -293,13 +291,12 @@
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"metadata": {},
"outputs": [],
"source": [
"#Run experiment, return trained FederatedModel\n",
"final_fl_model = fx.run_experiment(collaborators,override_config={'aggregator.settings.rounds_to_train':5, 'collaborator.settings.opt_treatment': 'CONTINUE_GLOBAL'})"
"from openfl.utilities.enum_types import OptTreatment\n",
"final_fl_model = fx.run_experiment(collaborators,override_config={'aggregator.settings.rounds_to_train':5, 'collaborator.settings.opt_treatment': OptTreatment.CONTINUE_GLOBAL})"
]
},
{
Expand Down Expand Up @@ -354,7 +351,7 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
@@ -367,8 +364,7 @@
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.8"
"pygments_lexer": "ipython3"
}
},
"nbformat": 4,
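The FedProx tutorial above uses the native API, where the override_config entry now takes the enum member instead of a string. A minimal sketch of the updated call, assuming fx and collaborators were set up earlier in that notebook:

# Sketch of the updated override_config call from the FedProx tutorial diff above.
# Assumes 'fx' and 'collaborators' were set up earlier in the notebook.
from openfl.utilities.enum_types import OptTreatment

final_fl_model = fx.run_experiment(
    collaborators,
    override_config={
        'aggregator.settings.rounds_to_train': 5,
        'collaborator.settings.opt_treatment': OptTreatment.CONTINUE_GLOBAL,
    },
)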
@@ -429,13 +429,16 @@
"outputs": [],
"source": [
"# The following command zips the workspace and python requirements to be transfered to collaborator nodes\n",
"from openfl.utilities.enum_types import DevicePolicy\n",
"from openfl.utilities.enum_types import OptTreatment\n",
"\n",
"fl_experiment.start(\n",
" model_provider=MI,\n",
" task_keeper=TI,\n",
" data_loader=fed_dataset,\n",
" rounds_to_train=10,\n",
" opt_treatment=\"CONTINUE_GLOBAL\",\n",
" device_assignment_policy=\"CUDA_PREFERRED\",\n",
" opt_treatment=OptTreatment.CONTINUE_GLOBAL,\n",
" device_assignment_policy=DevicePolicy.CUDA_PREFERRED,\n",
")"
]
},
@@ -510,12 +510,15 @@
"outputs": [],
"source": [
"# The following command zips the workspace and python requirements to be transfered to collaborator nodes\n",
"from openfl.utilities.enum_types import DevicePolicy\n",
"from openfl.utilities.enum_types import OptTreatment\n",
"\n",
"fl_experiment.start(model_provider=MI,\n",
" task_keeper=TI,\n",
" data_loader=fed_dataset,\n",
" rounds_to_train=5,\n",
" opt_treatment='CONTINUE_GLOBAL',\n",
" device_assignment_policy='CUDA_PREFERRED')"
" opt_treatment=OptTreatment.CONTINUE_GLOBAL,\n",
" device_assignment_policy=DevicePolicy.CUDA_PREFERRED)"
]
},
{
@@ -463,12 +463,16 @@
"outputs": [],
"source": [
"# The following command zips the workspace and python requirements to be transfered to collaborator nodes\n",
"from openfl.utilities.enum_types import DevicePolicy\n",
"from openfl.utilities.enum_types import OptTreatment\n",
"\n",
"fl_experiment.start(\n",
" model_provider=model_interface, \n",
" task_keeper=task_interface,\n",
" data_loader=fed_dataset,\n",
" rounds_to_train=5,\n",
" opt_treatment='CONTINUE_GLOBAL'\n",
" opt_treatment=OptTreatment.CONTINUE_GLOBAL,\n",
" device_assignment_policy=DevicePolicy.CUDA_PREFERRED,\n",
")"
]
},
@@ -487,8 +491,22 @@
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"name": "python"
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.9"
}
},
"nbformat": 4,
@@ -474,12 +474,16 @@
"outputs": [],
"source": [
"# The following command zips the workspace and python requirements to be transfered to collaborator nodes\n",
"from openfl.utilities.enum_types import DevicePolicy\n",
"from openfl.utilities.enum_types import OptTreatment\n",
"\n",
"fl_experiment.start(\n",
" model_provider=model_interface, \n",
" task_keeper=task_interface,\n",
" data_loader=fed_dataset,\n",
" rounds_to_train=5,\n",
" opt_treatment='CONTINUE_GLOBAL'\n",
" opt_treatment=OptTreatment.CONTINUE_GLOBAL,\n",
" device_assignment_policy=DevicePolicy.CUDA_PREFERRED\n",
")"
]
},
@@ -438,13 +438,16 @@
"metadata": {},
"outputs": [],
"source": [
"from openfl.utilities.enum_types import DevicePolicy\n",
"from openfl.utilities.enum_types import OptTreatment\n",
"\n",
"fl_experiment.start(\n",
" model_provider=MI,\n",
" task_keeper=TI,\n",
" data_loader=fed_dataset,\n",
" rounds_to_train=2,\n",
" opt_treatment=\"CONTINUE_GLOBAL\",\n",
" device_assignment_policy=\"CUDA_PREFERRED\",\n",
" opt_treatment=OptTreatment.CONTINUE_GLOBAL,\n",
" device_assignment_policy=DevicePolicy.CUDA_PREFERRED,\n",
")"
]
},
@@ -484,12 +484,15 @@
"# If I use autoreload I got a pickling error\n",
"\n",
"# The following command zips the workspace and python requirements to be transfered to collaborator nodes\n",
"from openfl.utilities.enum_types import DevicePolicy\n",
"from openfl.utilities.enum_types import OptTreatment\n",
"\n",
"fl_experiment.start(model_provider=MI, \n",
" task_keeper=TI,\n",
" data_loader=fed_dataset,\n",
" rounds_to_train=2,\n",
" opt_treatment='CONTINUE_GLOBAL',\n",
" device_assignment_policy='CUDA_PREFERRED')\n"
" opt_treatment=OptTreatment.CONTINUE_GLOBAL,\n",
" device_assignment_policy=DevicePolicy.CUDA_PREFERRED)\n"
]
},
{
@@ -584,7 +587,7 @@
"source": [
"MI = ModelInterface(model=best_model, optimizer=optimizer_adam, framework_plugin=framework_adapter)\n",
"fl_experiment.start(model_provider=MI, task_keeper=TI, data_loader=fed_dataset, rounds_to_train=4, \\\n",
" opt_treatment='CONTINUE_GLOBAL')"
" opt_treatment=OptTreatment.CONTINUE_GLOBAL)"
]
},
{
@@ -596,13 +596,16 @@
"metadata": {},
"outputs": [],
"source": [
"from openfl.utilities.enum_types import DevicePolicy\n",
"from openfl.utilities.enum_types import OptTreatment\n",
"\n",
"fl_experiment.start(\n",
" model_provider=MI,\n",
" task_keeper=TI,\n",
" data_loader=fed_dataset,\n",
" rounds_to_train=10,\n",
" opt_treatment=\"CONTINUE_GLOBAL\",\n",
" device_assignment_policy=\"CUDA_PREFERRED\",\n",
" opt_treatment=OptTreatment.CONTINUE_GLOBAL,\n",
" device_assignment_policy=DevicePolicy.CUDA_PREFERRED,\n",
")"
]
},
@@ -965,12 +965,15 @@
"# If I use autoreload I got a pickling error\n",
"\n",
"# The following command zips the workspace and python requirements to be transfered to collaborator nodes\n",
"from openfl.utilities.enum_types import DevicePolicy\n",
"from openfl.utilities.enum_types import OptTreatment\n",
"\n",
"fl_experiment.start(model_provider=MI, \n",
" task_keeper=TI,\n",
" data_loader=fed_dataset,\n",
" rounds_to_train=10,\n",
" opt_treatment='CONTINUE_GLOBAL',\n",
" device_assignment_policy='CUDA_PREFERRED')\n"
" opt_treatment=OptTreatment.CONTINUE_GLOBAL,\n",
" device_assignment_policy=DevicePolicy.CUDA_PREFERRED)\n"
]
},
{
@@ -543,11 +543,15 @@
"# If I use autoreload I got a pickling error\n",
"\n",
"# The following command zips the workspace and python requirements to be transfered to collaborator nodes\n",
"from openfl.utilities.enum_types import DevicePolicy\n",
"from openfl.utilities.enum_types import OptTreatment\n",
"\n",
"fl_experiment.start(model_provider=MI, \n",
" task_keeper=TI,\n",
" data_loader=fed_dataset,\n",
" rounds_to_train=3,\n",
" opt_treatment='RESET')"
" opt_treatment=OptTreatment.RESET,\n",
" device_assignment_policy=DevicePolicy.CUDA_PREFERRED)"
]
},
{
@@ -590,4 +594,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}
@@ -450,12 +450,16 @@
"outputs": [],
"source": [
"# The following command zips the workspace and python requirements to be transfered to collaborator nodes\n",
"from openfl.utilities.enum_types import DevicePolicy\n",
"from openfl.utilities.enum_types import OptTreatment\n",
"\n",
"fl_experiment.start(\n",
" model_provider=model_interface, \n",
" task_keeper=task_interface,\n",
" data_loader=fed_dataset,\n",
" rounds_to_train=5,\n",
" opt_treatment='CONTINUE_GLOBAL'\n",
" opt_treatment=OptTreatment.CONTINUE_GLOBAL,\n",
" device_assignment_policy=DevicePolicy.CUDA_PREFERRED\n",
")"
]
},
@@ -360,11 +360,15 @@
"outputs": [],
"source": [
"# The following command zips the workspace and python requirements to be transfered to collaborator nodes\n",
"from openfl.utilities.enum_types import DevicePolicy\n",
"from openfl.utilities.enum_types import OptTreatment\n",
"\n",
"fl_experiment.start(model_provider=MI, \n",
" task_keeper=TI,\n",
" data_loader=fed_dataset,\n",
" rounds_to_train=5,\n",
" opt_treatment='CONTINUE_GLOBAL')"
" opt_treatment=OptTreatment.CONTINUE_GLOBAL,\n",
" device_assignment_policy=DevicePolicy.CUDA_PREFERRED)"
]
},
{
@@ -376,11 +376,15 @@
"# If I use autoreload I got a pickling error\n",
"\n",
"# The following command zips the workspace and python requirements to be transfered to collaborator nodes\n",
"from openfl.utilities.enum_types import DevicePolicy\n",
"from openfl.utilities.enum_types import OptTreatment\n",
"\n",
"fl_experiment.start(model_provider=MI, \n",
" task_keeper=TI,\n",
" data_loader=fed_dataset,\n",
" rounds_to_train=20,\n",
" opt_treatment='RESET')"
" opt_treatment=OptTreatment.RESET,\n",
" device_assignment_policy=DevicePolicy.CUDA_PREFERRED)"
]
},
{
56 changes: 9 additions & 47 deletions openfl/component/collaborator/collaborator.py
@@ -3,7 +3,6 @@

"""Collaborator module."""

from enum import Enum
from logging import getLogger
from time import sleep
from typing import Tuple
@@ -12,35 +11,11 @@
from openfl.pipelines import NoCompressionPipeline
from openfl.pipelines import TensorCodec
from openfl.protocols import utils
from openfl.utilities.enum_types import DevicePolicy
from openfl.utilities.enum_types import OptTreatment
from openfl.utilities import TensorKey


class DevicePolicy(Enum):
"""Device assignment policy."""

CPU_ONLY = 1

CUDA_PREFERRED = 2


class OptTreatment(Enum):
"""Optimizer Methods.

- RESET tells each collaborator to reset the optimizer state at the beginning
of each round.

- CONTINUE_LOCAL tells each collaborator to continue with the local optimizer
state from the previous round.

- CONTINUE_GLOBAL tells each collaborator to continue with the federally
averaged optimizer state from the previous round.
"""

RESET = 1
CONTINUE_LOCAL = 2
CONTINUE_GLOBAL = 3


class Collaborator:
r"""The Collaborator object class.

@@ -49,8 +24,8 @@ class Collaborator:
aggregator_uuid: The unique id for the client
federation_uuid: The unique id for the federation
model: The model
opt_treatment* (string): The optimizer state treatment (Defaults to
"CONTINUE_GLOBAL", which is aggreagated state from previous round.)
opt_treatment* (enum.Enum): The optimizer state treatment (Defaults to
OptTreatment.CONTINUE_GLOBAL, which is the aggregated state from the previous round.)

compression_pipeline: The compression pipeline (Defaults to None)

@@ -74,8 +49,8 @@ def __init__(self,
client,
task_runner,
task_config,
opt_treatment='RESET',
device_assignment_policy='CPU_ONLY',
opt_treatment=OptTreatment.RESET,
device_assignment_policy=DevicePolicy.CPU_ONLY,
delta_updates=False,
compression_pipeline=None,
db_store_rounds=1,
@@ -105,23 +80,10 @@

self.logger = getLogger(__name__)

# RESET/CONTINUE_LOCAL/CONTINUE_GLOBAL
if hasattr(OptTreatment, opt_treatment):
self.opt_treatment = OptTreatment[opt_treatment]
else:
self.logger.error(f'Unknown opt_treatment: {opt_treatment.name}.')
raise NotImplementedError(f'Unknown opt_treatment: {opt_treatment}.')

if hasattr(DevicePolicy, device_assignment_policy):
self.device_assignment_policy = DevicePolicy[device_assignment_policy]
else:
self.logger.error('Unknown device_assignment_policy: '
f'{device_assignment_policy.name}.')
raise NotImplementedError(
f'Unknown device_assignment_policy: {device_assignment_policy}.'
)
self.opt_treatment = opt_treatment
self.device_assignment_policy = device_assignment_policy

self.task_runner.set_optimizer_treatment(self.opt_treatment.name)
self.task_runner.set_optimizer_treatment(self.opt_treatment)

def set_available_devices(self, cuda: Tuple[str] = ()):
"""
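The new openfl/utilities/enum_types.py module is not shown in this diff; presumably it carries over the definitions removed from collaborator.py above, roughly as in the sketch below (reconstructed from the deleted code, not the actual file contents).

"""Enum types sketch, reconstructed from the definitions removed from collaborator.py."""

from enum import Enum


class DevicePolicy(Enum):
    """Device assignment policy."""

    CPU_ONLY = 1
    CUDA_PREFERRED = 2


class OptTreatment(Enum):
    """Optimizer state treatment.

    - RESET tells each collaborator to reset the optimizer state at the beginning
      of each round.
    - CONTINUE_LOCAL tells each collaborator to continue with the local optimizer
      state from the previous round.
    - CONTINUE_GLOBAL tells each collaborator to continue with the federally
      averaged optimizer state from the previous round.
    """

    RESET = 1
    CONTINUE_LOCAL = 2
    CONTINUE_GLOBAL = 3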