Commit bea8729: Fix lint with flake8

Signed-off-by: kta-intel <[email protected]>
kta-intel committed Jun 7, 2024
1 parent e4dde55 commit bea8729
Showing 8 changed files with 37 additions and 32 deletions.
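
Note: to reproduce this lint run locally, flake8 exposes a documented legacy
Python API. A minimal sketch, assuming the repository root as the working
directory; the path list is illustrative, and project-specific flake8
configuration is discovered the same way the command-line tool discovers it:

    from flake8.api import legacy as flake8

    # Build a style guide from the default options, then lint the tree.
    style_guide = flake8.get_style_guide()
    report = style_guide.check_files(['openfl/', 'openfl-workspace/'])
    print(report.total_errors)  # expected to be 0 after this commit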
4 changes: 2 additions & 2 deletions openfl-workspace/tf_2dunet/src/tf_2dunet.py
@@ -5,12 +5,12 @@

 import tensorflow.compat.v1 as tf

-from openfl.federated import TensorFlowTaskRunner_v1
+from openfl.federated import TensorFlowTaskRunnerV1

 tf.disable_v2_behavior()


-class TensorFlow2DUNet(TensorFlowTaskRunner_v1):
+class TensorFlow2DUNet(TensorFlowTaskRunnerV1):
     """Initialize.

     Args:
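
Note: the rename from TensorFlowTaskRunner_v1 to TensorFlowTaskRunnerV1 brings
the class name in line with PEP 8 CapWords style, and the old name is not kept
as an alias, so downstream code must update its imports. A minimal migration
sketch (MyTaskRunner is a hypothetical subclass, not part of this commit):

    # Before this commit:
    #     from openfl.federated import TensorFlowTaskRunner_v1
    # After this commit, only the new name is exported:
    from openfl.federated import TensorFlowTaskRunnerV1

    class MyTaskRunner(TensorFlowTaskRunnerV1):
        """Hypothetical workspace task runner built on the renamed base."""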
1 change: 1 addition & 0 deletions openfl-workspace/tf_cnn_histology/src/dataloader.py
@@ -11,6 +11,7 @@

 logger = getLogger(__name__)

+
 class TensorFlowHistologyInMemory(TensorFlowDataLoader):
     """TensorFlow Data Loader for Colorectal Histology Dataset."""

3 changes: 1 addition & 2 deletions openfl-workspace/tf_cnn_histology/src/taskrunner.py
@@ -59,7 +59,7 @@ def build_model(self,
         print(tf.config.threading.get_intra_op_parallelism_threads())
         print(tf.config.threading.get_inter_op_parallelism_threads())

-        ## Define Model using Functional API
+        # Define Model using Functional API

         inputs = tf.keras.layers.Input(shape=input_shape)
         conv = tf.keras.layers.Conv2D(
@@ -107,7 +107,6 @@ def build_model(self,

         return model

-
     def train_(self, batch_generator, metrics: list = None, **kwargs):
         """Train single epoch.
22 changes: 15 additions & 7 deletions openfl-workspace/tf_cnn_mnist/src/taskrunner.py
@@ -46,22 +46,30 @@ def build_model(self,
             tensorflow.python.keras.engine.sequential.Sequential
         """

         model = tf.keras.models.Sequential([
-            tf.keras.layers.Conv2D(16, kernel_size=(4, 4), strides=(2, 2), activation='relu', input_shape=input_shape),
-            tf.keras.layers.Conv2D(32, kernel_size=(4, 4), strides=(2, 2), activation='relu'),
+            tf.keras.layers.Conv2D(16,
+                                   kernel_size=(4, 4),
+                                   strides=(2, 2),
+                                   activation='relu',
+                                   input_shape=input_shape),
+            tf.keras.layers.Conv2D(32,
+                                   kernel_size=(4, 4),
+                                   strides=(2, 2),
+                                   activation='relu'),
             tf.keras.layers.Flatten(),
-            tf.keras.layers.Dense(100, activation='relu'),
-            tf.keras.layers.Dense(num_classes, activation='softmax')
+            tf.keras.layers.Dense(100,
+                                  activation='relu'),
+            tf.keras.layers.Dense(num_classes,
+                                  activation='softmax')
         ])

         model.compile(loss=tf.keras.losses.categorical_crossentropy,
                       optimizer=tf.keras.optimizers.Adam(),
                       metrics=['accuracy'])

         return model

-
-
     def train_(self, batch_generator, metrics: list = None, **kwargs):
         """Train single epoch.
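
Note: the reflow above only wraps lines that exceeded flake8's line-length
limit; no layer arguments change. A quick sanity-check sketch, assuming
TensorFlow 2.x with MNIST-shaped inputs (input_shape=(28, 28, 1) and
num_classes=10 are assumptions, not values fixed by this commit):

    import tensorflow as tf

    model = tf.keras.models.Sequential([
        tf.keras.layers.Conv2D(16, kernel_size=(4, 4), strides=(2, 2),
                               activation='relu', input_shape=(28, 28, 1)),
        tf.keras.layers.Conv2D(32, kernel_size=(4, 4), strides=(2, 2),
                               activation='relu'),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(100, activation='relu'),
        tf.keras.layers.Dense(10, activation='softmax'),
    ])
    model.summary()  # architecture is identical before and after the reflow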
3 changes: 1 addition & 2 deletions openfl/federated/__init__.py
@@ -9,9 +9,8 @@
 from .data import DataLoader  # NOQA

 if importlib.util.find_spec('tensorflow'):
-    from .task import TensorFlowTaskRunner, TensorFlowTaskRunner_v1, KerasTaskRunner, FederatedModel  # NOQA
+    from .task import TensorFlowTaskRunner, TensorFlowTaskRunnerV1, KerasTaskRunner, FederatedModel  # NOQA
     from .data import TensorFlowDataLoader, KerasDataLoader, FederatedDataSet  # NOQA
 if importlib.util.find_spec('torch'):
     from .task import PyTorchTaskRunner, FederatedModel  # NOQA
     from .data import PyTorchDataLoader, FederatedDataSet  # NOQA
-
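
Note: both __init__ modules guard these imports with importlib.util.find_spec,
which returns a ModuleSpec when a package is importable and None otherwise, so
optional backends are skipped without importing them or catching ImportError.
A minimal sketch of the pattern:

    import importlib.util

    # Probe for an optional dependency without importing it.
    if importlib.util.find_spec('tensorflow') is not None:
        from openfl.federated import TensorFlowTaskRunnerV1  # noqa: F401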
2 changes: 1 addition & 1 deletion openfl/federated/task/__init__.py
@@ -16,7 +16,7 @@
 from .runner import TaskRunner  # NOQA

 if importlib.util.find_spec('tensorflow'):
-    from .runner_tf import TensorFlowTaskRunner, TensorFlowTaskRunner_v1  # NOQA
+    from .runner_tf import TensorFlowTaskRunner, TensorFlowTaskRunnerV1  # NOQA
     from .runner_keras import KerasTaskRunner  # NOQA
     from .fl_model import FederatedModel  # NOQA
 if importlib.util.find_spec('torch'):
4 changes: 2 additions & 2 deletions openfl/federated/task/runner_keras.py
@@ -82,8 +82,8 @@ def train_task(self, col_name, round_num, input_tensor_dict,
         for epoch in range(epochs):
             self.logger.info(f'Run {epoch} epoch of {round_num} round')
             results = self.train_(self.data_loader.get_train_loader(batch_size),
-                          metrics=metrics,
-                          **kwargs)
+                                  metrics=metrics,
+                                  **kwargs)

         # output metric tensors (scalar)
         origin = col_name
30 changes: 14 additions & 16 deletions openfl/federated/task/runner_tf.py
@@ -9,6 +9,9 @@
 from openfl.utilities.split import split_tensor_dict_for_holdouts
 from .runner import TaskRunner

+import tensorflow.compat.v1
+from tqdm import tqdm
+

 class TensorFlowTaskRunner(TaskRunner):
     """The base model for Keras models in the federation."""
@@ -70,8 +73,8 @@ def train_task(self, col_name, round_num, input_tensor_dict,
         for epoch in range(epochs):
             self.logger.info(f'Run {epoch} epoch of {round_num} round')
             results = self.train_(self.data_loader.get_train_loader(batch_size),
-                          metrics=metrics,
-                          **kwargs)
+                                  metrics=metrics,
+                                  **kwargs)

         # output metric tensors (scalar)
         origin = col_name
@@ -260,7 +263,7 @@ def _get_weights_names(obj, with_opt_vars):
         weight_names = [weight.name for weight in obj.weights]
     else:
         weight_names = [weight.name for weight in obj.variables]
-
+
     weight_names = [weight.name for weight in obj.weights]
     return weight_names
@@ -296,7 +299,6 @@ def _get_weights_dict(obj, suffix='', with_opt_vars=False):
     weight_names = [weight.name for weight in obj.weights]
     weight_values = obj.get_weights()

-
     for name, value in zip(weight_names, weight_values):
         weights_dict[name + suffix] = value
     return weights_dict
@@ -315,7 +317,7 @@ def _set_weights_dict(obj, weights_dict, with_opt_vars=False):
     Returns:
         None
     """
-
+
     if with_opt_vars:
         # When acquiring optimizer weights, check optimizer version.
         # Current optimizer does not use 'weights' attributes
@@ -344,15 +346,15 @@ def get_tensor_dict(self, with_opt_vars, suffix=''):
     Returns:
         dict: The tensor dictionary.
     """
-
+
     model_weights = self._get_weights_dict(self.model, suffix)

     if with_opt_vars:
-
+
         opt_weights = self._get_weights_dict(self.model.optimizer, suffix, with_opt_vars)

         model_weights.update(opt_weights)
-
+
         if len(opt_weights) == 0:
             self.logger.debug(
                 "WARNING: We didn't find variables for the optimizer.")
@@ -384,11 +386,11 @@ def set_tensor_dict(self, tensor_dict, with_opt_vars):
             if 'legacy' in self.model.optimizer.__class__.__module__:
                 opt_weight_names = [
                     weight.name for weight in self.model.optimizer.weights
-                    ]
+                ]
             else:
                 opt_weight_names = [
                     weight.name for weight in self.model.optimizer.variables
-                    ]
+                ]

             opt_weights_dict = {
                 name: tensor_dict[name] for name in opt_weight_names
@@ -565,11 +567,7 @@ def initialize_tensorkeys_for_functions(self, with_opt_vars=False):
         ]


-import tensorflow.compat.v1
-from tqdm import tqdm
-
-
-class TensorFlowTaskRunner_v1(TaskRunner):
+class TensorFlowTaskRunnerV1(TaskRunner):
     """
     Base class for TensorFlow models in the Federated Learning solution.

@@ -999,4 +997,4 @@ def tf_set_tensor_dict(tensor_dict, session, variables,
     for k, v in tensor_dict.items():
         session.run(assign_ops[k], feed_dict={placeholders[k]: v})

-    return assign_ops, placeholders
\ No newline at end of file
+    return assign_ops, placeholders
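
Note: the final hunk's tf_set_tensor_dict relies on the classic TF1
placeholder-and-assign pattern for writing numpy arrays into graph variables.
A minimal self-contained sketch, assuming graph mode via tensorflow.compat.v1
(the variable name and shape are illustrative, not taken from this commit):

    import numpy as np
    import tensorflow.compat.v1 as tf

    tf.disable_v2_behavior()

    var = tf.Variable(np.zeros((2, 2), dtype=np.float32), name='w')
    placeholder = tf.placeholder(tf.float32, shape=(2, 2))
    assign_op = var.assign(placeholder)  # reusable op; avoids graph bloat

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(assign_op, feed_dict={placeholder: np.ones((2, 2), np.float32)})
        print(sess.run(var))  # [[1. 1.] [1. 1.]]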
